diff -Nru beets-1.3.8+dfsg/beets/art.py beets-1.3.19/beets/art.py --- beets-1.3.8+dfsg/beets/art.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beets/art.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,223 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""High-level utilities for manipulating image files associated with +music and items' embedded album art. +""" + +from __future__ import division, absolute_import, print_function + +import subprocess +import platform +from tempfile import NamedTemporaryFile +import imghdr +import os + +from beets.util import displayable_path, syspath, bytestring_path +from beets.util.artresizer import ArtResizer +from beets import mediafile + + +def mediafile_image(image_path, maxwidth=None): + """Return a `mediafile.Image` object for the path. + """ + + with open(syspath(image_path), 'rb') as f: + data = f.read() + return mediafile.Image(data, type=mediafile.ImageType.front) + + +def get_art(log, item): + # Extract the art. + try: + mf = mediafile.MediaFile(syspath(item.path)) + except mediafile.UnreadableFileError as exc: + log.warning(u'Could not extract art from {0}: {1}', + displayable_path(item.path), exc) + return + + return mf.art + + +def embed_item(log, item, imagepath, maxwidth=None, itempath=None, + compare_threshold=0, ifempty=False, as_album=False): + """Embed an image into the item's media file. + """ + # Conditions and filters. + if compare_threshold: + if not check_art_similarity(log, item, imagepath, compare_threshold): + log.info(u'Image not similar; skipping.') + return + if ifempty and get_art(log, item): + log.info(u'media file already contained art') + return + if maxwidth and not as_album: + imagepath = resize_image(log, imagepath, maxwidth) + + # Get the `Image` object from the file. + try: + log.debug(u'embedding {0}', displayable_path(imagepath)) + image = mediafile_image(imagepath, maxwidth) + except IOError as exc: + log.warning(u'could not read image file: {0}', exc) + return + + # Make sure the image kind is safe (some formats only support PNG + # and JPEG). + if image.mime_type not in ('image/jpeg', 'image/png'): + log.info('not embedding image of unsupported type: {}', + image.mime_type) + return + + item.try_write(path=itempath, tags={'images': [image]}) + + +def embed_album(log, album, maxwidth=None, quiet=False, + compare_threshold=0, ifempty=False): + """Embed album art into all of the album's items. 
+ """ + imagepath = album.artpath + if not imagepath: + log.info(u'No album art present for {0}', album) + return + if not os.path.isfile(syspath(imagepath)): + log.info(u'Album art not found at {0} for {1}', + displayable_path(imagepath), album) + return + if maxwidth: + imagepath = resize_image(log, imagepath, maxwidth) + + log.info(u'Embedding album art into {0}', album) + + for item in album.items(): + embed_item(log, item, imagepath, maxwidth, None, + compare_threshold, ifempty, as_album=True) + + +def resize_image(log, imagepath, maxwidth): + """Returns path to an image resized to maxwidth. + """ + log.debug(u'Resizing album art to {0} pixels wide', maxwidth) + imagepath = ArtResizer.shared.resize(maxwidth, syspath(imagepath)) + return imagepath + + +def check_art_similarity(log, item, imagepath, compare_threshold): + """A boolean indicating if an image is similar to embedded item art. + """ + with NamedTemporaryFile(delete=True) as f: + art = extract(log, f.name, item) + + if art: + is_windows = platform.system() == "Windows" + + # Converting images to grayscale tends to minimize the weight + # of colors in the diff score. So we first convert both images + # to grayscale and then pipe them into the `compare` command. + # On Windows, ImageMagick doesn't support the magic \\?\ prefix + # on paths, so we pass `prefix=False` to `syspath`. + convert_cmd = [b'convert', syspath(imagepath, prefix=False), + syspath(art, prefix=False), + b'-colorspace', b'gray', b'MIFF:-'] + compare_cmd = [b'compare', b'-metric', b'PHASH', b'-', b'null:'] + log.debug(u'comparing images with pipeline {} | {}', + convert_cmd, compare_cmd) + convert_proc = subprocess.Popen( + convert_cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=not is_windows, + ) + compare_proc = subprocess.Popen( + compare_cmd, + stdin=convert_proc.stdout, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=not is_windows, + ) + + # Check the convert output. We're not interested in the + # standard output; that gets piped to the next stage. + convert_proc.stdout.close() + convert_stderr = convert_proc.stderr.read() + convert_proc.stderr.close() + convert_proc.wait() + if convert_proc.returncode: + log.debug( + u'ImageMagick convert failed with status {}: {!r}', + convert_proc.returncode, + convert_stderr, + ) + return + + # Check the compare output. + stdout, stderr = compare_proc.communicate() + if compare_proc.returncode: + if compare_proc.returncode != 1: + log.debug(u'ImageMagick compare failed: {0}, {1}', + displayable_path(imagepath), + displayable_path(art)) + return + out_str = stderr + else: + out_str = stdout + + try: + phash_diff = float(out_str) + except ValueError: + log.debug(u'IM output is not a number: {0!r}', out_str) + return + + log.debug(u'ImageMagick copmare score: {0}', phash_diff) + return phash_diff <= compare_threshold + + return True + + +def extract(log, outpath, item): + art = get_art(log, item) + outpath = bytestring_path(outpath) + if not art: + log.info(u'No album art present in {0}, skipping.', item) + return + + # Add an extension to the filename. + ext = imghdr.what(None, h=art) + if not ext: + log.warning(u'Unknown image type in {0}.', + displayable_path(item.path)) + return + outpath += bytestring_path('.' 
+ ext) + + log.info(u'Extracting album art from: {0} to: {1}', + item, displayable_path(outpath)) + with open(syspath(outpath), 'wb') as f: + f.write(art) + return outpath + + +def extract_first(log, outpath, items): + for item in items: + real_path = extract(log, outpath, item) + if real_path: + return real_path + + +def clear(log, lib, query): + items = lib.items(query) + log.info(u'Clearing album art from {0} items', len(items)) + for item in items: + log.debug(u'Clearing art for {0}', item) + item.try_write(tags={'images': None}) diff -Nru beets-1.3.8+dfsg/beets/autotag/hooks.py beets-1.3.19/beets/autotag/hooks.py --- beets-1.3.8+dfsg/beets/autotag/hooks.py 2014-04-12 20:56:51.000000000 +0000 +++ beets-1.3.19/beets/autotag/hooks.py 2016-06-26 00:42:09.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -13,14 +14,18 @@ # included in all copies or substantial portions of the Software. """Glue between metadata sources and the matching logic.""" -import logging +from __future__ import division, absolute_import, print_function + from collections import namedtuple +from functools import total_ordering import re +from beets import logging from beets import plugins from beets import config +from beets.util import as_string from beets.autotag import mb -from beets.util import levenshtein +from jellyfish import levenshtein_distance from unidecode import unidecode log = logging.getLogger('beets') @@ -109,7 +114,7 @@ 'catalognum', 'script', 'language', 'country', 'albumstatus', 'albumdisambig', 'artist_credit', 'media']: value = getattr(self, fld) - if isinstance(value, str): + if isinstance(value, bytes): setattr(self, fld, value.decode(codec, 'ignore')) if self.tracks: @@ -127,12 +132,15 @@ - ``artist_id`` - ``length``: float: duration of the track in seconds - ``index``: position on the entire release + - ``media``: delivery mechanism (Vinyl, etc.) - ``medium``: the disc number this track appears on in the album - ``medium_index``: the track's position on the disc - ``medium_total``: the number of tracks on the item's disc - ``artist_sort``: name of the track artist for sorting - ``disctitle``: name of the individual medium (subtitle) - ``artist_credit``: Recording-specific artist name + - ``data_source``: The original data source (MusicBrainz, Discogs, etc.) + - ``data_url``: The data source release URL. Only ``title`` and ``track_id`` are required. The rest of the fields may be None. The indices ``index``, ``medium``, and ``medium_index`` @@ -141,13 +149,15 @@ def __init__(self, title, track_id, artist=None, artist_id=None, length=None, index=None, medium=None, medium_index=None, medium_total=None, artist_sort=None, disctitle=None, - artist_credit=None, data_source=None, data_url=None): + artist_credit=None, data_source=None, data_url=None, + media=None): self.title = title self.track_id = track_id self.artist = artist self.artist_id = artist_id self.length = length self.index = index + self.media = media self.medium = medium self.medium_index = medium_index self.medium_total = medium_total @@ -163,9 +173,9 @@ to Unicode. 
""" for fld in ['title', 'artist', 'medium', 'artist_sort', 'disctitle', - 'artist_credit']: + 'artist_credit', 'media']: value = getattr(self, fld) - if isinstance(value, str): + if isinstance(value, bytes): setattr(self, fld, value.decode(codec, 'ignore')) @@ -195,13 +205,15 @@ transliteration/lowering to ASCII characters. Normalized by string length. """ - str1 = unidecode(str1) - str2 = unidecode(str2) + assert isinstance(str1, unicode) + assert isinstance(str2, unicode) + str1 = as_string(unidecode(str1)) + str2 = as_string(unidecode(str2)) str1 = re.sub(r'[^a-z0-9]', '', str1.lower()) str2 = re.sub(r'[^a-z0-9]', '', str2.lower()) if not str1 and not str2: return 0.0 - return levenshtein(str1, str2) / float(max(len(str1), len(str2))) + return levenshtein_distance(str1, str2) / float(max(len(str1), len(str2))) def string_dist(str1, str2): @@ -278,6 +290,7 @@ return self.value +@total_ordering class Distance(object): """Keeps track of multiple distance penalties. Provides a single weighted distance for all penalties as well as a weighted distance @@ -287,7 +300,7 @@ self._penalties = {} @LazyClassProperty - def _weights(cls): + def _weights(cls): # noqa """A dictionary from keys to floating-point weights. """ weights_view = config['match']['distance_weights'] @@ -339,12 +352,21 @@ # Convert distance into a negative float we can sort items in # ascending order (for keys, when the penalty is equal) and # still get the items with the biggest distance first. - return sorted(list_, key=lambda (key, dist): (0 - dist, key)) + return sorted( + list_, + key=lambda key_and_dist: (-key_and_dist[1], key_and_dist[0]) + ) + + def __hash__(self): + return id(self) + + def __eq__(self, other): + return self.distance == other # Behave like a float. - def __cmp__(self, other): - return cmp(self.distance, other) + def __lt__(self, other): + return self.distance < other def __float__(self): return self.distance @@ -355,6 +377,9 @@ def __rsub__(self, other): return other - self.distance + def __unicode__(self): + return "{0:.2f}".format(self.distance) + # Behave like a dict. def __getitem__(self, key): @@ -380,7 +405,7 @@ """ if not isinstance(dist, Distance): raise ValueError( - '`dist` must be a Distance object, not {0}'.format(type(dist)) + u'`dist` must be a Distance object, not {0}'.format(type(dist)) ) for key, penalties in dist._penalties.iteritems(): self._penalties.setdefault(key, []).extend(penalties) @@ -404,7 +429,7 @@ """ if not 0.0 <= dist <= 1.0: raise ValueError( - '`dist` must be between 0.0 and 1.0, not {0}'.format(dist) + u'`dist` must be between 0.0 and 1.0, not {0}'.format(dist) ) self._penalties.setdefault(key, []).append(dist) @@ -498,7 +523,10 @@ if the ID is not found. """ try: - return mb.album_for_id(release_id) + album = mb.album_for_id(release_id) + if album: + plugins.send(u'albuminfo_received', info=album) + return album except mb.MusicBrainzAPIError as exc: exc.log(log) @@ -508,7 +536,10 @@ if the ID is not found. 
""" try: - return mb.track_for_id(recording_id) + track = mb.track_for_id(recording_id) + if track: + plugins.send(u'trackinfo_received', info=track) + return track except mb.MusicBrainzAPIError as exc: exc.log(log) @@ -516,15 +547,21 @@ def albums_for_id(album_id): """Get a list of albums for an ID.""" candidates = [album_for_mbid(album_id)] - candidates.extend(plugins.album_for_id(album_id)) - return filter(None, candidates) + plugin_albums = plugins.album_for_id(album_id) + for a in plugin_albums: + plugins.send(u'albuminfo_received', info=a) + candidates.extend(plugin_albums) + return [a for a in candidates if a] def tracks_for_id(track_id): """Get a list of tracks for an ID.""" candidates = [track_for_mbid(track_id)] - candidates.extend(plugins.track_for_id(track_id)) - return filter(None, candidates) + plugin_tracks = plugins.track_for_id(track_id) + for t in plugin_tracks: + plugins.send(u'trackinfo_received', info=t) + candidates.extend(plugin_tracks) + return [t for t in candidates if t] def album_candidates(items, artist, album, va_likely): @@ -553,6 +590,10 @@ # Candidates from plugins. out.extend(plugins.candidates(items, artist, album, va_likely)) + # Notify subscribed plugins about fetched album info + for a in out: + plugins.send(u'albuminfo_received', info=a) + return out @@ -573,4 +614,8 @@ # Plugin candidates. out.extend(plugins.item_candidates(item, artist, title)) + # Notify subscribed plugins about fetched track info + for i in out: + plugins.send(u'trackinfo_received', info=i) + return out diff -Nru beets-1.3.8+dfsg/beets/autotag/__init__.py beets-1.3.19/beets/autotag/__init__.py --- beets-1.3.8+dfsg/beets/autotag/__init__.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/beets/autotag/__init__.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,8 +15,10 @@ """Facilities for automatically determining files' correct metadata. """ -import logging +from __future__ import division, absolute_import, print_function + +from beets import logging from beets import config # Parts of external interface. @@ -39,6 +42,8 @@ item.mb_trackid = track_info.track_id if track_info.artist_id: item.mb_artistid = track_info.artist_id + if track_info.data_source: + item.data_source = track_info.data_source # At the moment, the other metadata is left intact (including album # and track number). Perhaps these should be emptied? @@ -90,7 +95,11 @@ item.title = track_info.title if config['per_disc_numbering']: - item.track = track_info.medium_index or track_info.index + # We want to let the track number be zero, but if the medium index + # is not provided we need to fall back to the overall index. 
+ item.track = track_info.medium_index + if item.track is None: + item.track = track_info.index item.tracktotal = track_info.medium_total or len(album_info.tracks) else: item.track = track_info.index @@ -122,10 +131,13 @@ 'language', 'country', 'albumstatus', - 'media', - 'albumdisambig'): + 'albumdisambig', + 'data_source',): value = getattr(album_info, field) if value is not None: item[field] = value if track_info.disctitle is not None: item.disctitle = track_info.disctitle + + if track_info.media is not None: + item.media = track_info.media diff -Nru beets-1.3.8+dfsg/beets/autotag/match.py beets-1.3.19/beets/autotag/match.py --- beets-1.3.8+dfsg/beets/autotag/match.py 2014-09-18 02:01:05.000000000 +0000 +++ beets-1.3.19/beets/autotag/match.py 2016-06-20 17:08:57.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,18 +16,20 @@ """Matches existing metadata with canonical information to identify releases and tracks. """ -from __future__ import division + +from __future__ import division, absolute_import, print_function import datetime -import logging import re from munkres import Munkres +from beets import logging from beets import plugins from beets import config from beets.util import plurality from beets.autotag import hooks from beets.util.enumeration import OrderedEnum +from functools import reduce # Artist signals that indicate "various artists". These are used at the # album level to determine whether a given release is likely a VA @@ -267,7 +270,7 @@ # If all album IDs are equal, look up the album. if bool(reduce(lambda x, y: x if x == y else (), albumids)): albumid = albumids[0] - log.debug(u'Searching for discovered album ID: {0}'.format(albumid)) + log.debug(u'Searching for discovered album ID: {0}', albumid) return hooks.album_for_mbid(albumid) else: log.debug(u'No album ID consensus.') @@ -330,11 +333,11 @@ checking the track count, ordering the items, checking for duplicates, and calculating the distance. """ - log.debug(u'Candidate: {0} - {1}'.format(info.artist, info.album)) + log.debug(u'Candidate: {0} - {1}', info.artist, info.album) # Discard albums with zero tracks. if not info.tracks: - log.debug('No tracks.') + log.debug(u'No tracks.') return # Don't duplicate. @@ -345,7 +348,7 @@ # Discard matches without required tags. for req_tag in config['match']['required'].as_str_seq(): if getattr(info, req_tag) is None: - log.debug(u'Ignored. Missing required tag: {0}'.format(req_tag)) + log.debug(u'Ignored. Missing required tag: {0}', req_tag) return # Find mapping between the items and the track info. @@ -355,19 +358,19 @@ dist = distance(items, info, mapping) # Skip matches with ignored penalties. - penalties = [key for _, key in dist] + penalties = [key for key, _ in dist] for penalty in config['match']['ignored'].as_str_seq(): if penalty in penalties: - log.debug(u'Ignored. Penalty: {0}'.format(penalty)) + log.debug(u'Ignored. Penalty: {0}', penalty) return - log.debug(u'Success. Distance: {0}'.format(dist)) + log.debug(u'Success. 
Distance: {0}', dist) results[info.album_id] = hooks.AlbumMatch(dist, info, mapping, extra_items, extra_tracks) def tag_album(items, search_artist=None, search_album=None, - search_id=None): + search_ids=[]): """Return a tuple of a artist name, an album name, a list of `AlbumMatch` candidates from the metadata backend, and a `Recommendation`. @@ -377,26 +380,31 @@ The `AlbumMatch` objects are generated by searching the metadata backends. By default, the metadata of the items is used for the - search. This can be customized by setting the parameters. The - `mapping` field of the album has the matched `items` as keys. + search. This can be customized by setting the parameters. + `search_ids` is a list of metadata backend IDs: if specified, + it will restrict the candidates to those IDs, ignoring + `search_artist` and `search album`. The `mapping` field of the + album has the matched `items` as keys. - The recommendation is calculated from the match qualitiy of the + The recommendation is calculated from the match quality of the candidates. """ # Get current metadata. likelies, consensus = current_metadata(items) cur_artist = likelies['artist'] cur_album = likelies['album'] - log.debug(u'Tagging {0} - {1}'.format(cur_artist, cur_album)) + log.debug(u'Tagging {0} - {1}', cur_artist, cur_album) # The output result (distance, AlbumInfo) tuples (keyed by MB album # ID). candidates = {} # Search by explicit ID. - if search_id is not None: - log.debug(u'Searching for album ID: {0}'.format(search_id)) - search_cands = hooks.albums_for_id(search_id) + if search_ids: + search_cands = [] + for search_id in search_ids: + log.debug(u'Searching for album ID: {0}', search_id) + search_cands.extend(hooks.albums_for_id(search_id)) # Use existing metadata or text search. else: @@ -404,8 +412,8 @@ id_info = match_by_id(items) if id_info: _add_candidate(items, candidates, id_info) - rec = _recommendation(candidates.values()) - log.debug(u'Album ID match recommendation is {0}'.format(str(rec))) + rec = _recommendation(list(candidates.values())) + log.debug(u'Album ID match recommendation is {0}', rec) if candidates and not config['import']['timid']: # If we have a very good MBID match, return immediately. # Otherwise, this match will compete against metadata-based @@ -418,20 +426,19 @@ if not (search_artist and search_album): # No explicit search terms -- use current metadata. search_artist, search_album = cur_artist, cur_album - log.debug(u'Search terms: {0} - {1}'.format(search_artist, - search_album)) + log.debug(u'Search terms: {0} - {1}', search_artist, search_album) # Is this album likely to be a "various artist" release? va_likely = ((not consensus['artist']) or (search_artist.lower() in VA_ARTISTS) or any(item.comp for item in items)) - log.debug(u'Album might be VA: {0}'.format(str(va_likely))) + log.debug(u'Album might be VA: {0}', va_likely) # Get the results from the data sources. search_cands = hooks.album_candidates(items, search_artist, search_album, va_likely) - log.debug(u'Evaluating {0} candidates.'.format(len(search_cands))) + log.debug(u'Evaluating {0} candidates.', len(search_cands)) for info in search_cands: _add_candidate(items, candidates, info) @@ -442,43 +449,45 @@ def tag_item(item, search_artist=None, search_title=None, - search_id=None): + search_ids=[]): """Attempts to find metadata for a single track. Returns a `(candidates, recommendation)` pair where `candidates` is a list of TrackMatch objects. 
`search_artist` and `search_title` may be used to override the current metadata for the purposes of the MusicBrainz - title; likewise `search_id`. + title. `search_ids` may be used for restricting the search to a list + of metadata backend IDs. """ # Holds candidates found so far: keys are MBIDs; values are # (distance, TrackInfo) pairs. candidates = {} # First, try matching by MusicBrainz ID. - trackid = search_id or item.mb_trackid - if trackid: - log.debug(u'Searching for track ID: {0}'.format(trackid)) - for track_info in hooks.tracks_for_id(trackid): - dist = track_distance(item, track_info, incl_artist=True) - candidates[track_info.track_id] = \ - hooks.TrackMatch(dist, track_info) - # If this is a good match, then don't keep searching. - rec = _recommendation(candidates.values()) - if rec == Recommendation.strong and not config['import']['timid']: - log.debug(u'Track ID match.') - return candidates.values(), rec + trackids = search_ids or [t for t in [item.mb_trackid] if t] + if trackids: + for trackid in trackids: + log.debug(u'Searching for track ID: {0}', trackid) + for track_info in hooks.tracks_for_id(trackid): + dist = track_distance(item, track_info, incl_artist=True) + candidates[track_info.track_id] = \ + hooks.TrackMatch(dist, track_info) + # If this is a good match, then don't keep searching. + rec = _recommendation(sorted(candidates.itervalues())) + if rec == Recommendation.strong and \ + not config['import']['timid']: + log.debug(u'Track ID match.') + return sorted(candidates.itervalues()), rec # If we're searching by ID, don't proceed. - if search_id is not None: + if search_ids: if candidates: - return candidates.values(), rec + return sorted(candidates.itervalues()), rec else: return [], Recommendation.none # Search terms. if not (search_artist and search_title): search_artist, search_title = item.artist, item.title - log.debug(u'Item search terms: {0} - {1}'.format(search_artist, - search_title)) + log.debug(u'Item search terms: {0} - {1}', search_artist, search_title) # Get and evaluate candidate metadata. for track_info in hooks.item_candidates(item, search_artist, search_title): @@ -486,7 +495,7 @@ candidates[track_info.track_id] = hooks.TrackMatch(dist, track_info) # Sort by distance and return with recommendation. - log.debug(u'Found {0} candidates.'.format(len(candidates))) + log.debug(u'Found {0} candidates.', len(candidates)) candidates = sorted(candidates.itervalues()) rec = _recommendation(candidates) return candidates, rec diff -Nru beets-1.3.8+dfsg/beets/autotag/mb.py beets-1.3.19/beets/autotag/mb.py --- beets-1.3.8+dfsg/beets/autotag/mb.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/beets/autotag/mb.py 2016-06-26 00:42:09.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,23 +15,24 @@ """Searches for albums in the MusicBrainz database. 
""" -import logging +from __future__ import division, absolute_import, print_function + import musicbrainzngs import re import traceback from urlparse import urljoin +from beets import logging import beets.autotag.hooks import beets from beets import util from beets import config -SEARCH_LIMIT = 5 VARIOUS_ARTISTS_ID = '89ad4ac3-39f7-470e-963a-56509c546377' BASE_URL = 'http://musicbrainz.org/' musicbrainzngs.set_useragent('beets', beets.__version__, - 'http://beets.radbox.org/') + 'http://beets.io/') class MusicBrainzAPIError(util.HumanReadableException): @@ -39,6 +41,8 @@ """ def __init__(self, reason, verb, query, tb=None): self.query = query + if isinstance(reason, musicbrainzngs.WebServiceError): + reason = u'MusicBrainz not reachable' super(MusicBrainzAPIError, self).__init__(reason, verb, tb) def get_message(self): @@ -157,6 +161,7 @@ medium=medium, medium_index=medium_index, medium_total=medium_total, + data_source=u'MusicBrainz', data_url=track_url(recording['id']), ) @@ -209,7 +214,13 @@ index = 0 for medium in release['medium-list']: disctitle = medium.get('title') - for track in medium['track-list']: + format = medium.get('format') + + all_tracks = medium['track-list'] + if 'pregap' in medium: + all_tracks.insert(0, medium['pregap']) + + for track in all_tracks: # Basic information from the recording. index += 1 ti = track_info( @@ -220,6 +231,7 @@ len(medium['track-list']), ) ti.disctitle = disctitle + ti.media = format # Prefer track data, where present, over recording data. if track.get('title'): @@ -243,10 +255,12 @@ mediums=len(release['medium-list']), artist_sort=artist_sort_name, artist_credit=artist_credit_name, - data_source='MusicBrainz', + data_source=u'MusicBrainz', data_url=album_url(release['id']), ) info.va = info.artist_id == VARIOUS_ARTISTS_ID + if info.va: + info.artist = config['va_name'].get(unicode) info.asin = release.get('asin') info.releasegroup_id = release['release-group']['id'] info.country = release.get('country') @@ -299,7 +313,7 @@ return info -def match_album(artist, album, tracks=None, limit=SEARCH_LIMIT): +def match_album(artist, album, tracks=None): """Searches for a single album ("release" in MusicBrainz parlance) and returns an iterator over AlbumInfo objects. May raise a MusicBrainzAPIError. @@ -308,21 +322,22 @@ optionally, a number of tracks on the album. """ # Build search criteria. - criteria = {'release': album.lower()} + criteria = {'release': album.lower().strip()} if artist is not None: - criteria['artist'] = artist.lower() + criteria['artist'] = artist.lower().strip() else: # Various Artists search. criteria['arid'] = VARIOUS_ARTISTS_ID if tracks is not None: - criteria['tracks'] = str(tracks) + criteria['tracks'] = unicode(tracks) # Abort if we have no search terms. if not any(criteria.itervalues()): return try: - res = musicbrainzngs.search_releases(limit=limit, **criteria) + res = musicbrainzngs.search_releases( + limit=config['musicbrainz']['searchlimit'].get(int), **criteria) except musicbrainzngs.MusicBrainzError as exc: raise MusicBrainzAPIError(exc, 'release search', criteria, traceback.format_exc()) @@ -334,20 +349,21 @@ yield albuminfo -def match_track(artist, title, limit=SEARCH_LIMIT): +def match_track(artist, title): """Searches for a single track and returns an iterable of TrackInfo objects. May raise a MusicBrainzAPIError. 
""" criteria = { - 'artist': artist.lower(), - 'recording': title.lower(), + 'artist': artist.lower().strip(), + 'recording': title.lower().strip(), } if not any(criteria.itervalues()): return try: - res = musicbrainzngs.search_recordings(limit=limit, **criteria) + res = musicbrainzngs.search_recordings( + limit=config['musicbrainz']['searchlimit'].get(int), **criteria) except musicbrainzngs.MusicBrainzError as exc: raise MusicBrainzAPIError(exc, 'recording search', criteria, traceback.format_exc()) @@ -360,7 +376,7 @@ no ID can be found, return None. """ # Find the first thing that looks like a UUID/MBID. - match = re.search('[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s) + match = re.search(u'[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s) if match: return match.group() @@ -372,7 +388,7 @@ """ albumid = _parse_id(releaseid) if not albumid: - log.debug(u'Invalid MBID ({0}).'.format(releaseid)) + log.debug(u'Invalid MBID ({0}).', releaseid) return try: res = musicbrainzngs.get_release_by_id(albumid, @@ -381,7 +397,7 @@ log.debug(u'Album ID match failed.') return None except musicbrainzngs.MusicBrainzError as exc: - raise MusicBrainzAPIError(exc, 'get release by ID', albumid, + raise MusicBrainzAPIError(exc, u'get release by ID', albumid, traceback.format_exc()) return album_info(res['release']) @@ -392,7 +408,7 @@ """ trackid = _parse_id(releaseid) if not trackid: - log.debug(u'Invalid MBID ({0}).'.format(releaseid)) + log.debug(u'Invalid MBID ({0}).', releaseid) return try: res = musicbrainzngs.get_recording_by_id(trackid, TRACK_INCLUDES) @@ -400,6 +416,6 @@ log.debug(u'Track ID match failed.') return None except musicbrainzngs.MusicBrainzError as exc: - raise MusicBrainzAPIError(exc, 'get recording by ID', trackid, + raise MusicBrainzAPIError(exc, u'get recording by ID', trackid, traceback.format_exc()) return track_info(res['recording']) diff -Nru beets-1.3.8+dfsg/beets/config_default.yaml beets-1.3.19/beets/config_default.yaml --- beets-1.3.8+dfsg/beets/config_default.yaml 2014-09-16 21:41:57.000000000 +0000 +++ beets-1.3.19/beets/config_default.yaml 2016-06-20 01:53:12.000000000 +0000 @@ -5,6 +5,7 @@ write: yes copy: yes move: no + link: no delete: no resume: ask incremental: no @@ -20,9 +21,14 @@ detail: no flat: no group_albums: no + pretend: false + search_ids: [] + duplicate_action: ask clutter: ["Thumbs.DB", ".DS_Store"] -ignore: [".*", "*~", "System Volume Information"] +ignore: [".*", "*~", "System Volume Information", "lost+found"] +ignore_hidden: yes + replace: '[\\/]': _ '^\.': _ @@ -39,24 +45,35 @@ plugins: [] pluginpath: [] threaded: yes -color: yes timeout: 5.0 per_disc_numbering: no -verbose: no -terminal_encoding: utf8 +verbose: 0 +terminal_encoding: original_date: no id3v23: no +va_name: "Various Artists" ui: terminal_width: 80 length_diff_thresh: 10.0 + color: yes + colors: + text_success: green + text_warning: yellow + text_error: red + text_highlight: red + text_highlight_minor: lightgray + action_default: turquoise + action: blue -list_format_item: $artist - $album - $title -list_format_album: $albumartist - $album +format_item: $artist - $album - $title +format_album: $albumartist - $album time_format: '%Y-%m-%d %H:%M:%S' +format_raw_length: no sort_album: albumartist+ album+ sort_item: artist+ album+ disc+ track+ +sort_case_insensitive: yes paths: default: $albumartist/$album%aunique{}/$track $title @@ -69,6 +86,7 @@ host: musicbrainz.org ratelimit: 1 ratelimit_interval: 1.0 + searchlimit: 5 match: strong_rec_thresh: 0.04 diff -Nru 
beets-1.3.8+dfsg/beets/dbcore/db.py beets-1.3.19/beets/dbcore/db.py --- beets-1.3.8+dfsg/beets/dbcore/db.py 2014-09-16 21:46:53.000000000 +0000 +++ beets-1.3.19/beets/dbcore/db.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,6 +15,8 @@ """The central Model and Database constructs for DBCore. """ +from __future__ import division, absolute_import, print_function + import time import os from collections import defaultdict @@ -31,7 +34,7 @@ class FormattedMapping(collections.Mapping): """A `dict`-like formatted view of a model. - The accessor `mapping[key]` returns the formated version of + The accessor `mapping[key]` returns the formatted version of `model[key]` as a unicode string. If `for_path` is true, all path separators in the formatted values @@ -129,6 +132,12 @@ are subclasses of `Sort`. """ + _always_dirty = False + """By default, fields only become "dirty" when their value actually + changes. Enabling this flag marks fields as dirty even when the new + value is the same as the old value (e.g., `o.f = o.f`). + """ + @classmethod def _getters(cls): """Return a mapping from field names to getter functions. @@ -191,20 +200,22 @@ exception is raised otherwise. """ if not self._db: - raise ValueError('{0} has no database'.format(type(self).__name__)) + raise ValueError( + u'{0} has no database'.format(type(self).__name__) + ) if need_id and not self.id: - raise ValueError('{0} has no id'.format(type(self).__name__)) + raise ValueError(u'{0} has no id'.format(type(self).__name__)) # Essential field accessors. @classmethod - def _type(self, key): + def _type(cls, key): """Get the type of a field, a `Type` instance. If the field has no explicit type, it is given the base `Type`, which does no conversion. """ - return self._fields.get(key) or self._types.get(key) or types.DEFAULT + return cls._fields.get(key) or cls._types.get(key) or types.DEFAULT def __getitem__(self, key): """Get the value for a field. Raise a KeyError if the field is @@ -235,7 +246,7 @@ # Assign value and possibly mark as dirty. old_value = source.get(key) source[key] = value - if old_value != value: + if self._always_dirty or old_value != value: self._dirty.add(key) def __delitem__(self, key): @@ -245,23 +256,30 @@ del self._values_flex[key] self._dirty.add(key) # Mark for dropping on store. elif key in self._getters(): # Computed. - raise KeyError('computed field {0} cannot be deleted'.format(key)) + raise KeyError(u'computed field {0} cannot be deleted'.format(key)) elif key in self._fields: # Fixed. - raise KeyError('fixed field {0} cannot be deleted'.format(key)) + raise KeyError(u'fixed field {0} cannot be deleted'.format(key)) else: - raise KeyError('no such field {0}'.format(key)) + raise KeyError(u'no such field {0}'.format(key)) def keys(self, computed=False): """Get a list of available field names for this object. The `computed` parameter controls whether computed (plugin-provided) fields are included in the key list. 
""" - base_keys = list(self._fields) + self._values_flex.keys() + base_keys = list(self._fields) + list(self._values_flex.keys()) if computed: - return base_keys + self._getters().keys() + return base_keys + list(self._getters().keys()) else: return base_keys + @classmethod + def all_keys(cls): + """Get a list of available keys for objects of this type. + Includes fixed and computed fields. + """ + return list(cls._fields) + list(cls._getters().keys()) + # Act like a dictionary. def update(self, values): @@ -301,12 +319,12 @@ def __getattr__(self, key): if key.startswith('_'): - raise AttributeError('model has no attribute {0!r}'.format(key)) + raise AttributeError(u'model has no attribute {0!r}'.format(key)) else: try: return self[key] except KeyError: - raise AttributeError('no such field {0!r}'.format(key)) + raise AttributeError(u'no such field {0!r}'.format(key)) def __setattr__(self, key, value): if key.startswith('_'): @@ -373,7 +391,7 @@ """ self._check_db() stored_obj = self._db._get(type(self), self.id) - assert stored_obj is not None, "object {0} not in DB".format(self.id) + assert stored_obj is not None, u"object {0} not in DB".format(self.id) self._values_fixed = {} self._values_flex = {} self.update(dict(stored_obj)) @@ -446,10 +464,15 @@ """Parse a string as a value for the given key. """ if not isinstance(string, basestring): - raise TypeError("_parse() argument must be a string") + raise TypeError(u"_parse() argument must be a string") return cls._type(key).parse(string) + def set_parse(self, key, string): + """Set the object's key to a value represented by a string. + """ + self[key] = self._parse(key, string) + # Database controller and supporting interfaces. @@ -459,14 +482,18 @@ """ def __init__(self, model_class, rows, db, query=None, sort=None): """Create a result set that will construct objects of type - `model_class`, which should be a subclass of `LibModel`, out of - the query result mapping in `rows`. The new objects are - associated with the database `db`. - If `query` is provided, it is used as a predicate to filter the results - for a "slow query" that cannot be evaluated by the database directly. - If `sort` is provided, it is used to sort the full list of results - before returning. This means it is a "slow sort" and all objects must - be built before returning the first one. + `model_class`. + + `model_class` is a subclass of `LibModel` that will be + constructed. `rows` is a query result: a list of mappings. The + new objects will be associated with the database `db`. + + If `query` is provided, it is used as a predicate to filter the + results for a "slow query" that cannot be evaluated by the + database directly. If `sort` is provided, it is used to sort the + full list of results before returning. This means it is a "slow + sort" and all objects must be built before returning the first + one. """ self.model_class = model_class self.rows = rows @@ -474,28 +501,59 @@ self.query = query self.sort = sort + # We keep a queue of rows we haven't yet consumed for + # materialization. We preserve the original total number of + # rows. + self._rows = rows + self._row_count = len(rows) + + # The materialized objects corresponding to rows that have been + # consumed. + self._objects = [] + + def _get_objects(self): + """Construct and generate Model objects for they query. The + objects are returned in the order emitted from the database; no + slow sort is applied. 
+ + For performance, this generator caches materialized objects to + avoid constructing them more than once. This way, iterating over + a `Results` object a second time should be much faster than the + first. + """ + index = 0 # Position in the materialized objects. + while index < len(self._objects) or self._rows: + # Are there previously-materialized objects to produce? + if index < len(self._objects): + yield self._objects[index] + index += 1 + + # Otherwise, we consume another row, materialize its object + # and produce it. + else: + while self._rows: + row = self._rows.pop(0) + obj = self._make_model(row) + # If there is a slow-query predicate, ensurer that the + # object passes it. + if not self.query or self.query.match(obj): + self._objects.append(obj) + index += 1 + yield obj + break + def __iter__(self): - """Construct Python objects for all rows that pass the query - predicate. + """Construct and generate Model objects for all matching + objects, in sorted order. """ if self.sort: # Slow sort. Must build the full list first. - objects = [] - for row in self.rows: - obj = self._make_model(row) - # check the predicate if any - if not self.query or self.query.match(obj): - objects.append(obj) - # Now that we have the full list, we can sort it - objects = self.sort.sort(objects) - for o in objects: - yield o - else: - for row in self.rows: - obj = self._make_model(row) - # check the predicate if any - if not self.query or self.query.match(obj): - yield obj + objects = self.sort.sort(list(self._get_objects())) + return iter(objects) + + else: + # Objects are pre-sorted (i.e., by the database). + return self._get_objects() def _make_model(self, row): # Get the flexible attributes for the object. @@ -519,7 +577,11 @@ def __len__(self): """Get the number of matching objects. """ - if self.query: + if not self._rows: + # Fully materialized. Just count the objects. + return len(self._objects) + + elif self.query: # A slow query. Fall back to testing every object. count = 0 for obj in self: @@ -528,24 +590,34 @@ else: # A fast query. Just count the rows. - return len(self.rows) + return self._row_count def __nonzero__(self): """Does this result contain any objects? """ + return self.__bool__() + + def __bool__(self): + """Does this result contain any objects? + """ return bool(len(self)) def __getitem__(self, n): """Get the nth item in this result set. This is inefficient: all items up to n are materialized and thrown away. """ + if not self._rows and not self.sort: + # Fully materialized and already in order. Just look up the + # object. + return self._objects[n] + it = iter(self) try: for i in range(n): - it.next() - return it.next() + next(it) + return next(it) except StopIteration: - raise IndexError('result index {0} out of range'.format(n)) + raise IndexError(u'result index {0} out of range'.format(n)) def get(self): """Return the first matching object, or None if no objects @@ -553,7 +625,7 @@ """ it = iter(self) try: - return it.next() + return next(it) except StopIteration: return None @@ -666,6 +738,14 @@ self._connections[thread_id] = conn return conn + def _close(self): + """Close the all connections to the underlying SQLite database + from all threads. This does not render the database object + unusable; new connections can still be opened on demand. 
+ """ + with self._shared_map_lock: + self._connections.clear() + @contextlib.contextmanager def _tx_stack(self): """A context manager providing access to the current thread's diff -Nru beets-1.3.8+dfsg/beets/dbcore/__init__.py beets-1.3.19/beets/dbcore/__init__.py --- beets-1.3.8+dfsg/beets/dbcore/__init__.py 2014-09-16 00:39:44.000000000 +0000 +++ beets-1.3.19/beets/dbcore/__init__.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,11 +16,14 @@ """DBCore is an abstract database package that forms the basis for beets' Library. """ +from __future__ import division, absolute_import, print_function + from .db import Model, Database from .query import Query, FieldQuery, MatchQuery, AndQuery, OrQuery from .types import Type from .queryparse import query_from_strings from .queryparse import sort_from_strings from .queryparse import parse_sorted_query +from .query import InvalidQueryError # flake8: noqa diff -Nru beets-1.3.8+dfsg/beets/dbcore/queryparse.py beets-1.3.19/beets/dbcore/queryparse.py --- beets-1.3.8+dfsg/beets/dbcore/queryparse.py 2014-09-16 02:33:11.000000000 +0000 +++ beets-1.3.19/beets/dbcore/queryparse.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,13 +15,17 @@ """Parsing of strings into DBCore queries. """ +from __future__ import division, absolute_import, print_function + import re import itertools from . import query - +import beets PARSE_QUERY_PART_REGEX = re.compile( # Non-capturing optional segment for the keyword. + r'(-|\^)?' # Negation prefixes. + r'(?:' r'(\S+?)' # The field key. r'(? (None, 'stapler', SubstringQuery) - 'color:red' -> ('color', 'red', SubstringQuery) - ':^Quiet' -> (None, '^Quiet', RegexpQuery) - 'color::b..e' -> ('color', 'b..e', RegexpQuery) + """Parse a single *query part*, which is a chunk of a complete query + string representing a single criterion. - Prefixes may be "escaped" with a backslash to disable the keying - behavior. + A query part is a string consisting of: + - A *pattern*: the value to look for. + - Optionally, a *field name* preceding the pattern, separated by a + colon. So in `foo:bar`, `foo` is the field name and `bar` is the + pattern. + - Optionally, a *query prefix* just before the pattern (and after the + optional colon) indicating the type of query that should be used. For + example, in `~foo`, `~` might be a prefix. (The set of prefixes to + look for is given in the `prefixes` parameter.) + - Optionally, a negation indicator, `-` or `^`, at the very beginning. + + Both prefixes and the separating `:` character may be escaped with a + backslash to avoid their normal meaning. + + The function returns a tuple consisting of: + - The field name: a string or None if it's not present. + - The pattern, a string. + - The query class to use, which inherits from the base + :class:`Query` type. + - A negation flag, a bool. + + The three optional parameters determine which query class is used (i.e., + the third return value). 
They are: + - `query_classes`, which maps field names to query classes. These + are used when no explicit prefix is present. + - `prefixes`, which maps prefix strings to query classes. + - `default_class`, the fallback when neither the field nor a prefix + indicates a query class. + + So the precedence for determining which query class to return is: + prefix, followed by field, and finally the default. + + For example, assuming the `:` prefix is used for `RegexpQuery`: + - `'stapler'` -> `(None, 'stapler', SubstringQuery, False)` + - `'color:red'` -> `('color', 'red', SubstringQuery, False)` + - `':^Quiet'` -> `(None, '^Quiet', RegexpQuery, False)`, because + the `^` follows the `:` + - `'color::b..e'` -> `('color', 'b..e', RegexpQuery, False)` + - `'-color:red'` -> `('color', 'red', SubstringQuery, True)` """ + # Apply the regular expression and extract the components. part = part.strip() match = PARSE_QUERY_PART_REGEX.match(part) - assert match # Regex should always match. - key = match.group(1) - term = match.group(2).replace('\:', ':') + assert match # Regex should always match + negate = bool(match.group(1)) + key = match.group(2) + term = match.group(3).replace('\:', ':') - # Match the search term against the list of prefixes. + # Check whether there's a prefix in the query and use the + # corresponding query type. for pre, query_class in prefixes.items(): if term.startswith(pre): - return key, term[len(pre):], query_class + return key, term[len(pre):], query_class, negate - # No matching prefix: use type-based or fallback/default query. + # No matching prefix, so use either the query class determined by + # the field or the default as a fallback. query_class = query_classes.get(key, default_class) - return key, term, query_class + return key, term, query_class, negate def construct_query_part(model_cls, prefixes, query_part): - """Create a query from a single query component, `query_part`, for - querying instances of `model_cls`. Return a `Query` instance. + """Parse a *query part* string and return a :class:`Query` object. + + :param model_cls: The :class:`Model` class that this is a query for. + This is used to determine the appropriate query types for the + model's fields. + :param prefixes: A map from prefix strings to :class:`Query` types. + :param query_part: The string to parse. + + See the documentation for `parse_query_part` for more information on + query part syntax. """ - # Shortcut for empty query parts. + # A shortcut for empty query parts. if not query_part: return query.TrueQuery() - # Get the query classes for each possible field. + # Use `model_cls` to build up a map from field names to `Query` + # classes. query_classes = {} for k, t in itertools.chain(model_cls._fields.items(), model_cls._types.items()): query_classes[k] = t.query # Parse the string. - key, pattern, query_class = \ + key, pattern, query_class, negate = \ parse_query_part(query_part, query_classes, prefixes) - # No key specified. + # If there's no key (field name) specified, this is a "match + # anything" query. if key is None: if issubclass(query_class, query.FieldQuery): # The query type matches a specific field, but none was # specified. So we use a version of the query that matches # any field. - return query.AnyFieldQuery(pattern, model_cls._search_fields, - query_class) + q = query.AnyFieldQuery(pattern, model_cls._search_fields, + query_class) + if negate: + return query.NotQuery(q) + else: + return q else: - # Other query type. - return query_class(pattern) + # Non-field query type. 
+ if negate: + return query.NotQuery(query_class(pattern)) + else: + return query_class(pattern) + # Otherwise, this must be a `FieldQuery`. Use the field name to + # construct the query object. key = key.lower() - return query_class(key.lower(), pattern, key in model_cls._fields) + q = query_class(key.lower(), pattern, key in model_cls._fields) + if negate: + return query.NotQuery(q) + return q def query_from_strings(query_cls, model_cls, prefixes, query_parts): @@ -136,13 +185,15 @@ assert direction in ('+', '-'), "part must end with + or -" is_ascending = direction == '+' + case_insensitive = beets.config['sort_case_insensitive'].get(bool) if field in model_cls._sorts: - sort = model_cls._sorts[field](model_cls, is_ascending) + sort = model_cls._sorts[field](model_cls, is_ascending, + case_insensitive) elif field in model_cls._fields: - sort = query.FixedFieldSort(field, is_ascending) + sort = query.FixedFieldSort(field, is_ascending, case_insensitive) else: # Flexible or computed. - sort = query.SlowFieldSort(field, is_ascending) + sort = query.SlowFieldSort(field, is_ascending, case_insensitive) return sort @@ -150,31 +201,50 @@ """Create a `Sort` from a list of sort criteria (strings). """ if not sort_parts: - return query.NullSort() + sort = query.NullSort() + elif len(sort_parts) == 1: + sort = construct_sort_part(model_cls, sort_parts[0]) else: sort = query.MultipleSort() for part in sort_parts: sort.add_sort(construct_sort_part(model_cls, part)) - return sort + return sort -def parse_sorted_query(model_cls, parts, prefixes={}, - query_cls=query.AndQuery): +def parse_sorted_query(model_cls, parts, prefixes={}): """Given a list of strings, create the `Query` and `Sort` that they represent. """ # Separate query token and sort token. query_parts = [] sort_parts = [] - for part in parts: - if part.endswith((u'+', u'-')) and u':' not in part: - sort_parts.append(part) + + # Split up query in to comma-separated subqueries, each representing + # an AndQuery, which need to be joined together in one OrQuery + subquery_parts = [] + for part in parts + [u',']: + if part.endswith(u','): + # Ensure we can catch "foo, bar" as well as "foo , bar" + last_subquery_part = part[:-1] + if last_subquery_part: + subquery_parts.append(last_subquery_part) + # Parse the subquery in to a single AndQuery + # TODO: Avoid needlessly wrapping AndQueries containing 1 subquery? + query_parts.append(query_from_strings( + query.AndQuery, model_cls, prefixes, subquery_parts + )) + del subquery_parts[:] else: - query_parts.append(part) + # Sort parts (1) end in + or -, (2) don't have a field, and + # (3) consist of more than just the + or -. + if part.endswith((u'+', u'-')) \ + and u':' not in part \ + and len(part) > 1: + sort_parts.append(part) + else: + subquery_parts.append(part) - # Parse each. - q = query_from_strings( - query_cls, model_cls, prefixes, query_parts - ) + # Avoid needlessly wrapping single statements in an OR + q = query.OrQuery(query_parts) if len(query_parts) > 1 else query_parts[0] s = sort_from_strings(model_cls, sort_parts) return q, s diff -Nru beets-1.3.8+dfsg/beets/dbcore/query.py beets-1.3.19/beets/dbcore/query.py --- beets-1.3.8+dfsg/beets/dbcore/query.py 2014-09-18 02:01:05.000000000 +0000 +++ beets-1.3.19/beets/dbcore/query.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. 
# # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,10 +15,45 @@ """The Query type hierarchy for DBCore. """ +from __future__ import division, absolute_import, print_function + import re -from operator import attrgetter +from operator import mul from beets import util from datetime import datetime, timedelta +import unicodedata +from functools import reduce + + +class ParsingError(ValueError): + """Abstract class for any unparseable user-requested album/query + specification. + """ + + +class InvalidQueryError(ParsingError): + """Represent any kind of invalid query. + + The query should be a unicode string or a list, which will be space-joined. + """ + def __init__(self, query, explanation): + if isinstance(query, list): + query = " ".join(query) + message = u"'{0}': {1}".format(query, explanation) + super(InvalidQueryError, self).__init__(message) + + +class InvalidQueryArgumentTypeError(ParsingError): + """Represent a query argument that could not be converted as expected. + + It exists to be caught in upper stack levels so a meaningful (i.e. with the + query) InvalidQueryError can be raised. + """ + def __init__(self, what, expected, detail=None): + message = u"'{0}' is not {1}".format(what, expected) + if detail: + message = u"{0}: {1}".format(message, detail) + super(InvalidQueryArgumentTypeError, self).__init__(message) class Query(object): @@ -25,9 +61,8 @@ """ def clause(self): """Generate an SQLite expression implementing the query. - Return a clause string, a sequence of substitution values for - the clause, and a Query object representing the "remainder" - Returns (clause, subvals) where clause is a valid sqlite + + Return (clause, subvals) where clause is a valid sqlite WHERE clause implementing the query and subvals is a list of items to be substituted for ?s in the clause. """ @@ -39,6 +74,15 @@ """ raise NotImplementedError + def __repr__(self): + return "{0.__class__.__name__}()".format(self) + + def __eq__(self, other): + return type(self) == type(other) + + def __hash__(self): + return 0 + class FieldQuery(Query): """An abstract query that searches in a specific field for a @@ -72,6 +116,17 @@ def match(self, item): return self.value_match(self.pattern, item.get(self.field)) + def __repr__(self): + return ("{0.__class__.__name__}({0.field!r}, {0.pattern!r}, " + "{0.fast})".format(self)) + + def __eq__(self, other): + return super(FieldQuery, self).__eq__(other) and \ + self.field == other.field and self.pattern == other.pattern + + def __hash__(self): + return hash((self.field, hash(self.pattern))) + class MatchQuery(FieldQuery): """A query that looks for exact matches in an item field.""" @@ -86,19 +141,21 @@ class NoneQuery(FieldQuery): def __init__(self, field, fast=True): - self.field = field - self.fast = fast + super(NoneQuery, self).__init__(field, None, fast) def col_clause(self): return self.field + " IS NULL", () @classmethod - def match(self, item): + def match(cls, item): try: - return item[self.field] is None + return item[cls.field] is None except KeyError: return True + def __repr__(self): + return "{0.__class__.__name__}({0.field!r}, {0.fast})".format(self) + class StringFieldQuery(FieldQuery): """A FieldQuery that converts values to strings before matching @@ -139,15 +196,31 @@ class RegexpQuery(StringFieldQuery): """A query that matches a regular expression in a specific item field. 
+ + Raises InvalidQueryError when the pattern is not a valid regular + expression. """ - @classmethod - def string_match(cls, pattern, value): + def __init__(self, field, pattern, fast=True): + super(RegexpQuery, self).__init__(field, pattern, fast) + pattern = self._normalize(pattern) try: - res = re.search(pattern, value) - except re.error: + self.pattern = re.compile(self.pattern) + except re.error as exc: # Invalid regular expression. - return False - return res is not None + raise InvalidQueryArgumentTypeError(pattern, + u"a regular expression", + format(exc)) + + @staticmethod + def _normalize(s): + """Normalize a Unicode string's representation (used on both + patterns and matched values). + """ + return unicodedata.normalize('NFC', s) + + @classmethod + def string_match(cls, pattern, value): + return pattern.search(cls._normalize(value)) is not None class BooleanQuery(MatchQuery): @@ -163,7 +236,7 @@ class BytesQuery(MatchQuery): """Match a raw bytes field (i.e., a path). This is a necessary hack - to work around the `sqlite3` module's desire to treat `str` and + to work around the `sqlite3` module's desire to treat `bytes` and `unicode` equivalently in Python 2. Always use this query instead of `MatchQuery` when matching on BLOB values. """ @@ -173,9 +246,7 @@ # Use a buffer representation of the pattern for SQLite # matching. This instructs SQLite to treat the blob as binary # rather than encoded Unicode. - if isinstance(self.pattern, basestring): - # Implicitly coerce Unicode strings to their bytes - # equivalents. + if isinstance(self.pattern, (unicode, bytes)): if isinstance(self.pattern, unicode): self.pattern = self.pattern.encode('utf8') self.buf_pattern = buffer(self.pattern) @@ -191,19 +262,26 @@ """Matches numeric fields. A syntax using Ruby-style range ellipses (``..``) lets users specify one- or two-sided ranges. For example, ``year:2001..`` finds music released since the turn of the century. + + Raises InvalidQueryError when the pattern does not represent an int or + a float. """ def _convert(self, s): - """Convert a string to a numeric type (float or int). If the - string cannot be converted, return None. + """Convert a string to a numeric type (float or int). + + Return None if `s` is empty. + Raise an InvalidQueryError if the string cannot be converted. """ # This is really just a bit of fun premature optimization. + if not s: + return None try: return int(s) except ValueError: try: return float(s) except ValueError: - return None + raise InvalidQueryArgumentTypeError(s, u"an int or a float") def __init__(self, field, pattern, fast=True): super(NumericQuery, self).__init__(field, pattern, fast) @@ -248,7 +326,7 @@ elif self.rangemax is not None: return u'{0} <= ?'.format(self.field), (self.rangemax,) else: - return '1', () + return u'1', () class CollectionQuery(Query): @@ -273,7 +351,7 @@ return item in self.subqueries def clause_with_joiner(self, joiner): - """Returns a clause created by joining together the clauses of + """Return a clause created by joining together the clauses of all subqueries with the string joiner (padded by spaces). """ clause_parts = [] @@ -288,6 +366,19 @@ clause = (' ' + joiner + ' ').join(clause_parts) return clause, subvals + def __repr__(self): + return "{0.__class__.__name__}({0.subqueries!r})".format(self) + + def __eq__(self, other): + return super(CollectionQuery, self).__eq__(other) and \ + self.subqueries == other.subqueries + + def __hash__(self): + """Since subqueries are mutable, this object should not be hashable. 
+ However and for conveniences purposes, it can be hashed. + """ + return reduce(mul, map(hash, self.subqueries), 1) + class AnyFieldQuery(CollectionQuery): """A query that matches if a given FieldQuery subclass matches in @@ -313,6 +404,17 @@ return True return False + def __repr__(self): + return ("{0.__class__.__name__}({0.pattern!r}, {0.fields!r}, " + "{0.query_class.__name__})".format(self)) + + def __eq__(self, other): + return super(AnyFieldQuery, self).__eq__(other) and \ + self.query_class == other.query_class + + def __hash__(self): + return hash((self.pattern, tuple(self.fields), self.query_class)) + class MutableCollectionQuery(CollectionQuery): """A collection query whose subqueries may be modified after the @@ -343,6 +445,36 @@ return any([q.match(item) for q in self.subqueries]) +class NotQuery(Query): + """A query that matches the negation of its `subquery`, as a shorcut for + performing `not(subquery)` without using regular expressions. + """ + def __init__(self, subquery): + self.subquery = subquery + + def clause(self): + clause, subvals = self.subquery.clause() + if clause: + return 'not ({0})'.format(clause), subvals + else: + # If there is no clause, there is nothing to negate. All the logic + # is handled by match() for slow queries. + return clause, subvals + + def match(self, item): + return not self.subquery.match(item) + + def __repr__(self): + return "{0.__class__.__name__}({0.subquery!r})".format(self) + + def __eq__(self, other): + return super(NotQuery, self).__eq__(other) and \ + self.subquery == other.subquery + + def __hash__(self): + return hash(('not', hash(self.subquery))) + + class TrueQuery(Query): """A query that always matches.""" def clause(self): @@ -369,11 +501,7 @@ """ epoch = datetime.fromtimestamp(0) delta = date - epoch - try: - return int(delta.total_seconds()) - except AttributeError: - # datetime.timedelta.total_seconds() is not available on Python 2.6 - return delta.seconds + delta.days * 24 * 3600 + return int(delta.total_seconds()) def _parse_periods(pattern): @@ -405,7 +533,7 @@ precision (a string, one of "year", "month", or "day"). """ if precision not in Period.precisions: - raise ValueError('Invalid precision ' + str(precision)) + raise ValueError(u'Invalid precision {0}'.format(precision)) self.date = date self.precision = precision @@ -418,10 +546,14 @@ return None ordinal = string.count('-') if ordinal >= len(cls.date_formats): - raise ValueError('date is not in one of the formats ' - + ', '.join(cls.date_formats)) + # Too many components. + return None date_format = cls.date_formats[ordinal] - date = datetime.strptime(string, date_format) + try: + date = datetime.strptime(string, date_format) + except ValueError: + # Parsing failed. 
+ return None precision = cls.precisions[ordinal] return cls(date, precision) @@ -441,7 +573,7 @@ elif 'day' == precision: return date + timedelta(days=1) else: - raise ValueError('unhandled precision ' + str(precision)) + raise ValueError(u'unhandled precision {0}'.format(precision)) class DateInterval(object): @@ -453,7 +585,7 @@ def __init__(self, start, end): if start is not None and end is not None and not start < end: - raise ValueError("start date {0} is not before end date {1}" + raise ValueError(u"start date {0} is not before end date {1}" .format(start, end)) self.start = start self.end = end @@ -474,7 +606,7 @@ return True def __str__(self): - return'[{0}, {1})'.format(self.start, self.end) + return '[{0}, {1})'.format(self.start, self.end) class DateQuery(FieldQuery): @@ -519,6 +651,33 @@ return clause, subvals +class DurationQuery(NumericQuery): + """NumericQuery that allow human-friendly (M:SS) time interval formats. + + Converts the range(s) to a float value, and delegates on NumericQuery. + + Raises InvalidQueryError when the pattern does not represent an int, float + or M:SS time interval. + """ + def _convert(self, s): + """Convert a M:SS or numeric string to a float. + + Return None if `s` is empty. + Raise an InvalidQueryError if the string cannot be converted. + """ + if not s: + return None + try: + return util.raw_seconds_short(s) + except ValueError: + try: + return float(s) + except ValueError: + raise InvalidQueryArgumentTypeError( + s, + u"a M:SS string or a float") + + # Sorting. class Sort(object): @@ -543,6 +702,12 @@ """ return False + def __hash__(self): + return 0 + + def __eq__(self, other): + return type(self) == type(other) + class MultipleSort(Sort): """Sort that encapsulates multiple sub-sorts. @@ -602,38 +767,67 @@ return items def __repr__(self): - return u'MultipleSort({0})'.format(repr(self.sorts)) + return 'MultipleSort({!r})'.format(self.sorts) + + def __hash__(self): + return hash(tuple(self.sorts)) + + def __eq__(self, other): + return super(MultipleSort, self).__eq__(other) and \ + self.sorts == other.sorts class FieldSort(Sort): """An abstract sort criterion that orders by a specific field (of any kind). """ - def __init__(self, field, ascending=True): + def __init__(self, field, ascending=True, case_insensitive=True): self.field = field self.ascending = ascending + self.case_insensitive = case_insensitive def sort(self, objs): # TODO: Conversion and null-detection here. In Python 3, # comparisons with None fail. We should also support flexible # attributes with different types without falling over. - return sorted(objs, key=attrgetter(self.field), - reverse=not self.ascending) + + def key(item): + field_val = item.get(self.field, '') + if self.case_insensitive and isinstance(field_val, unicode): + field_val = field_val.lower() + return field_val + + return sorted(objs, key=key, reverse=not self.ascending) def __repr__(self): - return u'<{0}: {1}{2}>'.format( + return '<{0}: {1}{2}>'.format( type(self).__name__, self.field, '+' if self.ascending else '-', ) + def __hash__(self): + return hash((self.field, self.ascending)) + + def __eq__(self, other): + return super(FieldSort, self).__eq__(other) and \ + self.field == other.field and \ + self.ascending == other.ascending + class FixedFieldSort(FieldSort): """Sort object to sort on a fixed field. 
""" def order_clause(self): order = "ASC" if self.ascending else "DESC" - return "{0} {1}".format(self.field, order) + if self.case_insensitive: + field = '(CASE ' \ + 'WHEN TYPEOF({0})="text" THEN LOWER({0}) ' \ + 'WHEN TYPEOF({0})="blob" THEN LOWER({0}) ' \ + 'ELSE {0} END)'.format(self.field) + else: + field = self.field + return "{0} {1}".format(field, order) class SlowFieldSort(FieldSort): @@ -646,5 +840,17 @@ class NullSort(Sort): """No sorting. Leave results unsorted.""" - def sort(items): + def sort(self, items): return items + + def __nonzero__(self): + return self.__bool__() + + def __bool__(self): + return False + + def __eq__(self, other): + return type(self) == type(other) or other is None + + def __hash__(self): + return 0 diff -Nru beets-1.3.8+dfsg/beets/dbcore/types.py beets-1.3.19/beets/dbcore/types.py --- beets-1.3.8+dfsg/beets/dbcore/types.py 2014-09-15 17:14:34.000000000 +0000 +++ beets-1.3.19/beets/dbcore/types.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,6 +15,8 @@ """Representation of type information for DBCore model fields. """ +from __future__ import division, absolute_import, print_function + from . import query from beets.util import str2bool @@ -93,7 +96,7 @@ http://www.sqlite.org/datatype3.html https://docs.python.org/2/library/sqlite3.html#sqlite-and-python-types - Flexible fields have the type afinity `TEXT`. This means the + Flexible fields have the type affinity `TEXT`. This means the `sql_value` is either a `buffer` or a `unicode` object` and the method must handle these in addition. """ diff -Nru beets-1.3.8+dfsg/beets/importer.py beets-1.3.19/beets/importer.py --- beets-1.3.8+dfsg/beets/importer.py 2014-09-18 02:01:05.000000000 +0000 +++ beets-1.3.19/beets/importer.py 2016-06-20 17:08:57.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,14 +13,14 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. +from __future__ import division, absolute_import, print_function + """Provides the basic, interface-agnostic workflow for importing and autotagging music files. """ -from __future__ import print_function import os import re -import logging import pickle import itertools from collections import defaultdict @@ -27,7 +28,9 @@ from bisect import insort, bisect_left from contextlib import contextmanager import shutil +import time +from beets import logging from beets import autotag from beets import library from beets import dbcore @@ -41,11 +44,13 @@ action = Enum('action', ['SKIP', 'ASIS', 'TRACKS', 'MANUAL', 'APPLY', 'MANUAL_ID', - 'ALBUMS']) + 'ALBUMS', 'RETAG']) +# The RETAG action represents "don't apply any match, but do record +# new metadata". It's not reachable via the standard command prompt but +# can be used by plugins. 
QUEUE_SIZE = 128 SINGLE_ARTIST_THRESH = 0.25 -VARIOUS_ARTISTS = u'Various Artists' PROGRESS_KEY = 'tagprogress' HISTORY_KEY = 'taghistory' @@ -64,24 +69,24 @@ def _open_state(): """Reads the state file, returning a dictionary.""" try: - with open(config['statefile'].as_filename()) as f: + with open(config['statefile'].as_filename(), 'rb') as f: return pickle.load(f) except Exception as exc: # The `pickle` module can emit all sorts of exceptions during # unpickling, including ImportError. We use a catch-all # exception to avoid enumerating them all (the docs don't even have a # full list!). - log.debug(u'state file could not be read: {0}'.format(exc)) + log.debug(u'state file could not be read: {0}', exc) return {} def _save_state(state): """Writes the state dictionary out to disk.""" try: - with open(config['statefile'].as_filename(), 'w') as f: + with open(config['statefile'].as_filename(), 'wb') as f: pickle.dump(state, f) except IOError as exc: - log.error(u'state file could not be written: {0}'.format(exc)) + log.error(u'state file could not be written: {0}', exc) # Utilities for reading and writing the beets progress file, which @@ -174,22 +179,28 @@ """Controls an import action. Subclasses should implement methods to communicate with the user or otherwise make decisions. """ - def __init__(self, lib, logfile, paths, query): - """Create a session. `lib` is a Library object. `logfile` is a - file-like object open for writing or None if no logging is to be - performed. Either `paths` or `query` is non-null and indicates + def __init__(self, lib, loghandler, paths, query): + """Create a session. `lib` is a Library object. `loghandler` is a + logging.Handler. Either `paths` or `query` is non-null and indicates the source of files to be imported. """ self.lib = lib - self.logfile = logfile + self.logger = self._setup_logging(loghandler) self.paths = paths self.query = query - self.seen_idents = set() self._is_resuming = dict() # Normalize the paths. if self.paths: - self.paths = map(normpath, self.paths) + self.paths = list(map(normpath, self.paths)) + + def _setup_logging(self, loghandler): + logger = logging.getLogger(__name__) + logger.propagate = False + if not loghandler: + loghandler = logging.NullHandler() + logger.handlers = [loghandler] + return logger def set_config(self, config): """Set `config` property from global import config and make @@ -210,9 +221,13 @@ iconfig['resume'] = False iconfig['incremental'] = False - # Copy and move are mutually exclusive. + # Copy, move, and link are mutually exclusive. if iconfig['move']: iconfig['copy'] = False + iconfig['link'] = False + elif iconfig['link']: + iconfig['copy'] = False + iconfig['move'] = False # Only delete when copying. if not iconfig['copy']: @@ -221,13 +236,10 @@ self.want_resume = config['resume'].as_choice([True, False, 'ask']) def tag_log(self, status, paths): - """Log a message about a given album to logfile. The status should - reflect the reason the album couldn't be tagged. + """Log a message about a given album to the importer log. The status + should reflect the reason the album couldn't be tagged. """ - if self.logfile: - print(u'{0} {1}'.format(status, displayable_path(paths)), - file=self.logfile) - self.logfile.flush() + self.logger.info(u'{0} {1}', status, displayable_path(paths)) def log_choice(self, task, duplicate=False): """Logs the task's current choice if it should be logged. If @@ -238,17 +250,17 @@ if duplicate: # Duplicate: log all three choices (skip, keep both, and trump). 
if task.should_remove_duplicates: - self.tag_log('duplicate-replace', paths) + self.tag_log(u'duplicate-replace', paths) elif task.choice_flag in (action.ASIS, action.APPLY): - self.tag_log('duplicate-keep', paths) + self.tag_log(u'duplicate-keep', paths) elif task.choice_flag is (action.SKIP): - self.tag_log('duplicate-skip', paths) + self.tag_log(u'duplicate-skip', paths) else: # Non-duplicate: log "skip" and "asis" choices. if task.choice_flag is action.ASIS: - self.tag_log('asis', paths) + self.tag_log(u'asis', paths) elif task.choice_flag is action.SKIP: - self.tag_log('skip', paths) + self.tag_log(u'skip', paths) def should_resume(self, path): raise NotImplementedError @@ -265,6 +277,7 @@ def run(self): """Run the import task. """ + self.logger.info(u'import started {0}', time.asctime()) self.set_config(config['import']) # Set up the pipeline. @@ -273,25 +286,35 @@ else: stages = [query_tasks(self)] - if self.config['group_albums'] and \ - not self.config['singletons']: - # Split directory tasks into one task for each album - stages += [group_albums(self)] - if self.config['autotag']: - # Only look up and query the user when autotagging. - - # FIXME We should also resolve duplicates when not - # autotagging. - stages += [lookup_candidates(self), user_query(self)] + # In pretend mode, just log what would otherwise be imported. + if self.config['pretend']: + stages += [log_files(self)] else: - stages += [import_asis(self)] - stages += [apply_choices(self)] - for stage_func in plugins.import_stages(): - stages.append(plugin_stage(self, stage_func)) - stages += [manipulate_files(self)] + if self.config['group_albums'] and \ + not self.config['singletons']: + # Split directory tasks into one task for each album. + stages += [group_albums(self)] + + # These stages either talk to the user to get a decision or, + # in the case of a non-autotagged import, just choose to + # import everything as-is. In *both* cases, these stages + # also add the music to the library database, so later + # stages need to read and write data from there. + if self.config['autotag']: + stages += [lookup_candidates(self), user_query(self)] + else: + stages += [import_asis(self)] + + # Plugin stages. + for stage_func in plugins.import_stages(): + stages.append(plugin_stage(self, stage_func)) + + stages += [manipulate_files(self)] + pl = pipeline.Pipeline(stages) # Run the pipeline. + plugins.send('import_begin', session=self) try: if config['threaded']: pl.run_parallel(QUEUE_SIZE) @@ -308,7 +331,7 @@ been imported in a previous session. """ if self.is_resuming(toppath) \ - and all(map(lambda p: progress_element(toppath, p), paths)): + and all([progress_element(toppath, p) for p in paths]): return True if self.config['incremental'] \ and tuple(paths) in self.history_dirs: @@ -339,8 +362,8 @@ # Either accept immediately or prompt for input to decide. if self.want_resume is True or \ self.should_resume(toppath): - log.warn(u'Resuming interrupted import of {0}'.format( - util.displayable_path(toppath))) + log.warn(u'Resuming interrupted import of {0}', + util.displayable_path(toppath)) self._is_resuming[toppath] = True else: # Clear progress; we're starting from the top. @@ -349,7 +372,34 @@ # The importer task class. -class ImportTask(object): +class BaseImportTask(object): + """An abstract base class for importer tasks. + + Tasks flow through the importer pipeline. Each stage can update + them. """ + def __init__(self, toppath, paths, items): + """Create a task. 
The primary fields that define a task are: + + * `toppath`: The user-specified base directory that contains the + music for this task. If the task has *no* user-specified base + (for example, when importing based on an -L query), this can + be None. This is used for tracking progress and history. + * `paths`: A list of *specific* paths where the music for this task + came from. These paths can be directories, when their entire + contents are being imported, or files, when the task comprises + individual tracks. This is used for progress/history tracking and + for displaying the task to the user. + * `items`: A list of `Item` objects representing the music being + imported. + + These fields should not change after initialization. + """ + self.toppath = toppath + self.paths = paths + self.items = items + + +class ImportTask(BaseImportTask): """Represents a single set of items to be imported along with its intermediate state. May represent an album or a single item. @@ -377,22 +427,16 @@ * `finalize()` Update the import progress and cleanup the file system. """ - def __init__(self, toppath=None, paths=None, items=None): - self.toppath = toppath - self.paths = paths - self.items = items + def __init__(self, toppath, paths, items): + super(ImportTask, self).__init__(toppath, paths, items) self.choice_flag = None - # TODO remove this eventually - self.should_remove_duplicates = False - self.is_album = True - - def set_null_candidates(self): - """Set the candidates to indicate no album match was found. - """ - self.cur_artist = None self.cur_album = None - self.candidates = None + self.cur_artist = None + self.candidates = [] self.rec = None + self.should_remove_duplicates = False + self.is_album = True + self.search_ids = [] # user-supplied candidate IDs. def set_choice(self, choice): """Given an AlbumMatch or TrackMatch object or an action constant, @@ -401,7 +445,8 @@ # Not part of the task structure: assert choice not in (action.MANUAL, action.MANUAL_ID) assert choice != action.APPLY # Only used internally. - if choice in (action.SKIP, action.ASIS, action.TRACKS, action.ALBUMS): + if choice in (action.SKIP, action.ASIS, action.TRACKS, action.ALBUMS, + action.RETAG): self.choice_flag = choice self.match = None else: @@ -437,10 +482,10 @@ """Returns identifying metadata about the current choice. For albums, this is an (artist, album) pair. For items, this is (artist, title). May only be called when the choice flag is ASIS - (in which case the data comes from the files' current metadata) - or APPLY (data comes from the choice). + or RETAG (in which case the data comes from the files' current + metadata) or APPLY (data comes from the choice). """ - if self.choice_flag is action.ASIS: + if self.choice_flag in (action.ASIS, action.RETAG): return (self.cur_artist, self.cur_album) elif self.choice_flag is action.APPLY: return (self.match.info.artist, self.match.info.album) @@ -451,21 +496,16 @@ If the tasks applies an album match the method only returns the matched items. """ - if self.choice_flag == action.ASIS: + if self.choice_flag in (action.ASIS, action.RETAG): return list(self.items) - # FIXME this should be a simple attribute. There should be no - # need to retrieve the keys of `match.mapping`. This requires - # that we remove unmatched items from the list. elif self.choice_flag == action.APPLY: - return self.match.mapping.keys() + return list(self.match.mapping.keys()) else: assert False def apply_metadata(self): """Copy metadata from match info to the items. 
""" - # TODO call should be more descriptive like - # apply_metadata(self.match, self.items) autotag.apply_metadata(self.match.info, self.match.mapping) def duplicate_items(self, lib): @@ -476,13 +516,12 @@ def remove_duplicates(self, lib): duplicate_items = self.duplicate_items(lib) - log.debug(u'removing {0} old duplicated items' - .format(len(duplicate_items))) + log.debug(u'removing {0} old duplicated items', len(duplicate_items)) for item in duplicate_items: item.remove() if lib.directory in util.ancestry(item.path): - log.debug(u'deleting duplicate {0}' - .format(util.displayable_path(item.path))) + log.debug(u'deleting duplicate {0}', + util.displayable_path(item.path)) util.remove(item.path) util.prune_dirs(os.path.dirname(item.path), lib.directory) @@ -490,30 +529,26 @@ def finalize(self, session): """Save progress, clean up files, and emit plugin event. """ - # FIXME the session argument is unfortunate. It should be - # present as an attribute of the task. - # Update progress. if session.want_resume: self.save_progress() if session.config['incremental']: self.save_history() + self.cleanup(copy=session.config['copy'], + delete=session.config['delete'], + move=session.config['move']) + if not self.skip: - self.cleanup(copy=session.config['copy'], - delete=session.config['delete'], - move=session.config['move']) self._emit_imported(session.lib) def cleanup(self, copy=False, delete=False, move=False): """Remove and prune imported paths. """ - # FIXME Maybe the keywords should be task properties. - - # FIXME This shouldn't be here. Skipping should be handled in - # the stages. + # Do not delete any files or prune directories when skipping. if self.skip: return + items = self.imported_items() # When copying and deleting originals, delete old files. @@ -531,17 +566,29 @@ self.prune(old_path) def _emit_imported(self, lib): - # FIXME This shouldn't be here. Skipping should be handled in - # the stages. - if self.skip: - return plugins.send('album_imported', lib=lib, album=self.album) + def handle_created(self, session): + """Send the `import_task_created` event for this task. Return a list of + tasks that should continue through the pipeline. By default, this is a + list containing only the task itself, but plugins can replace the task + with new ones. + """ + tasks = plugins.send('import_task_created', session=session, task=self) + if not tasks: + tasks = [self] + else: + # The plugins gave us a list of lists of tasks. Flatten it. + tasks = [t for inner in tasks for t in inner] + return tasks + def lookup_candidates(self): - """Retrieve and store candidates for this album. + """Retrieve and store candidates for this album. User-specified + candidate IDs are stored in self.search_ids: if present, the + initial lookup is restricted to only those IDs. """ artist, album, candidates, recommendation = \ - autotag.tag_album(self.items) + autotag.tag_album(self.items, search_ids=self.search_ids) self.cur_artist = artist self.cur_album = album self.candidates = candidates @@ -573,7 +620,10 @@ return duplicates def align_album_level_fields(self): - """Make the some album fields equal across `self.items` + """Make some album fields equal across `self.items`. For the + RETAG action, we assume that the responsible for returning it + (ie. a plugin) always ensures that the first item contains + valid data on the relevant fields. """ changes = {} @@ -590,10 +640,10 @@ changes['comp'] = False else: # VA. 
- changes['albumartist'] = VARIOUS_ARTISTS + changes['albumartist'] = config['va_name'].get(unicode) changes['comp'] = True - elif self.choice_flag == action.APPLY: + elif self.choice_flag in (action.APPLY, action.RETAG): # Applying autotagged metadata. Just get AA from the first # item. if not self.items[0].albumartist: @@ -606,36 +656,29 @@ item.update(changes) def manipulate_files(self, move=False, copy=False, write=False, - session=None): + link=False, session=None): items = self.imported_items() # Save the original paths of all items for deletion and pruning # in the next step (finalization). self.old_paths = [item.path for item in items] for item in items: - if session.config['move']: - # Just move the file. - item.move(False) - elif session.config['copy']: - # If it's a reimport, move in-library files and copy - # out-of-library files. Otherwise, copy and keep track - # of the old path. + if move or copy or link: + # In copy and link modes, treat re-imports specially: + # move in-library files. (Out-of-library files are + # copied/moved as usual). old_path = item.path - if self.replaced_items[item]: - # This is a reimport. Move in-library files and copy - # out-of-library files. - if session.lib.directory in util.ancestry(old_path): - item.move(False) - # We moved the item, so remove the - # now-nonexistent file from old_paths. - self.old_paths.remove(old_path) - else: - item.move(True) + if (copy or link) and self.replaced_items[item] and \ + session.lib.directory in util.ancestry(old_path): + item.move() + # We moved the item, so remove the + # now-nonexistent file from old_paths. + self.old_paths.remove(old_path) else: # A normal import. Just copy files and keep track of # old paths. - item.move(True) + item.move(copy, link) - if session.config['write'] and self.apply: + if write and (self.apply or self.choice_flag == action.RETAG): item.try_write() with session.lib.transaction(): @@ -649,29 +692,86 @@ """ self.align_album_level_fields() with lib.transaction(): + self.record_replaced(lib) self.remove_replaced(lib) self.album = lib.add_album(self.imported_items()) + self.reimport_metadata(lib) - def remove_replaced(self, lib): - """Removes all the items from the library that have the same - path as an item from this task. - - Records the replaced items in the `replaced_items` dictionary + def record_replaced(self, lib): + """Records the replaced items and albums in the `replaced_items` + and `replaced_albums` dictionaries. """ self.replaced_items = defaultdict(list) + self.replaced_albums = defaultdict(list) + replaced_album_ids = set() for item in self.imported_items(): dup_items = list(lib.items( dbcore.query.BytesQuery('path', item.path) )) self.replaced_items[item] = dup_items for dup_item in dup_items: - log.debug(u'replacing item {0}: {1}' - .format(dup_item.id, - displayable_path(item.path))) + if (not dup_item.album_id or + dup_item.album_id in replaced_album_ids): + continue + replaced_album = dup_item.get_album() + if replaced_album: + replaced_album_ids.add(dup_item.album_id) + self.replaced_albums[replaced_album.path] = replaced_album + + def reimport_metadata(self, lib): + """For reimports, preserves metadata for reimported items and + albums. 
+ """ + if self.is_album: + replaced_album = self.replaced_albums.get(self.album.path) + if replaced_album: + self.album.added = replaced_album.added + self.album.update(replaced_album._values_flex) + self.album.artpath = replaced_album.artpath + self.album.store() + log.debug( + u'Reimported album: added {0}, flexible ' + u'attributes {1} from album {2} for {3}', + self.album.added, + replaced_album._values_flex.keys(), + replaced_album.id, + displayable_path(self.album.path) + ) + + for item in self.imported_items(): + dup_items = self.replaced_items[item] + for dup_item in dup_items: + if dup_item.added and dup_item.added != item.added: + item.added = dup_item.added + log.debug( + u'Reimported item added {0} ' + u'from item {1} for {2}', + item.added, + dup_item.id, + displayable_path(item.path) + ) + item.update(dup_item._values_flex) + log.debug( + u'Reimported item flexible attributes {0} ' + u'from item {1} for {2}', + dup_item._values_flex.keys(), + dup_item.id, + displayable_path(item.path) + ) + item.store() + + def remove_replaced(self, lib): + """Removes all the items from the library that have the same + path as an item from this task. + """ + for item in self.imported_items(): + for dup_item in self.replaced_items[item]: + log.debug(u'Replacing item {0}: {1}', + dup_item.id, displayable_path(item.path)) dup_item.remove() - log.debug(u'{0} of {1} items replaced' - .format(len(self.replaced_items), - len(self.imported_items()))) + log.debug(u'{0} of {1} items replaced', + sum(bool(l) for l in self.replaced_items.values()), + len(self.imported_items())) def choose_match(self, session): """Ask the session which match should apply and apply it. @@ -707,14 +807,14 @@ """ def __init__(self, toppath, item): - super(SingletonImportTask, self).__init__(toppath, [item.path]) + super(SingletonImportTask, self).__init__(toppath, [item.path], [item]) self.item = item self.is_album = False self.paths = [item.path] def chosen_ident(self): - assert self.choice_flag in (action.ASIS, action.APPLY) - if self.choice_flag is action.ASIS: + assert self.choice_flag in (action.ASIS, action.APPLY, action.RETAG) + if self.choice_flag in (action.ASIS, action.RETAG): return (self.item.artist, self.item.title) elif self.choice_flag is action.APPLY: return (self.match.info.artist, self.match.info.title) @@ -726,15 +826,12 @@ autotag.apply_item_metadata(self.item, self.match.info) def _emit_imported(self, lib): - # FIXME This shouldn't be here. Skipped tasks should be removed from - # the pipeline. - if self.skip: - return for item in self.imported_items(): plugins.send('item_imported', lib=lib, item=item) def lookup_candidates(self): - candidates, recommendation = autotag.tag_item(self.item) + candidates, recommendation = autotag.tag_item( + self.item, search_ids=self.search_ids) self.candidates = candidates self.rec = recommendation @@ -759,8 +856,10 @@ def add(self, lib): with lib.transaction(): + self.record_replaced(lib) self.remove_replaced(lib) lib.add(self.item) + self.reimport_metadata(lib) def infer_album_fields(self): raise NotImplementedError @@ -777,22 +876,20 @@ # FIXME The inheritance relationships are inverted. This is why there -# are so many methods which pass. We should introduce a new -# BaseImportTask class. +# are so many methods which pass. More responsibility should be delegated to +# the BaseImportTask class. class SentinelImportTask(ImportTask): - """This class marks the progress of an import and does not import - any items itself. 
+ """A sentinel task marks the progress of an import and does not + import any items itself. - If only `toppath` is set the task indicats the end of a top-level - directory import. If the `paths` argument is givent, too, the task + If only `toppath` is set the task indicates the end of a top-level + directory import. If the `paths` argument is also given, the task indicates the progress in the `toppath` import. """ - def __init__(self, toppath=None, paths=None): - self.toppath = toppath - self.paths = paths + def __init__(self, toppath, paths): + super(SentinelImportTask, self).__init__(toppath, paths, ()) # TODO Remove the remaining attributes eventually - self.items = None self.should_remove_duplicates = False self.is_album = True self.choice_flag = None @@ -822,13 +919,20 @@ class ArchiveImportTask(SentinelImportTask): - """Additional methods for handling archives. + """An import task that represents the processing of an archive. - Use when `toppath` points to a `zip`, `tar`, or `rar` archive. + `toppath` must be a `zip`, `tar`, or `rar` archive. Archive tasks + serve two purposes: + - First, it will unarchive the files to a temporary directory and + return it. The client should read tasks from the resulting + directory and send them through the pipeline. + - Second, it will clean up the temporary directory when it proceeds + through the pipeline. The client should send the archive task + after sending the rest of the music tasks to make this work. """ def __init__(self, toppath): - super(ArchiveImportTask, self).__init__(toppath) + super(ArchiveImportTask, self).__init__(toppath, ()) self.extracted = False @classmethod @@ -872,6 +976,8 @@ """Removes the temporary directory the archive was extracted to. """ if self.extracted: + log.debug(u'Removing extracted directory: {0}', + displayable_path(self.toppath)) shutil.rmtree(self.toppath) def extract(self): @@ -893,24 +999,103 @@ class ImportTaskFactory(object): - """Create album and singleton import tasks from paths for toppaths - in session. - - The `singleton()` and `album()` methods accept paths and return - instances of `SingletonImportTask` and `ImportTask`, respectively. - `None` is returned if either no media file items could be created - from the paths or if the paths have already been imported. In both - cases it logs messages. + """Generate album and singleton import tasks for all media files + indicated by a path. """ def __init__(self, toppath, session): + """Create a new task factory. + + `toppath` is the user-specified path to search for music to + import. `session` is the `ImportSession`, which controls how + tasks are read from the directory. + """ self.toppath = toppath self.session = session - self.skipped = 0 + self.skipped = 0 # Skipped due to incremental/resume. + self.imported = 0 # "Real" tasks created. + self.is_archive = ArchiveImportTask.is_archive(syspath(toppath)) + + def tasks(self): + """Yield all import tasks for music found in the user-specified + path `self.toppath`. Any necessary sentinel tasks are also + produced. + + During generation, update `self.skipped` and `self.imported` + with the number of tasks that were not produced (due to + incremental mode or resumed imports) and the number of concrete + tasks actually produced, respectively. + + If `self.toppath` is an archive, it is adjusted to point to the + extracted data. + """ + # Check whether this is an archive. + if self.is_archive: + archive_task = self.unarchive() + if not archive_task: + return + + # Search for music in the directory. 
+ for dirs, paths in self.paths(): + if self.session.config['singletons']: + for path in paths: + tasks = self._create(self.singleton(path)) + for task in tasks: + yield task + yield self.sentinel(dirs) + + else: + tasks = self._create(self.album(paths, dirs)) + for task in tasks: + yield task + + # Produce the final sentinel for this toppath to indicate that + # it is finished. This is usually just a SentinelImportTask, but + # for archive imports, send the archive task instead (to remove + # the extracted directory). + if self.is_archive: + yield archive_task + else: + yield self.sentinel() + + def _create(self, task): + """Handle a new task to be emitted by the factory. + + Emit the `import_task_created` event and increment the + `imported` count if the task is not skipped. Return the same + task. If `task` is None, do nothing. + """ + if task: + tasks = task.handle_created(self.session) + self.imported += len(tasks) + return tasks + return [] + + def paths(self): + """Walk `self.toppath` and yield `(dirs, files)` pairs where + `files` are individual music files and `dirs` the set of + containing directories where the music was found. + + This can either be a recursive search in the ordinary case, a + single track when `toppath` is a file, a single directory in + `flat` mode. + """ + if not os.path.isdir(syspath(self.toppath)): + yield [self.toppath], [self.toppath] + elif self.session.config['flat']: + paths = [] + for dirs, paths_in_dir in albums_in_dir(self.toppath): + paths += paths_in_dir + yield [self.toppath], paths + else: + for dirs, paths in albums_in_dir(self.toppath): + yield dirs, paths def singleton(self, path): + """Return a `SingletonImportTask` for the music file. + """ if self.session.already_imported(self.toppath, [path]): - log.debug(u'Skipping previously-imported path: {0}' - .format(displayable_path(path))) + log.debug(u'Skipping previously-imported path: {0}', + displayable_path(path)) self.skipped += 1 return None @@ -920,22 +1105,21 @@ else: return None - def album(self, paths, dir=None): - """Return `ImportTask` with all media files from paths. + def album(self, paths, dirs=None): + """Return a `ImportTask` with all media files from paths. - `dir` is a common parent directory of all paths. + `dirs` is a list of parent directories used to record already + imported albums. """ if not paths: return None - if dir: - dirs = [dir] - else: + if dirs is None: dirs = list(set(os.path.dirname(p) for p in paths)) if self.session.already_imported(self.toppath, dirs): - log.debug(u'Skipping previously-imported path: {0}' - .format(displayable_path(dirs))) + log.debug(u'Skipping previously-imported path: {0}', + displayable_path(dirs)) self.skipped += 1 return None @@ -948,14 +1132,46 @@ return None def sentinel(self, paths=None): + """Return a `SentinelImportTask` indicating the end of a + top-level directory import. + """ return SentinelImportTask(self.toppath, paths) + def unarchive(self): + """Extract the archive for this `toppath`. + + Extract the archive to a new directory, adjust `toppath` to + point to the extracted directory, and return an + `ArchiveImportTask`. If extraction fails, return None. 
+ """ + assert self.is_archive + + if not (self.session.config['move'] or + self.session.config['copy']): + log.warn(u"Archive importing requires either " + u"'copy' or 'move' to be enabled.") + return + + log.debug(u'Extracting archive: {0}', + displayable_path(self.toppath)) + archive_task = ArchiveImportTask(self.toppath) + try: + archive_task.extract() + except Exception as exc: + log.error(u'extraction failed: {0}', exc) + return + + # Now read albums from the extracted directory. + self.toppath = archive_task.toppath + log.debug(u'Archive extracted to: {0}', self.toppath) + return archive_task + def read_item(self, path): - """Return an item created from the path. + """Return an `Item` read from the path. - If an item could not be read it returns None and logs an error. + If an item cannot be read, return `None` instead and log an + error. """ - # TODO remove this method. Should be handled in ImportTask creation. try: return library.Item.from_path(path) except library.ReadError as exc: @@ -963,14 +1179,10 @@ # Silently ignore non-music files. pass elif isinstance(exc.reason, mediafile.UnreadableFileError): - log.warn(u'unreadable file: {0}'.format( - displayable_path(path)) - ) + log.warn(u'unreadable file: {0}', displayable_path(path)) else: - log.error(u'error reading {0}: {1}'.format( - displayable_path(path), - exc, - )) + log.error(u'error reading {0}: {1}', + displayable_path(path), exc) # Full-album pipeline stages. @@ -982,78 +1194,22 @@ """ skipped = 0 for toppath in session.paths: - task_factory = ImportTaskFactory(toppath, session) - - # Determine if we want to resume import of the toppath + # Check whether we need to resume the import. session.ask_resume(toppath) - # Extract archives. - archive_task = None - if ArchiveImportTask.is_archive(syspath(toppath)): - if not (session.config['move'] or session.config['copy']): - log.warn(u"Archive importing requires either " - "'copy' or 'move' to be enabled.") - continue - - log.debug(u'extracting archive {0}' - .format(displayable_path(toppath))) - archive_task = ArchiveImportTask(toppath) - try: - archive_task.extract() - except Exception as exc: - log.error(u'extraction failed: {0}'.format(exc)) - continue - - # Continue reading albums from the extracted directory. - toppath = archive_task.toppath - - # Check whether the path is to a file. - if not os.path.isdir(syspath(toppath)): - if session.config['singletons']: - task = task_factory.singleton(toppath) - else: - task = task_factory.album([toppath], dir=toppath) - - if task: - yield task - yield task_factory.sentinel() - continue - - # A flat album import merges all items into one album. - if session.config['flat'] and not session.config['singletons']: - paths = [] - for _, item_paths in albums_in_dir(toppath): - paths += item_paths - task = task_factory.album(paths) - if task: - yield task - yield task_factory.sentinel() - continue - - # Produce paths under this directory. - for dirs, paths in albums_in_dir(toppath): - if session.config['singletons']: - for path in paths: - task = task_factory.singleton(path) - if task: - yield task - yield task_factory.sentinel(dirs) - - else: - task = task_factory.album(paths) - if task: - yield task - - # Indicate the directory is finished. - # FIXME hack to delete extracted archives - if archive_task is None: - yield task_factory.sentinel() - else: - yield archive_task + # Generate tasks. 
+ task_factory = ImportTaskFactory(toppath, session) + for t in task_factory.tasks(): + yield t + skipped += task_factory.skipped + + if not task_factory.imported: + log.warn(u'No files imported from {0}', + displayable_path(toppath)) - # Show skipped directories. + # Show skipped directories (due to incremental/resume). if skipped: - log.info(u'Skipped {0} directories.'.format(skipped)) + log.info(u'Skipped {0} paths.', skipped) def query_tasks(session): @@ -1064,13 +1220,15 @@ if session.config['singletons']: # Search for items. for item in session.lib.items(session.query): - yield SingletonImportTask(None, item) + task = SingletonImportTask(None, item) + for task in task.handle_created(session): + yield task else: # Search for albums. for album in session.lib.albums(session.query): - log.debug(u'yielding album {0}: {1} - {2}' - .format(album.id, album.albumartist, album.album)) + log.debug(u'yielding album {0}: {1} - {2}', + album.id, album.albumartist, album.album) items = list(album.items()) # Clear IDs from re-tagged items so they appear "fresh" when @@ -1079,7 +1237,9 @@ item.id = None item.album_id = None - yield ImportTask(None, [album.item_dir()], items) + task = ImportTask(None, [album.item_dir()], items) + for task in task.handle_created(session): + yield task @pipeline.mutator_stage @@ -1095,7 +1255,12 @@ return plugins.send('import_task_start', session=session, task=task) - log.debug(u'Looking up: {0}'.format(displayable_path(task.paths))) + log.debug(u'Looking up: {0}', displayable_path(task.paths)) + + # Restrict the initial lookup to IDs specified by the user via the -m + # option. Currently all the IDs are passed onto the tasks directly. + task.search_ids = session.config['search_ids'].as_str_seq() + task.lookup_candidates() @@ -1106,7 +1271,7 @@ The coroutine accepts an ImportTask objects. It uses the session's `choose_match` method to determine the `action` for - this task. Depending on the action additional stages are exectuted + this task. Depending on the action additional stages are executed and the processed task is yielded. It emits the ``import_task_choice`` event for plugins. Plugins have @@ -1125,7 +1290,9 @@ # Set up a little pipeline for dealing with the singletons. def emitter(task): for item in task.items: - yield SingletonImportTask(task.toppath, item) + task = SingletonImportTask(task.toppath, item) + for new_task in task.handle_created(session): + yield new_task yield SentinelImportTask(task.toppath, task.paths) ipl = pipeline.Pipeline([ @@ -1146,6 +1313,7 @@ return pipeline.multiple(ipl.pull()) resolve_duplicates(session, task) + apply_choice(session, task) return task @@ -1153,13 +1321,36 @@ """Check if a task conflicts with items or albums already imported and ask the session to resolve this. """ - if task.choice_flag in (action.ASIS, action.APPLY): - ident = task.chosen_ident() + if task.choice_flag in (action.ASIS, action.APPLY, action.RETAG): found_duplicates = task.find_duplicates(session.lib) - if ident in session.seen_idents or found_duplicates: - session.resolve_duplicate(task, found_duplicates) + if found_duplicates: + log.debug(u'found duplicates: {}'.format( + [o.id for o in found_duplicates] + )) + + # Get the default action to follow from config. + duplicate_action = config['import']['duplicate_action'].as_choice({ + u'skip': u's', + u'keep': u'k', + u'remove': u'r', + u'ask': u'a', + }) + log.debug(u'default action for duplicates: {0}', duplicate_action) + + if duplicate_action == u's': + # Skip new. 
+ task.set_choice(action.SKIP) + elif duplicate_action == u'k': + # Keep both. Do nothing; leave the choice intact. + pass + elif duplicate_action == u'r': + # Remove old. + task.should_remove_duplicates = True + else: + # No default action set; ask the session. + session.resolve_duplicate(task, found_duplicates) + session.log_choice(task, True) - session.seen_idents.add(ident) @pipeline.mutator_stage @@ -1172,17 +1363,14 @@ if task.skip: return - log.info(displayable_path(task.paths)) - - # Behave as if ASIS were selected. - task.set_null_candidates() + log.info(u'{}', displayable_path(task.paths)) task.set_choice(action.ASIS) + apply_choice(session, task) -@pipeline.mutator_stage -def apply_choices(session, task): - """A coroutine for applying changes to albums and singletons during - the autotag process. +def apply_choice(session, task): + """Apply the task's choice to the Album or Item it contains and add + it to the library. """ if task.skip: return @@ -1226,6 +1414,7 @@ move=session.config['move'], copy=session.config['copy'], write=session.config['write'], + link=session.config['link'], session=session, ) @@ -1233,9 +1422,24 @@ task.finalize(session) +@pipeline.stage +def log_files(session, task): + """A coroutine (pipeline stage) to log each file to be imported. + """ + if isinstance(task, SingletonImportTask): + log.info(u'Singleton: {0}', displayable_path(task.item['path'])) + elif task.items: + log.info(u'Album: {0}', displayable_path(task.paths[0])) + for item in task.items: + log.info(u' {0}', displayable_path(item['path'])) + + def group_albums(session): - """Group the items of a task by albumartist and album name and create a new - task for each album. Yield the tasks as a multi message. + """A pipeline stage that groups the items of each task into albums + using their metadata. + + Groups are identified using their artist and album fields. The + pipeline stage emits new album tasks for each discovered group. """ def group(item): return (item.albumartist or item.artist, item.album) @@ -1246,15 +1450,19 @@ if task.skip: continue tasks = [] - for _, items in itertools.groupby(task.items, group): - tasks.append(ImportTask(items=list(items))) + sorted_items = sorted(task.items, key=group) + for _, items in itertools.groupby(sorted_items, group): + items = list(items) + task = ImportTask(task.toppath, [i.path for i in items], + items) + tasks += task.handle_created(session) tasks.append(SentinelImportTask(task.toppath, task.paths)) task = pipeline.multiple(tasks) -MULTIDISC_MARKERS = (r'dis[ck]', r'cd') -MULTIDISC_PAT_FMT = r'^(.*%s[\W_]*)\d' +MULTIDISC_MARKERS = (br'dis[ck]', br'cd') +MULTIDISC_PAT_FMT = br'^(.*%s[\W_]*)\d' def albums_in_dir(path): @@ -1265,8 +1473,11 @@ """ collapse_pat = collapse_paths = collapse_items = None ignore = config['ignore'].as_str_seq() + ignore_hidden = config['ignore_hidden'].get(bool) - for root, dirs, files in sorted_walk(path, ignore=ignore, logger=log): + for root, dirs, files in sorted_walk(path, ignore=ignore, + ignore_hidden=ignore_hidden, + logger=log): items = [os.path.join(root, f) for f in files] # If we're currently collapsing the constituent directories in a # multi-disc album, check whether we should continue collapsing @@ -1294,7 +1505,9 @@ # named in this way. 
start_collapsing = False for marker in MULTIDISC_MARKERS: - marker_pat = re.compile(MULTIDISC_PAT_FMT % marker, re.I) + # We're using replace on %s due to lack of .format() on bytestrings + p = MULTIDISC_PAT_FMT.replace(b'%s', marker) + marker_pat = re.compile(p, re.I) match = marker_pat.match(os.path.basename(root)) # Is this directory the root of a nested multi-disc album? @@ -1303,13 +1516,16 @@ start_collapsing = True subdir_pat = None for subdir in dirs: + subdir = util.bytestring_path(subdir) # The first directory dictates the pattern for # the remaining directories. if not subdir_pat: match = marker_pat.match(subdir) if match: + match_group = re.escape(match.group(1)) subdir_pat = re.compile( - r'^%s\d' % re.escape(match.group(1)), re.I + b''.join([b'^', match_group, br'\d']), + re.I ) else: start_collapsing = False @@ -1331,7 +1547,8 @@ # Set the current pattern to match directories with the same # prefix as this one, followed by a digit. collapse_pat = re.compile( - r'^%s\d' % re.escape(match.group(1)), re.I + b''.join([b'^', re.escape(match.group(1)), br'\d']), + re.I ) break diff -Nru beets-1.3.8+dfsg/beets/__init__.py beets-1.3.19/beets/__init__.py --- beets-1.3.8+dfsg/beets/__init__.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/beets/__init__.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,12 +13,30 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. -__version__ = '1.3.8' -__author__ = 'Adrian Sampson ' +from __future__ import division, absolute_import, print_function + +import os -import beets.library from beets.util import confit -Library = beets.library.Library +__version__ = u'1.3.18' +__author__ = u'Adrian Sampson ' + + +class IncludeLazyConfig(confit.LazyConfig): + """A version of Confit's LazyConfig that also merges in data from + YAML files specified in an `include` setting. + """ + def read(self, user=True, defaults=True): + super(IncludeLazyConfig, self).read(user, defaults) + + try: + for view in self['include']: + filename = view.as_filename() + if os.path.isfile(filename): + self.set_file(filename) + except confit.NotFoundError: + pass + -config = confit.LazyConfig('beets', __name__) +config = IncludeLazyConfig('beets', __name__) diff -Nru beets-1.3.8+dfsg/beets/library.py beets-1.3.19/beets/library.py --- beets-1.3.8+dfsg/beets/library.py 2014-09-18 02:01:05.000000000 +0000 +++ beets-1.3.19/beets/library.py 2016-06-26 00:42:09.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,14 +15,16 @@ """The core data store and collection logic for beets. """ +from __future__ import division, absolute_import, print_function + import os import sys -import logging -import shlex import unicodedata import time import re from unidecode import unidecode + +from beets import logging from beets.mediafile import MediaFile, MutagenError, UnreadableFileError from beets import plugins from beets import util @@ -38,31 +41,85 @@ # Library-specific query types. 
class PathQuery(dbcore.FieldQuery): - """A query that matches all items under a given path.""" - def __init__(self, field, pattern, fast=True): + """A query that matches all items under a given path. + + Matching can either be case-insensitive or case-sensitive. By + default, the behavior depends on the OS: case-insensitive on Windows + and case-sensitive otherwise. + """ + + escape_re = re.compile(br'[\\_%]') + escape_char = b'\\' + + def __init__(self, field, pattern, fast=True, case_sensitive=None): + """Create a path query. `pattern` must be a path, either to a + file or a directory. + + `case_sensitive` can be a bool or `None`, indicating that the + behavior should depend on the filesystem. + """ super(PathQuery, self).__init__(field, pattern, fast) + # By default, the case sensitivity depends on the filesystem + # that the query path is located on. + if case_sensitive is None: + path = util.bytestring_path(util.normpath(pattern)) + case_sensitive = beets.util.case_sensitive(path) + self.case_sensitive = case_sensitive + + # Use a normalized-case pattern for case-insensitive matches. + if not case_sensitive: + pattern = pattern.lower() + # Match the path as a single file. self.file_path = util.bytestring_path(util.normpath(pattern)) # As a directory (prefix). - self.dir_path = util.bytestring_path(os.path.join(self.file_path, '')) + self.dir_path = util.bytestring_path(os.path.join(self.file_path, b'')) + + @classmethod + def is_path_query(cls, query_part): + """Try to guess whether a unicode query part is a path query. + + Condition: separator precedes colon and the file exists. + """ + colon = query_part.find(':') + if colon != -1: + query_part = query_part[:colon] + + # Test both `sep` and `altsep` (i.e., both slash and backslash on + # Windows). + return ( + (os.sep in query_part or + (os.altsep and os.altsep in query_part)) and + os.path.exists(syspath(normpath(query_part))) + ) def match(self, item): - return (item.path == self.file_path) or \ - item.path.startswith(self.dir_path) + path = item.path if self.case_sensitive else item.path.lower() + return (path == self.file_path) or path.startswith(self.dir_path) - def clause(self): - dir_pat = buffer(self.dir_path + '%') - file_blob = buffer(self.file_path) - return '({0} = ?) || ({0} LIKE ?)'.format(self.field), \ - (file_blob, dir_pat) + def col_clause(self): + if self.case_sensitive: + file_blob = buffer(self.file_path) + dir_blob = buffer(self.dir_path) + return '({0} = ?) || (substr({0}, 1, ?) = ?)'.format(self.field), \ + (file_blob, len(dir_blob), dir_blob) + + escape = lambda m: self.escape_char + m.group(0) + dir_pattern = self.escape_re.sub(escape, self.dir_path) + dir_blob = buffer(dir_pattern + b'%') + file_pattern = self.escape_re.sub(escape, self.file_path) + file_blob = buffer(file_pattern) + return '({0} LIKE ? ESCAPE ?) || ({0} LIKE ? ESCAPE ?)'.format( + self.field), (file_blob, self.escape_char, dir_blob, + self.escape_char) # Library-specific field types. 
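To make the new path-detection heuristic concrete, the classmethod can be called directly; the query strings below are examples only, and the result also depends on whether the path actually exists on disk.

    # Example only: PathQuery.is_path_query() treats a query part as a path
    # when a path separator precedes any colon and the prefix exists on disk.
    from __future__ import print_function
    from beets.library import PathQuery

    print(PathQuery.is_path_query(u'/music/flac'))      # True only if /music/flac exists
    print(PathQuery.is_path_query(u'/music/flac:x'))    # only the part before ':' is tested
    print(PathQuery.is_path_query(u'artist:Nirvana'))   # False: no separator before ':'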
class DateType(types.Float): # TODO representation should be `datetime` object - # TODO distinguish beetween date and time types + # TODO distinguish between date and time types query = dbcore.query.DateQuery def format(self, value): @@ -111,7 +168,7 @@ return self.normalize(sql_value) def to_sql(self, value): - if isinstance(value, str): + if isinstance(value, bytes): value = buffer(value) return value @@ -134,6 +191,7 @@ for flat, sharp in self.ENHARMONIC.items(): key = re.sub(flat, sharp, key) key = re.sub(r'[\W\s]+minor', 'm', key) + key = re.sub(r'[\W\s]+major', '', key) return key.capitalize() def normalize(self, key): @@ -143,31 +201,57 @@ return self.parse(key) +class DurationType(types.Float): + """Human-friendly (M:SS) representation of a time interval.""" + query = dbcore.query.DurationQuery + + def format(self, value): + if not beets.config['format_raw_length'].get(bool): + return beets.ui.human_seconds_short(value or 0.0) + else: + return value + + def parse(self, string): + try: + # Try to format back hh:ss to seconds. + return util.raw_seconds_short(string) + except ValueError: + # Fall back to a plain float. + try: + return float(string) + except ValueError: + return self.null + + # Library-specific sort types. class SmartArtistSort(dbcore.query.Sort): """Sort by artist (either album artist or track artist), prioritizing the sort field over the raw field. """ - def __init__(self, model_cls, ascending=True): + def __init__(self, model_cls, ascending=True, case_insensitive=True): self.album = model_cls is Album self.ascending = ascending + self.case_insensitive = case_insensitive def order_clause(self): order = "ASC" if self.ascending else "DESC" - if self.album: - field = 'albumartist' - else: - field = 'artist' + field = 'albumartist' if self.album else 'artist' + collate = 'COLLATE NOCASE' if self.case_insensitive else '' return ('(CASE {0}_sort WHEN NULL THEN {0} ' 'WHEN "" THEN {0} ' - 'ELSE {0}_sort END) {1}').format(field, order) + 'ELSE {0}_sort END) {1} {2}').format(field, collate, order) def sort(self, objs): if self.album: - key = lambda a: a.albumartist_sort or a.albumartist + field = lambda a: a.albumartist_sort or a.albumartist else: - key = lambda i: i.artist_sort or i.artist + field = lambda i: i.artist_sort or i.artist + + if self.case_insensitive: + key = lambda x: field(x).lower() + else: + key = field return sorted(objs, key=key, reverse=not self.ascending) @@ -223,6 +307,10 @@ """Shared concrete functionality for Items and Albums. """ + _format_config_key = None + """Config key that specifies how an instance should be formatted. 
+ """ + def _template_funcs(self): funcs = DefaultTemplateFunctions(self, self._db).functions() funcs.update(plugins.template_funcs()) @@ -230,15 +318,31 @@ def store(self): super(LibModel, self).store() - plugins.send('database_change', lib=self._db) + plugins.send('database_change', lib=self._db, model=self) def remove(self): super(LibModel, self).remove() - plugins.send('database_change', lib=self._db) + plugins.send('database_change', lib=self._db, model=self) def add(self, lib=None): super(LibModel, self).add(lib) - plugins.send('database_change', lib=self._db) + plugins.send('database_change', lib=self._db, model=self) + + def __format__(self, spec): + if not spec: + spec = beets.config[self._format_config_key].get(unicode) + result = self.evaluate_template(spec) + if isinstance(spec, bytes): + # if spec is a byte string then we must return a one as well + return result.encode('utf8') + else: + return result + + def __str__(self): + return format(self).encode('utf8') + + def __unicode__(self): + return format(self) class FormattedItemMapping(dbcore.db.FormattedMapping): @@ -350,7 +454,7 @@ 'original_day': types.PaddedInt(2), 'initial_key': MusicalKey(), - 'length': types.FLOAT, + 'length': DurationType(), 'bitrate': types.ScaledInt(1000, u'kbps'), 'format': types.STRING, 'samplerate': types.ScaledInt(1000, u'kHz'), @@ -363,6 +467,10 @@ _search_fields = ('artist', 'title', 'comments', 'album', 'albumartist', 'genre') + _types = { + 'data_source': types.STRING, + } + _media_fields = set(MediaFile.readable_fields()) \ .intersection(_fields.keys()) """Set of item fields that are backed by `MediaFile` fields. @@ -372,14 +480,25 @@ `write`. """ + _media_tag_fields = set(MediaFile.fields()).intersection(_fields.keys()) + """Set of item fields that are backed by *writable* `MediaFile` tag + fields. + + This excludes fields that represent audio data, such as `bitrate` or + `length`. + """ + _formatter = FormattedItemMapping _sorts = {'artist': SmartArtistSort} + _format_config_key = 'format_item' + @classmethod def _getters(cls): getters = plugins.item_field_getters() getters['singleton'] = lambda i: i.album_id is None + getters['filesize'] = Item.try_filesize # In bytes. return getters @classmethod @@ -400,7 +519,7 @@ if isinstance(value, unicode): value = bytestring_path(value) elif isinstance(value, buffer): - value = str(value) + value = bytes(value) if key in MediaFile.fields(): self.mtime = 0 # Reset mtime on dirty. @@ -447,10 +566,7 @@ for key in self._media_fields: value = getattr(mediafile, key) if isinstance(value, (int, long)): - # Filter values wider than 64 bits (in signed representation). - # SQLite cannot store them. py26: Post transition, we can use: - # value.bit_length() > 63 - if abs(value) >= 2 ** 63: + if value.bit_length() > 63: value = 0 self[key] = value @@ -460,12 +576,18 @@ self.path = read_path - def write(self, path=None): + def write(self, path=None, tags=None): """Write the item's metadata to a media file. All fields in `_media_fields` are written to disk according to the values on this object. + `path` is the path of the mediafile to write the data to. It + defaults to the item's path. + + `tags` is a dictionary of additional metadata the should be + written to the file. (These tags need not be in `_media_fields`.) + Can raise either a `ReadError` or a `WriteError`. """ if path is None: @@ -473,16 +595,23 @@ else: path = normpath(path) - tags = dict(self) - plugins.send('write', item=self, path=path, tags=tags) + # Get the data to write to the file. 
+ item_tags = dict(self) + item_tags = {k: v for k, v in item_tags.items() + if k in self._media_fields} # Only write media fields. + if tags is not None: + item_tags.update(tags) + plugins.send('write', item=self, path=path, tags=item_tags) + # Open the file. try: mediafile = MediaFile(syspath(path), id3v23=beets.config['id3v23'].get(bool)) except (OSError, IOError, UnreadableFileError) as exc: raise ReadError(self.path, exc) - mediafile.update(tags) + # Write the tags to the file. + mediafile.update(item_tags) try: mediafile.save() except (OSError, IOError, MutagenError) as exc: @@ -493,38 +622,44 @@ self.mtime = self.current_mtime() plugins.send('after_write', item=self, path=path) - def try_write(self, path=None): + def try_write(self, path=None, tags=None): """Calls `write()` but catches and logs `FileOperationError` exceptions. Returns `False` an exception was caught and `True` otherwise. """ try: - self.write(path) + self.write(path, tags) return True except FileOperationError as exc: - log.error(exc) + log.error(u"{0}", exc) return False - def try_sync(self, write=None): - """Synchronize the item with the database and the media file - tags, updating them with this object's current state. - - By default, the current `path` for the item is used to write - tags. If `write` is `False`, no tags are written. If `write` is - a path, tags are written to that file instead. - - Similar to calling :meth:`write` and :meth:`store`. - """ - if write is True: - write = None - if write is not False: - self.try_write(path=write) + def try_sync(self, write, move, with_album=True): + """Synchronize the item with the database and, possibly, updates its + tags on disk and its path (by moving the file). + + `write` indicates whether to write new tags into the file. Similarly, + `move` controls whether the path should be updated. In the + latter case, files are *only* moved when they are inside their + library's directory (if any). + + Similar to calling :meth:`write`, :meth:`move`, and :meth:`store` + (conditionally). + """ + if write: + self.try_write() + if move: + # Check whether this file is inside the library directory. + if self._db and self._db.directory in util.ancestry(self.path): + log.debug(u'moving {0} to synchronize path', + util.displayable_path(self.path)) + self.move(with_album=with_album) self.store() # Files themselves. - def move_file(self, dest, copy=False): + def move_file(self, dest, copy=False, link=False): """Moves or copies the item's file, updating the path value if the move succeeds. If a file exists at ``dest``, then it is slightly modified to be unique. @@ -535,6 +670,10 @@ util.copy(self.path, dest) plugins.send("item_copied", item=self, source=self.path, destination=dest) + elif link: + util.link(self.path, dest) + plugins.send("item_linked", item=self, source=self.path, + destination=dest) else: plugins.send("before_item_moved", item=self, source=self.path, destination=dest) @@ -551,6 +690,17 @@ """ return int(os.path.getmtime(syspath(self.path))) + def try_filesize(self): + """Get the size of the underlying file in bytes. + + If the file is missing, return 0 (and log a warning). + """ + try: + return os.path.getsize(syspath(self.path)) + except (OSError, Exception) as exc: + log.warning(u'could not get filesize: {0}', exc) + return 0 + # Model methods. 
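A brief usage sketch for the reworked `try_sync` (the field edit is hypothetical and `item` is assumed to be an `Item` already attached to a library): the caller now states explicitly whether tags are written and whether the file may be moved, and the move only happens when the file lives inside the library directory:

    item.genre = u'Ambient'               # hypothetical edit
    item.try_sync(write=True, move=True)  # write tags, move within the library if needed, then store()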
def remove(self, delete=False, with_album=True): @@ -576,13 +726,14 @@ self._db._memotable = {} - def move(self, copy=False, basedir=None, with_album=True): + def move(self, copy=False, link=False, basedir=None, with_album=True): """Move the item to its designated location within the library directory (provided by destination()). Subdirectories are created as needed. If the operation succeeds, the item's path field is updated to reflect the new location. - If copy is True, moving the file is copied rather than moved. + If `copy` is true, moving the file is copied rather than moved. + Similarly, `link` creates a symlink instead. basedir overrides the library base directory for the destination. @@ -604,7 +755,7 @@ # Perform the move and store the change. old_path = self.path - self.move_file(dest, copy) + self.move_file(dest, copy, link) self.store() # If this item is in an album, move its art. @@ -650,7 +801,7 @@ if query == PF_KEY_DEFAULT: break else: - assert False, "no default path format" + assert False, u"no default path format" if isinstance(path_format, Template): subpath_tmpl = path_format else: @@ -668,29 +819,27 @@ if beets.config['asciify_paths']: subpath = unidecode(subpath) - # Truncate components and remove forbidden characters. - subpath = util.sanitize_path(subpath, self._db.replacements) - - # Encode for the filesystem. - if not fragment: - subpath = bytestring_path(subpath) - - # Preserve extension. - _, extension = os.path.splitext(self.path) - if fragment: - # Outputting Unicode. - extension = extension.decode('utf8', 'ignore') - subpath += extension.lower() - - # Truncate too-long components. maxlen = beets.config['max_filename_length'].get(int) if not maxlen: # When zero, try to determine from filesystem. maxlen = util.max_filename_length(self._db.directory) - subpath = util.truncate_path(subpath, maxlen) + + subpath, fellback = util.legalize_path( + subpath, self._db.replacements, maxlen, + os.path.splitext(self.path)[1], fragment + ) + if fellback: + # Print an error message if legalization fell back to + # default replacements because of the maximum length. + log.warning( + u'Fell back to default replacements when naming ' + u'file {}. Configure replacements to avoid lengthening ' + u'the filename.', + subpath + ) if fragment: - return subpath + return util.as_string(subpath) else: return normpath(os.path.join(basedir, subpath)) @@ -702,6 +851,7 @@ """ _table = 'albums' _flex_table = 'album_attributes' + _always_dirty = True _fields = { 'id': types.PRIMARY_ID, 'artpath': PathType(), @@ -715,7 +865,6 @@ 'year': types.PaddedInt(4), 'month': types.PaddedInt(2), 'day': types.PaddedInt(2), - 'tracktotal': types.PaddedInt(2), 'disctotal': types.PaddedInt(2), 'comp': types.BOOLEAN, 'mb_albumid': types.STRING, @@ -729,7 +878,6 @@ 'language': types.STRING, 'country': types.STRING, 'albumstatus': types.STRING, - 'media': types.STRING, 'albumdisambig': types.STRING, 'rg_album_gain': types.NULL_FLOAT, 'rg_album_peak': types.NULL_FLOAT, @@ -740,6 +888,11 @@ _search_fields = ('album', 'albumartist', 'genre') + _types = { + 'path': PathType(), + 'data_source': types.STRING, + } + _sorts = { 'albumartist': SmartArtistSort, 'artist': SmartArtistSort, @@ -755,7 +908,6 @@ 'year', 'month', 'day', - 'tracktotal', 'disctotal', 'comp', 'mb_albumid', @@ -769,7 +921,6 @@ 'language', 'country', 'albumstatus', - 'media', 'albumdisambig', 'rg_album_gain', 'rg_album_peak', @@ -780,12 +931,15 @@ """List of keys that are set on an album's items. 
""" + _format_config_key = 'format_album' + @classmethod def _getters(cls): # In addition to plugin-provided computed fields, also expose # the album's directory as `path`. getters = plugins.album_field_getters() getters['path'] = Album.item_dir + getters['albumtotal'] = Album._albumtotal return getters def items(self): @@ -814,7 +968,7 @@ for item in self.items(): item.remove(delete, False) - def move_art(self, copy=False): + def move_art(self, copy=False, link=False): """Move or copy any existing album art so that it remains in the same directory as the items. """ @@ -827,11 +981,13 @@ return new_art = util.unique_path(new_art) - log.debug(u'moving album art {0} to {1}' - .format(util.displayable_path(old_art), - util.displayable_path(new_art))) + log.debug(u'moving album art {0} to {1}', + util.displayable_path(old_art), + util.displayable_path(new_art)) if copy: util.copy(old_art, new_art) + elif link: + util.link(old_art, new_art) else: util.move(old_art, new_art) self.artpath = new_art @@ -841,7 +997,7 @@ util.prune_dirs(os.path.dirname(old_art), self._db.directory) - def move(self, copy=False, basedir=None): + def move(self, copy=False, link=False, basedir=None): """Moves (or copies) all items to their destination. Any album art moves along with them. basedir overrides the library base directory for the destination. The album is stored to the @@ -856,10 +1012,10 @@ # Move items. items = list(self.items()) for item in items: - item.move(copy, basedir=basedir, with_album=False) + item.move(copy, link, basedir=basedir, with_album=False) # Move art. - self.move_art(copy) + self.move_art(copy, link) self.store() def item_dir(self): @@ -868,9 +1024,30 @@ """ item = self.items().get() if not item: - raise ValueError('empty album') + raise ValueError(u'empty album') return os.path.dirname(item.path) + def _albumtotal(self): + """Return the total number of tracks on all discs on the album + """ + if self.disctotal == 1 or not beets.config['per_disc_numbering']: + return self.items()[0].tracktotal + + counted = [] + total = 0 + + for item in self.items(): + if item.disc in counted: + continue + + total += item.tracktotal + counted.append(item.disc) + + if len(counted) == self.disctotal: + break + + return total + def art_destination(self, image, item_dir=None): """Returns a path to the destination for the album art image for the album. `image` is the path of the image that will be @@ -900,6 +1077,8 @@ """Sets the album's cover art to the image at the given path. The image is copied (or moved) into place, replacing any existing art. + + Sends an 'art_set' event with `self` as the sole argument. """ path = bytestring_path(path) oldart = self.artpath @@ -923,6 +1102,8 @@ util.move(path, artdest) self.artpath = artdest + plugins.send('art_set', album=self) + def store(self): """Update the database with the album information. The album's tracks are also updated. @@ -941,15 +1122,18 @@ item[key] = value item.store() - def try_sync(self, write=True): - """Synchronize the album and its items with the database and - their files by updating them with this object's current state. - - `write` indicates whether to write tags to the item files. + def try_sync(self, write, move): + """Synchronize the album and its items with the database. + Optionally, also write any new tags into the files and update + their paths. + + `write` indicates whether to write tags to the item files, and + `move` controls whether files (both audio and album art) are + moved. 
""" self.store() for item in self.items(): - item.try_sync(bool(write)) + item.try_sync(write, move) # Query construction helpers. @@ -967,26 +1151,24 @@ # Special-case path-like queries, which are non-field queries # containing path separators (/). - if 'path' in model_cls._fields: - path_parts = [] - non_path_parts = [] - for s in parts: - if s.find(os.sep, 0, s.find(':')) != -1: - # Separator precedes colon. - path_parts.append(s) - else: - non_path_parts.append(s) - else: - path_parts = () - non_path_parts = parts + path_parts = [] + non_path_parts = [] + for s in parts: + if PathQuery.is_path_query(s): + path_parts.append(s) + else: + non_path_parts.append(s) query, sort = dbcore.parse_sorted_query( model_cls, non_path_parts, prefixes ) # Add path queries to aggregate query. - if path_parts: - query.subqueries += [PathQuery('path', s) for s in path_parts] + # Match field / flexattr depending on whether the model has the path field + fast_path_query = 'path' in model_cls._fields + query.subqueries += [PathQuery('path', s, fast_path_query) + for s in path_parts] + return query, sort @@ -996,12 +1178,11 @@ The string is split into components using shell-like syntax. """ - # A bug in Python < 2.7.3 prevents correct shlex splitting of - # Unicode strings. - # http://bugs.python.org/issue6988 - if isinstance(s, unicode): - s = s.encode('utf8') - parts = [p.decode('utf8') for p in shlex.split(s)] + assert isinstance(s, unicode), u"Query is not unicode: {0!r}".format(s) + try: + parts = util.shlex_split(s) + except ValueError as exc: + raise dbcore.InvalidQueryError(s, exc) return parse_query_parts(parts, model_cls) @@ -1071,11 +1252,14 @@ in the query string the `sort` argument is ignored. """ # Parse the query, if necessary. - parsed_sort = None - if isinstance(query, basestring): - query, parsed_sort = parse_query_string(query, model_cls) - elif isinstance(query, (list, tuple)): - query, parsed_sort = parse_query_parts(query, model_cls) + try: + parsed_sort = None + if isinstance(query, basestring): + query, parsed_sort = parse_query_string(query, model_cls) + elif isinstance(query, (list, tuple)): + query, parsed_sort = parse_query_parts(query, model_cls) + except dbcore.query.InvalidQueryArgumentTypeError as exc: + raise dbcore.InvalidQueryError(query, exc) # Any non-null sort specified by the parsed query overrides the # provided sort. @@ -1086,21 +1270,29 @@ model_cls, query, sort ) + @staticmethod + def get_default_album_sort(): + """Get a :class:`Sort` object for albums from the config option. + """ + return dbcore.sort_from_strings( + Album, beets.config['sort_album'].as_str_seq()) + + @staticmethod + def get_default_item_sort(): + """Get a :class:`Sort` object for items from the config option. + """ + return dbcore.sort_from_strings( + Item, beets.config['sort_item'].as_str_seq()) + def albums(self, query=None, sort=None): """Get :class:`Album` objects matching the query. """ - sort = sort or dbcore.sort_from_strings( - Album, beets.config['sort_album'].as_str_seq() - ) - return self._fetch(Album, query, sort) + return self._fetch(Album, query, sort or self.get_default_album_sort()) def items(self, query=None, sort=None): """Get :class:`Item` objects matching the query. """ - sort = sort or dbcore.sort_from_strings( - Item, beets.config['sort_item'].as_str_seq() - ) - return self._fetch(Item, query, sort) + return self._fetch(Item, query, sort or self.get_default_item_sort()) # Convenience accessors. 
@@ -1142,7 +1334,7 @@ _prefix = 'tmpl_' def __init__(self, item=None, lib=None): - """Paramaterize the functions. If `item` or `lib` is None, then + """Parametrize the functions. If `item` or `lib` is None, then some functions (namely, ``aunique``) will always evaluate to the empty string. """ @@ -1209,11 +1401,11 @@ return unidecode(s) @staticmethod - def tmpl_time(s, format): + def tmpl_time(s, fmt): """Format a time value using `strftime`. """ cur_fmt = beets.config['time_format'].get(unicode) - return time.strftime(format, time.strptime(s, cur_fmt)) + return time.strftime(fmt, time.strptime(s, cur_fmt)) def tmpl_aunique(self, keys=None, disam=None): """Generate a string that is guaranteed to be unique among all @@ -1247,7 +1439,7 @@ # Find matching albums to disambiguate with. subqueries = [] for key in keys: - value = getattr(album, key) + value = album.get(key, '') subqueries.append(dbcore.MatchQuery(key, value)) albums = self.lib.albums(dbcore.AndQuery(subqueries)) @@ -1260,7 +1452,7 @@ # Find the first disambiguator that distinguishes the albums. for disambiguator in disam: # Get the value for each album for the current field. - disam_values = set([getattr(a, disambiguator) for a in albums]) + disam_values = set([a.get(disambiguator, '') for a in albums]) # If the set of unique values is equal to the number of # albums in the disambiguation set, we're done -- this is @@ -1280,6 +1472,35 @@ self.lib._memotable[memokey] = res return res + @staticmethod + def tmpl_first(s, count=1, skip=0, sep=u'; ', join_str=u'; '): + """ Gets the item(s) from x to y in a string separated by something + and join then with something + + :param s: the string + :param count: The number of items included + :param skip: The number of items skipped + :param sep: the separator. Usually is '; ' (default) or '/ ' + :param join_str: the string which will join the items, default '; '. + """ + skip = int(skip) + count = skip + int(count) + return join_str.join(s.split(sep)[skip:count]) + + def tmpl_ifdef(self, field, trueval=u'', falseval=u''): + """ If field exists return trueval or the field (default) + otherwise, emit return falseval (if provided). + + :param field: The name of the field + :param trueval: The string if the condition is true + :param falseval: The string if the condition is false + :return: The string, based on condition + """ + if self.item.formatted().get(field): + return trueval if trueval else self.item.formatted().get(field) + else: + return falseval + # Get the name of tmpl_* functions in the above class. DefaultTemplateFunctions._func_names = \ diff -Nru beets-1.3.8+dfsg/beets/logging.py beets-1.3.19/beets/logging.py --- beets-1.3.8+dfsg/beets/logging.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beets/logging.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
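The module added below replaces %-style with `str.format`-style log messages; as a usage sketch (the message and path arguments are placeholders), formatting is deferred until the record is actually emitted:

    from beets import logging

    log = logging.getLogger('beets')
    log.debug(u'moving {0} to {1}', u'/old/file.mp3', u'/new/file.mp3')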
+ +"""A drop-in replacement for the standard-library `logging` module that +allows {}-style log formatting on Python 2 and 3. + +Provides everything the "logging" module does. The only difference is +that when getLogger(name) instantiates a logger that logger uses +{}-style formatting. +""" + +from __future__ import division, absolute_import, print_function + +from copy import copy +from logging import * # noqa +import subprocess +import threading + + +def logsafe(val): + """Coerce a potentially "problematic" value so it can be formatted + in a Unicode log string. + + This works around a number of pitfalls when logging objects in + Python 2: + - Logging path names, which must be byte strings, requires + conversion for output. + - Some objects, including some exceptions, will crash when you call + `unicode(v)` while `str(v)` works fine. CalledProcessError is an + example. + """ + # Already Unicode. + if isinstance(val, unicode): + return val + + # Bytestring: needs decoding. + elif isinstance(val, bytes): + # Blindly convert with UTF-8. Eventually, it would be nice to + # (a) only do this for paths, if they can be given a distinct + # type, and (b) warn the developer if they do this for other + # bytestrings. + return val.decode('utf8', 'replace') + + # A "problem" object: needs a workaround. + elif isinstance(val, subprocess.CalledProcessError): + try: + return unicode(val) + except UnicodeDecodeError: + # An object with a broken __unicode__ formatter. Use __str__ + # instead. + return str(val).decode('utf8', 'replace') + + # Other objects are used as-is so field access, etc., still works in + # the format string. + else: + return val + + +class StrFormatLogger(Logger): + """A version of `Logger` that uses `str.format`-style formatting + instead of %-style formatting. + """ + + class _LogMessage(object): + def __init__(self, msg, args, kwargs): + self.msg = msg + self.args = args + self.kwargs = kwargs + + def __str__(self): + args = [logsafe(a) for a in self.args] + kwargs = dict((k, logsafe(v)) for (k, v) in self.kwargs.items()) + return self.msg.format(*args, **kwargs) + + def _log(self, level, msg, args, exc_info=None, extra=None, **kwargs): + """Log msg.format(*args, **kwargs)""" + m = self._LogMessage(msg, args, kwargs) + return super(StrFormatLogger, self)._log(level, m, (), exc_info, extra) + + +class ThreadLocalLevelLogger(Logger): + """A version of `Logger` whose level is thread-local instead of shared. + """ + def __init__(self, name, level=NOTSET): + self._thread_level = threading.local() + self.default_level = NOTSET + super(ThreadLocalLevelLogger, self).__init__(name, level) + + @property + def level(self): + try: + return self._thread_level.level + except AttributeError: + self._thread_level.level = self.default_level + return self.level + + @level.setter + def level(self, value): + self._thread_level.level = value + + def set_global_level(self, level): + """Set the level on the current thread + the default value for all + threads. 
+ """ + self.default_level = level + self.setLevel(level) + + +class BeetsLogger(ThreadLocalLevelLogger, StrFormatLogger): + pass + + +my_manager = copy(Logger.manager) +my_manager.loggerClass = BeetsLogger + + +def getLogger(name=None): # noqa + if name: + return my_manager.getLogger(name) + else: + return Logger.root diff -Nru beets-1.3.8+dfsg/beets/mediafile.py beets-1.3.19/beets/mediafile.py --- beets-1.3.8+dfsg/beets/mediafile.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/beets/mediafile.py 2016-06-26 00:42:09.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -32,8 +33,11 @@ data from the tags. In turn ``MediaField`` uses a number of ``StorageStyle`` strategies to handle format specific logic. """ +from __future__ import division, absolute_import, print_function + import mutagen import mutagen.mp3 +import mutagen.id3 import mutagen.oggopus import mutagen.oggvorbis import mutagen.mp4 @@ -41,18 +45,20 @@ import mutagen.monkeysaudio import mutagen.asf import mutagen.aiff +import codecs import datetime import re import base64 +import binascii import math import struct import imghdr import os -import logging import traceback import enum -from beets.util import displayable_path +from beets import logging +from beets.util import displayable_path, syspath, as_string __all__ = ['UnreadableFileError', 'FileTypeError', 'MediaFile'] @@ -141,7 +147,7 @@ return False elif out_type == unicode: - if isinstance(val, str): + if isinstance(val, bytes): return val.decode('utf8', 'ignore') elif isinstance(val, unicode): return val @@ -152,9 +158,12 @@ if isinstance(val, int) or isinstance(val, float): return float(val) else: - if not isinstance(val, basestring): + if isinstance(val, bytes): + val = val.decode('utf8', 'ignore') + else: val = unicode(val) - match = re.match(r'[\+-]?[0-9\.]+', val.strip()) + match = re.match(r'[\+-]?([0-9]+\.?[0-9]*|[0-9]*\.[0-9]+)', + val.strip()) if match: val = match.group(0) if val: @@ -176,15 +185,15 @@ of exceptions (out-of-bounds, etc.). We should clean this up sometime so that the failure modes are well-defined. """ - type, size = struct.unpack_from(" 0 else None + image_data = frame.value[text_delimiter_index + 1:] + images.append(Image(data=image_data, type=cover_type, + desc=comment)) + except KeyError: + pass + + return images + + def set_list(self, mutagen_file, values): + self.delete(mutagen_file) + + for image in values: + image_type = image.type or ImageType.other + comment = image.desc or '' + image_data = comment.encode('utf8') + b'\x00' + image.data + cover_tag = self.TAG_NAMES[image_type] + mutagen_file[cover_tag] = image_data + + def delete(self, mutagen_file): + """Remove all images from the file. + """ + for cover_tag in self.TAG_NAMES.values(): + try: + del mutagen_file[cover_tag] + except KeyError: + pass + + # MediaField is a descriptor that represents a single logical field. It # aggregates several StorageStyles describing how to access the data for # each file type. @@ -1088,11 +1192,11 @@ year, month, and day number. Each number is either an integer or None. """ - # Get the underlying data and split on hyphens. + # Get the underlying data and split on hyphens and slashes. 
datestring = super(DateField, self).__get__(mediafile, None) if isinstance(datestring, basestring): datestring = re.sub(r'[Tt ].*$', '', unicode(datestring)) - items = unicode(datestring).split('-') + items = re.split('[-/]', unicode(datestring)) else: items = [] @@ -1167,18 +1271,31 @@ class CoverArtField(MediaField): """A descriptor that provides access to the *raw image data* for the - first image on a file. This is used for backwards compatibility: the + cover image on a file. This is used for backwards compatibility: the full `ImageListField` provides richer `Image` objects. + + When there are multiple images we try to pick the most likely to be a front + cover. """ def __init__(self): pass def __get__(self, mediafile, _): - try: - return mediafile.images[0].data - except IndexError: + candidates = mediafile.images + if candidates: + return self.guess_cover_image(candidates).data + else: return None + @staticmethod + def guess_cover_image(candidates): + if len(candidates) == 1: + return candidates[0] + try: + return next(c for c in candidates if c.type == ImageType.front) + except StopIteration: + return candidates[0] + def __set__(self, mediafile, data): if data: mediafile.images = [Image(data=data)] @@ -1189,7 +1306,7 @@ delattr(mediafile, 'images') -class ImageListField(MediaField): +class ImageListField(ListMediaField): """Descriptor to access the list of images embedded in tags. The getter returns a list of `Image` instances obtained from @@ -1206,18 +1323,10 @@ ASFImageStorageStyle(), VorbisImageStorageStyle(), FlacImageStorageStyle(), + APEv2ImageStorageStyle(), + out_type=Image, ) - def __get__(self, mediafile, _): - images = [] - for style in self.styles(mediafile.mgfile): - images.extend(style.get_list(mediafile.mgfile)) - return images - - def __set__(self, mediafile, images): - for style in self.styles(mediafile.mgfile): - style.set_list(mediafile.mgfile, images) - # MediaFile is a collection of fields. @@ -1232,6 +1341,7 @@ By default, MP3 files are saved with ID3v2.4 tags. You can use the older ID3v2.3 standard by specifying the `id3v23` option. """ + path = syspath(path) self.path = path unreadable_exc = ( @@ -1250,7 +1360,7 @@ try: self.mgfile = mutagen.File(path) except unreadable_exc as exc: - log.debug(u'header parsing failed: {0}'.format(unicode(exc))) + log.debug(u'header parsing failed: {0}', unicode(exc)) raise UnreadableFileError(path) except IOError as exc: if type(exc) == IOError: @@ -1258,12 +1368,12 @@ # anywhere else. raise else: - log.debug(traceback.format_exc()) + log.debug(u'{}', traceback.format_exc()) raise MutagenError(path, exc) except Exception as exc: # Isolate bugs in Mutagen. - log.debug(traceback.format_exc()) - log.error(u'uncaught Mutagen exception in open: {0}'.format(exc)) + log.debug(u'{}', traceback.format_exc()) + log.error(u'uncaught Mutagen exception in open: {0}', exc) raise MutagenError(path, exc) if self.mgfile is None: @@ -1271,15 +1381,21 @@ raise FileTypeError(path) elif (type(self.mgfile).__name__ == 'M4A' or type(self.mgfile).__name__ == 'MP4'): - # This hack differentiates AAC and ALAC until we find a more - # deterministic approach. Mutagen only sets the sample rate - # for AAC files. 
See: - # https://github.com/sampsyo/beets/pull/295 - if hasattr(self.mgfile.info, 'sample_rate') and \ - self.mgfile.info.sample_rate > 0: - self.type = 'aac' + info = self.mgfile.info + if hasattr(info, 'codec'): + if info.codec and info.codec.startswith('alac'): + self.type = 'alac' + else: + self.type = 'aac' else: - self.type = 'alac' + # This hack differentiates AAC and ALAC on versions of + # Mutagen < 1.26. Once Mutagen > 1.26 is out and + # required by beets, we can remove this. + if hasattr(self.mgfile.info, 'bitrate') and \ + self.mgfile.info.bitrate > 0: + self.type = 'aac' + else: + self.type = 'alac' elif (type(self.mgfile).__name__ == 'ID3' or type(self.mgfile).__name__ == 'MP3'): self.type = 'mp3' @@ -1329,8 +1445,8 @@ # Propagate these through: they don't represent Mutagen bugs. raise except Exception as exc: - log.debug(traceback.format_exc()) - log.error(u'uncaught Mutagen exception in save: {0}'.format(exc)) + log.debug(u'{}', traceback.format_exc()) + log.error(u'uncaught Mutagen exception in save: {0}', exc) raise MutagenError(self.path, exc) def delete(self): @@ -1354,7 +1470,35 @@ """ for property, descriptor in cls.__dict__.items(): if isinstance(descriptor, MediaField): - yield property + yield as_string(property) + + @classmethod + def _field_sort_name(cls, name): + """Get a sort key for a field name that determines the order + fields should be written in. + + Fields names are kept unchanged, unless they are instances of + :class:`DateItemField`, in which case `year`, `month`, and `day` + are replaced by `date0`, `date1`, and `date2`, respectively, to + make them appear in that order. + """ + if isinstance(cls.__dict__[name], DateItemField): + name = re.sub('year', 'date0', name) + name = re.sub('month', 'date1', name) + name = re.sub('day', 'date2', name) + return name + + @classmethod + def sorted_fields(cls): + """Get the names of all writable metadata fields, sorted in the + order that they should be written. + + This is a lexicographic order, except for instances of + :class:`DateItemField`, which are sorted in year-month-day + order. + """ + for property in sorted(cls.fields(), key=cls._field_sort_name): + yield property @classmethod def readable_fields(cls): @@ -1392,7 +1536,7 @@ the `MediaFile`. If a key has the value `None`, the corresponding property is deleted from the `MediaFile`. 
""" - for field in self.fields(): + for field in self.sorted_fields(): if field in dict: if dict[field] is None: delattr(self, field) @@ -1403,25 +1547,25 @@ title = MediaField( MP3StorageStyle('TIT2'), - MP4StorageStyle("\xa9nam"), + MP4StorageStyle('\xa9nam'), StorageStyle('TITLE'), ASFStorageStyle('Title'), ) artist = MediaField( MP3StorageStyle('TPE1'), - MP4StorageStyle("\xa9ART"), + MP4StorageStyle('\xa9ART'), StorageStyle('ARTIST'), ASFStorageStyle('Author'), ) album = MediaField( MP3StorageStyle('TALB'), - MP4StorageStyle("\xa9alb"), + MP4StorageStyle('\xa9alb'), StorageStyle('ALBUM'), ASFStorageStyle('WM/AlbumTitle'), ) genres = ListMediaField( MP3ListStorageStyle('TCON'), - MP4ListStorageStyle("\xa9gen"), + MP4ListStorageStyle('\xa9gen'), ListStorageStyle('GENRE'), ASFStorageStyle('WM/Genre'), ) @@ -1429,13 +1573,13 @@ composer = MediaField( MP3StorageStyle('TCOM'), - MP4StorageStyle("\xa9wrt"), + MP4StorageStyle('\xa9wrt'), StorageStyle('COMPOSER'), ASFStorageStyle('WM/Composer'), ) grouping = MediaField( MP3StorageStyle('TIT1'), - MP4StorageStyle("\xa9grp"), + MP4StorageStyle('\xa9grp'), StorageStyle('GROUPING'), ASFStorageStyle('WM/ContentGroupDescription'), ) @@ -1475,16 +1619,17 @@ ) lyrics = MediaField( MP3DescStorageStyle(key='USLT'), - MP4StorageStyle("\xa9lyr"), + MP4StorageStyle('\xa9lyr'), StorageStyle('LYRICS'), ASFStorageStyle('WM/Lyrics'), ) comments = MediaField( MP3DescStorageStyle(key='COMM'), - MP4StorageStyle("\xa9cmt"), + MP4StorageStyle('\xa9cmt'), StorageStyle('DESCRIPTION'), StorageStyle('COMMENT'), ASFStorageStyle('WM/Comments'), + ASFStorageStyle('Description') ) bpm = MediaField( MP3StorageStyle('TBPM'), @@ -1523,76 +1668,76 @@ ) artist_sort = MediaField( MP3StorageStyle('TSOP'), - MP4StorageStyle("soar"), + MP4StorageStyle('soar'), StorageStyle('ARTISTSORT'), ASFStorageStyle('WM/ArtistSortOrder'), ) albumartist_sort = MediaField( MP3DescStorageStyle(u'ALBUMARTISTSORT'), - MP4StorageStyle("soaa"), + MP4StorageStyle('soaa'), StorageStyle('ALBUMARTISTSORT'), ASFStorageStyle('WM/AlbumArtistSortOrder'), ) asin = MediaField( MP3DescStorageStyle(u'ASIN'), - MP4StorageStyle("----:com.apple.iTunes:ASIN"), + MP4StorageStyle('----:com.apple.iTunes:ASIN'), StorageStyle('ASIN'), ASFStorageStyle('MusicBrainz/ASIN'), ) catalognum = MediaField( MP3DescStorageStyle(u'CATALOGNUMBER'), - MP4StorageStyle("----:com.apple.iTunes:CATALOGNUMBER"), + MP4StorageStyle('----:com.apple.iTunes:CATALOGNUMBER'), StorageStyle('CATALOGNUMBER'), ASFStorageStyle('WM/CatalogNo'), ) disctitle = MediaField( MP3StorageStyle('TSST'), - MP4StorageStyle("----:com.apple.iTunes:DISCSUBTITLE"), + MP4StorageStyle('----:com.apple.iTunes:DISCSUBTITLE'), StorageStyle('DISCSUBTITLE'), ASFStorageStyle('WM/SetSubTitle'), ) encoder = MediaField( MP3StorageStyle('TENC'), - MP4StorageStyle("\xa9too"), + MP4StorageStyle('\xa9too'), StorageStyle('ENCODEDBY'), StorageStyle('ENCODER'), ASFStorageStyle('WM/EncodedBy'), ) script = MediaField( MP3DescStorageStyle(u'Script'), - MP4StorageStyle("----:com.apple.iTunes:SCRIPT"), + MP4StorageStyle('----:com.apple.iTunes:SCRIPT'), StorageStyle('SCRIPT'), ASFStorageStyle('WM/Script'), ) language = MediaField( MP3StorageStyle('TLAN'), - MP4StorageStyle("----:com.apple.iTunes:LANGUAGE"), + MP4StorageStyle('----:com.apple.iTunes:LANGUAGE'), StorageStyle('LANGUAGE'), ASFStorageStyle('WM/Language'), ) country = MediaField( MP3DescStorageStyle('MusicBrainz Album Release Country'), - MP4StorageStyle("----:com.apple.iTunes:MusicBrainz " - "Album Release Country"), + 
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz ' + 'Album Release Country'), StorageStyle('RELEASECOUNTRY'), ASFStorageStyle('MusicBrainz/Album Release Country'), ) albumstatus = MediaField( MP3DescStorageStyle(u'MusicBrainz Album Status'), - MP4StorageStyle("----:com.apple.iTunes:MusicBrainz Album Status"), + MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Status'), StorageStyle('MUSICBRAINZ_ALBUMSTATUS'), ASFStorageStyle('MusicBrainz/Album Status'), ) media = MediaField( MP3StorageStyle('TMED'), - MP4StorageStyle("----:com.apple.iTunes:MEDIA"), + MP4StorageStyle('----:com.apple.iTunes:MEDIA'), StorageStyle('MEDIA'), ASFStorageStyle('WM/Media'), ) albumdisambig = MediaField( # This tag mapping was invented for beets (not used by Picard, etc). MP3DescStorageStyle(u'MusicBrainz Album Comment'), - MP4StorageStyle("----:com.apple.iTunes:MusicBrainz Album Comment"), + MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Comment'), StorageStyle('MUSICBRAINZ_ALBUMCOMMENT'), ASFStorageStyle('MusicBrainz/Album Comment'), ) @@ -1600,7 +1745,7 @@ # Release date. date = DateField( MP3StorageStyle('TDRC'), - MP4StorageStyle("\xa9day"), + MP4StorageStyle('\xa9day'), StorageStyle('DATE'), ASFStorageStyle('WM/Year'), year=(StorageStyle('YEAR'),)) @@ -1623,13 +1768,13 @@ # Nonstandard metadata. artist_credit = MediaField( MP3DescStorageStyle(u'Artist Credit'), - MP4StorageStyle("----:com.apple.iTunes:Artist Credit"), + MP4StorageStyle('----:com.apple.iTunes:Artist Credit'), StorageStyle('ARTIST_CREDIT'), ASFStorageStyle('beets/Artist Credit'), ) albumartist_credit = MediaField( MP3DescStorageStyle(u'Album Artist Credit'), - MP4StorageStyle("----:com.apple.iTunes:Album Artist Credit"), + MP4StorageStyle('----:com.apple.iTunes:Album Artist Credit'), StorageStyle('ALBUMARTIST_CREDIT'), ASFStorageStyle('beets/Album Artist Credit'), ) @@ -1703,7 +1848,7 @@ ), MP4StorageStyle( '----:com.apple.iTunes:replaygain_track_gain', - float_places=2, suffix=b' dB' + float_places=2, suffix=' dB' ), MP4SoundCheckStorageStyle( '----:com.apple.iTunes:iTunNORM', diff -Nru beets-1.3.8+dfsg/beets/plugins.py beets-1.3.19/beets/plugins.py --- beets-1.3.8+dfsg/beets/plugins.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/beets/plugins.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,12 +15,17 @@ """Support for beets plugins.""" -import logging +from __future__ import division, absolute_import, print_function + +import inspect import traceback +import re from collections import defaultdict -import inspect +from functools import wraps + import beets +from beets import logging from beets import mediafile PLUGIN_NAMESPACE = 'beetsplug' @@ -39,6 +45,23 @@ """ +class PluginLogFilter(logging.Filter): + """A logging filter that identifies the plugin that emitted a log + message. + """ + def __init__(self, plugin): + self.prefix = u'{0}: '.format(plugin.name) + + def filter(self, record): + if hasattr(record.msg, 'msg') and isinstance(record.msg.msg, + basestring): + # A _LogMessage from our hacked-up Logging replacement. + record.msg.msg = self.prefix + record.msg.msg + elif isinstance(record.msg, basestring): + record.msg = self.prefix + record.msg + return True + + # Managing the plugins themselves. 
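Before the class itself, a minimal sketch of how a plugin is expected to use the new per-plugin logger and listener registration (the plugin class, event handler, and messages are hypothetical):

    from beets.plugins import BeetsPlugin

    class ExamplePlugin(BeetsPlugin):
        def __init__(self):
            super(ExamplePlugin, self).__init__()
            self._log.debug(u'plugin loaded')  # prefixed with the plugin's name by PluginLogFilter
            self.register_listener('import', self.imported)

        def imported(self, lib, paths):
            self._log.info(u'import finished with {0} paths', len(paths))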
class BeetsPlugin(object): @@ -49,7 +72,6 @@ def __init__(self, name=None): """Perform one-time plugin setup. """ - self.import_stages = [] self.name = name or self.__module__.split('.')[-1] self.config = beets.config[self.name] if not self.template_funcs: @@ -58,6 +80,12 @@ self.template_fields = {} if not self.album_template_fields: self.album_template_fields = {} + self.import_stages = [] + + self._log = log.getChild(self.name) + self._log.setLevel(logging.NOTSET) # Use `beets` logger level. + if not any(isinstance(f, PluginLogFilter) for f in self._log.filters): + self._log.addFilter(PluginLogFilter(self)) def commands(self): """Should return a list of beets.ui.Subcommand objects for @@ -65,6 +93,46 @@ """ return () + def get_import_stages(self): + """Return a list of functions that should be called as importer + pipelines stages. + + The callables are wrapped versions of the functions in + `self.import_stages`. Wrapping provides some bookkeeping for the + plugin: specifically, the logging level is adjusted to WARNING. + """ + return [self._set_log_level_and_params(logging.WARNING, import_stage) + for import_stage in self.import_stages] + + def _set_log_level_and_params(self, base_log_level, func): + """Wrap `func` to temporarily set this plugin's logger level to + `base_log_level` + config options (and restore it to its previous + value after the function returns). Also determines which params may not + be sent for backwards-compatibility. + """ + argspec = inspect.getargspec(func) + + @wraps(func) + def wrapper(*args, **kwargs): + assert self._log.level == logging.NOTSET + verbosity = beets.config['verbose'].get(int) + log_level = max(logging.DEBUG, base_log_level - 10 * verbosity) + self._log.setLevel(log_level) + try: + try: + return func(*args, **kwargs) + except TypeError as exc: + if exc.args[0].startswith(func.__name__): + # caused by 'func' and not stuff internal to 'func' + kwargs = dict((arg, val) for arg, val in kwargs.items() + if arg in argspec.args) + return func(*args, **kwargs) + else: + raise + finally: + self._log.setLevel(logging.NOTSET) + return wrapper + def queries(self): """Should return a dict mapping prefixes to Query subclasses. """ @@ -121,37 +189,21 @@ mediafile.MediaFile.add_field(name, descriptor) library.Item._media_fields.add(name) + _raw_listeners = None listeners = None - @classmethod - def register_listener(cls, event, func): - """Add a function as a listener for the specified event. (An - imperative alternative to the @listen decorator.) + def register_listener(self, event, func): + """Add a function as a listener for the specified event. """ - if cls.listeners is None: - cls.listeners = defaultdict(list) - cls.listeners[event].append(func) + wrapped_func = self._set_log_level_and_params(logging.WARNING, func) - @classmethod - def listen(cls, event): - """Decorator that adds a function as an event handler for the - specified event (as a string). The parameters passed to function - will vary depending on what event occurred. - - The function should respond to named parameters. - function(**kwargs) will trap all arguments in a dictionary. - Example: - - >>> @MyPlugin.listen("imported") - >>> def importListener(**kwargs): - ... 
pass - """ - def helper(func): - if cls.listeners is None: - cls.listeners = defaultdict(list) - cls.listeners[event].append(func) - return func - return helper + cls = self.__class__ + if cls.listeners is None or cls._raw_listeners is None: + cls._raw_listeners = defaultdict(list) + cls.listeners = defaultdict(list) + if func not in cls._raw_listeners[event]: + cls._raw_listeners[event].append(func) + cls.listeners[event].append(wrapped_func) template_funcs = None template_fields = None @@ -195,14 +247,14 @@ BeetsPlugin subclasses desired. """ for name in names: - modname = '%s.%s' % (PLUGIN_NAMESPACE, name) + modname = '{0}.{1}'.format(PLUGIN_NAMESPACE, name) try: try: namespace = __import__(modname, None, None) except ImportError as exc: # Again, this is hacky: if exc.args[0].endswith(' ' + name): - log.warn(u'** plugin {0} not found'.format(name)) + log.warn(u'** plugin {0} not found', name) else: raise else: @@ -212,8 +264,11 @@ _classes.add(obj) except: - log.warn(u'** error loading plugin {0}'.format(name)) - log.warn(traceback.format_exc()) + log.warn( + u'** error loading plugin {}:\n{}', + name, + traceback.format_exc(), + ) _instances = {} @@ -262,11 +317,12 @@ for plugin in find_plugins(): plugin_types = getattr(plugin, attr_name, {}) for field in plugin_types: - if field in types: + if field in types and plugin_types[field] != types[field]: raise PluginConflictException( u'Plugin {0} defines flexible field {1} ' - 'which has already been defined.' - .format(plugin.name,)) + u'which has already been defined with ' + u'another type.'.format(plugin.name, field) + ) types.update(plugin_types) return types @@ -346,8 +402,7 @@ """Get a list of import stage functions defined by plugins.""" stages = [] for plugin in find_plugins(): - if hasattr(plugin, 'import_stages'): - stages += plugin.import_stages + stages += plugin.get_import_stages() return stages @@ -389,15 +444,46 @@ def send(event, **arguments): - """Sends an event to all assigned event listeners. Event is the - name of the event to send, all other named arguments go to the - event handler(s). + """Send an event to all assigned event listeners. + + `event` is the name of the event to send, all other named arguments + are passed along to the handlers. - Returns a list of return values from the handlers. + Return a list of non-None values returned from the handlers. """ - log.debug(u'Sending event: {0}'.format(event)) + log.debug(u'Sending event: {0}', event) + results = [] for handler in event_handlers()[event]: - # Don't break legacy plugins if we want to pass more arguments - argspec = inspect.getargspec(handler).args - args = dict((k, v) for k, v in arguments.items() if k in argspec) - handler(**args) + result = handler(**arguments) + if result is not None: + results.append(result) + return results + + +def feat_tokens(for_artist=True): + """Return a regular expression that matches phrases like "featuring" + that separate a main artist or a song title from secondary artists. + The `for_artist` option determines whether the regex should be + suitable for matching artist fields (the default) or title fields. 
+ """ + feat_words = ['ft', 'featuring', 'feat', 'feat.', 'ft.'] + if for_artist: + feat_words += ['with', 'vs', 'and', 'con', '&'] + return '(?<=\s)(?:{0})(?=\s)'.format( + '|'.join(re.escape(x) for x in feat_words) + ) + + +def sanitize_choices(choices, choices_all): + """Clean up a stringlist configuration attribute: keep only choices + elements present in choices_all, remove duplicate elements, expand '*' + wildcard while keeping original stringlist order. + """ + seen = set() + others = [x for x in choices_all if x not in choices] + res = [] + for s in choices: + if s in list(choices_all) + ['*']: + if not (s in seen or seen.add(s)): + res.extend(list(others) if s == '*' else [s]) + return res diff -Nru beets-1.3.8+dfsg/beets/ui/commands.py beets-1.3.19/beets/ui/commands.py --- beets-1.3.8+dfsg/beets/ui/commands.py 2014-09-16 21:33:25.000000000 +0000 +++ beets-1.3.19/beets/ui/commands.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,18 +16,17 @@ """This module provides the default commands for beets' command-line interface. """ -from __future__ import print_function -import logging +from __future__ import division, absolute_import, print_function + import os -import time -import codecs -import platform import re +from collections import namedtuple, Counter +from itertools import chain import beets from beets import ui -from beets.ui import print_, input_, decargs +from beets.ui import print_, input_, decargs, show_path_changes from beets import autotag from beets.autotag import Recommendation from beets.autotag import hooks @@ -34,12 +34,13 @@ from beets import importer from beets import util from beets.util import syspath, normpath, ancestry, displayable_path -from beets.util.functemplate import Template from beets import library from beets import config +from beets import logging from beets.util.confit import _package_path VARIOUS_ARTISTS = u'Various Artists' +PromptChoice = namedtuple('ExtraChoice', ['short', 'long', 'callback']) # Global logger. log = logging.getLogger('beets') @@ -70,43 +71,47 @@ items = list(lib.items(query)) if album and not albums: - raise ui.UserError('No matching albums found.') + raise ui.UserError(u'No matching albums found.') elif not album and not items: - raise ui.UserError('No matching items found.') + raise ui.UserError(u'No matching items found.') return items, albums # fields: Shows a list of available fields for queries and format strings. +def _print_keys(query): + """Given a SQLite query result, print the `key` field of each + returned row, with identation of 2 spaces. 
+ """ + for row in query: + print_(' ' * 2 + row['key']) + + def fields_func(lib, opts, args): def _print_rows(names): - print(" " + "\n ".join(names)) + names.sort() + print_(" " + "\n ".join(names)) - def _show_plugin_fields(album): - plugin_fields = [] - for plugin in plugins.find_plugins(): - if album: - fdict = plugin.album_template_fields - else: - fdict = plugin.template_fields - plugin_fields += fdict.keys() - if plugin_fields: - print("Template fields from plugins:") - _print_rows(plugin_fields) - - print("Item fields:") - _print_rows(library.Item._fields.keys()) - _show_plugin_fields(False) - - print("\nAlbum fields:") - _print_rows(library.Album._fields.keys()) - _show_plugin_fields(True) + print_(u"Item fields:") + _print_rows(library.Item.all_keys()) + print_(u"Album fields:") + _print_rows(library.Album.all_keys()) + + with lib.transaction() as tx: + # The SQL uses the DISTINCT to get unique values from the query + unique_fields = 'SELECT DISTINCT key FROM (%s)' + + print_(u"Item flexible attributes:") + _print_keys(tx.query(unique_fields % library.Item._flex_table)) + + print_(u"Album flexible attributes:") + _print_keys(tx.query(unique_fields % library.Album._flex_table)) fields_cmd = ui.Subcommand( 'fields', - help='show fields available for queries and format strings' + help=u'show fields available for queries and format strings' ) fields_cmd.func = fields_func default_commands.append(fields_cmd) @@ -119,7 +124,7 @@ def __init__(self): super(HelpCommand, self).__init__( 'help', aliases=('?',), - help='give detailed help on a specific sub-command', + help=u'give detailed help on a specific sub-command', ) def func(self, lib, opts, args): @@ -127,7 +132,7 @@ cmdname = args[0] helpcommand = self.root_parser._subcommand_for_name(cmdname) if not helpcommand: - raise ui.UserError("unknown command '{0}'".format(cmdname)) + raise ui.UserError(u"unknown command '{0}'".format(cmdname)) helpcommand.print_help() else: self.root_parser.print_help() @@ -174,13 +179,13 @@ """Formats a distance (a float) as a colorized similarity percentage string. """ - out = '%.1f%%' % ((1 - dist) * 100) + out = u'%.1f%%' % ((1 - dist) * 100) if dist <= config['match']['strong_rec_thresh'].as_number(): - out = ui.colorize('green', out) + out = ui.colorize('text_success', out) elif dist <= config['match']['medium_rec_thresh'].as_number(): - out = ui.colorize('yellow', out) + out = ui.colorize('text_warning', out) else: - out = ui.colorize('red', out) + out = ui.colorize('text_error', out) return out @@ -197,7 +202,7 @@ if penalties: if limit and len(penalties) > limit: penalties = penalties[:limit] + ['...'] - return ui.colorize('yellow', '(%s)' % ', '.join(penalties)) + return ui.colorize('text_warning', u'(%s)' % ', '.join(penalties)) def show_change(cur_artist, cur_album, match): @@ -248,21 +253,21 @@ artist_l, artist_r = ui.colordiff(artist_l, artist_r) album_l, album_r = ui.colordiff(album_l, album_r) - print_("Correcting tags from:") + print_(u"Correcting tags from:") show_album(artist_l, album_l) - print_("To:") + print_(u"To:") show_album(artist_r, album_r) else: print_(u"Tagging:\n {0.artist} - {0.album}".format(match.info)) # Data URL. if match.info.data_url: - print_('URL:\n %s' % match.info.data_url) + print_(u'URL:\n %s' % match.info.data_url) # Info line. info = [] # Similarity. - info.append('(Similarity: %s)' % dist_string(match.distance)) + info.append(u'(Similarity: %s)' % dist_string(match.distance)) # Penalties. 
penalties = penalty_string(match.distance) if penalties: @@ -270,12 +275,12 @@ # Disambiguation. disambig = disambig_string(match.info) if disambig: - info.append(ui.colorize('lightgray', '(%s)' % disambig)) + info.append(ui.colorize('text_highlight_minor', u'(%s)' % disambig)) print_(' '.join(info)) # Tracks. - pairs = match.mapping.items() - pairs.sort(key=lambda (_, track_info): track_info.index) + pairs = list(match.mapping.items()) + pairs.sort(key=lambda item_and_track_info: item_and_track_info[1].index) # Build up LHS and RHS for track difference display. The `lines` list # contains ``(lhs, rhs, width)`` tuples where `width` is the length (in @@ -288,16 +293,16 @@ if medium != track_info.medium or disctitle != track_info.disctitle: media = match.info.media or 'Media' if match.info.mediums > 1 and track_info.disctitle: - lhs = '%s %s: %s' % (media, track_info.medium, - track_info.disctitle) + lhs = u'%s %s: %s' % (media, track_info.medium, + track_info.disctitle) elif match.info.mediums > 1: - lhs = '%s %s' % (media, track_info.medium) + lhs = u'%s %s' % (media, track_info.medium) elif track_info.disctitle: - lhs = '%s: %s' % (media, track_info.disctitle) + lhs = u'%s: %s' % (media, track_info.disctitle) else: lhs = None if lhs: - lines.append((lhs, '', 0)) + lines.append((lhs, u'', 0)) medium, disctitle = track_info.medium, track_info.disctitle # Titles. @@ -315,20 +320,12 @@ cur_track, new_track = format_index(item), format_index(track_info) if cur_track != new_track: if item.track in (track_info.index, track_info.medium_index): - color = 'lightgray' - else: - color = 'red' - if (cur_track + new_track).count('-') == 1: - lhs_track, rhs_track = (ui.colorize(color, cur_track), - ui.colorize(color, new_track)) + color = 'text_highlight_minor' else: - color = 'red' - lhs_track, rhs_track = ui.color_diff_suffix(cur_track, - new_track) - templ = (ui.colorize(color, u' (#') + u'{0}' + - ui.colorize(color, u')')) - lhs += templ.format(lhs_track) - rhs += templ.format(rhs_track) + color = 'text_highlight' + templ = ui.colorize(color, u' (#{0})') + lhs += templ.format(cur_track) + rhs += templ.format(new_track) lhs_width += len(cur_track) + 4 # Length change. @@ -337,12 +334,9 @@ config['ui']['length_diff_thresh'].as_number(): cur_length = ui.human_seconds_short(item.length) new_length = ui.human_seconds_short(track_info.length) - lhs_length, rhs_length = ui.color_diff_suffix(cur_length, - new_length) - templ = (ui.colorize('red', u' (') + u'{0}' + - ui.colorize('red', u')')) - lhs += templ.format(lhs_length) - rhs += templ.format(rhs_length) + templ = ui.colorize('text_highlight', u' ({0})') + lhs += templ.format(cur_length) + rhs += templ.format(new_length) lhs_width += len(cur_length) + 3 # Penalties. @@ -351,9 +345,9 @@ rhs += ' %s' % penalties if lhs != rhs: - lines.append((' * %s' % lhs, rhs, lhs_width)) + lines.append((u' * %s' % lhs, rhs, lhs_width)) elif config['import']['detail']: - lines.append((' * %s' % lhs, '', lhs_width)) + lines.append((u' * %s' % lhs, '', lhs_width)) # Print each track in two columns, or across two lines. col_width = (ui.term_width() - len(''.join([' * ', ' -> ']))) // 2 @@ -370,19 +364,23 @@ # Missing and unmatched tracks. if match.extra_tracks: - print_('Missing tracks:') + print_(u'Missing tracks ({0}/{1} - {2:.1%}):'.format( + len(match.extra_tracks), + len(match.info.tracks), + len(match.extra_tracks) / len(match.info.tracks) + )) for track_info in match.extra_tracks: - line = ' ! %s (#%s)' % (track_info.title, format_index(track_info)) + line = u' ! 
%s (#%s)' % (track_info.title, format_index(track_info)) if track_info.length: - line += ' (%s)' % ui.human_seconds_short(track_info.length) - print_(ui.colorize('yellow', line)) + line += u' (%s)' % ui.human_seconds_short(track_info.length) + print_(ui.colorize('text_warning', line)) if match.extra_items: - print_('Unmatched tracks:') + print_(u'Unmatched tracks ({0}):'.format(len(match.extra_items))) for item in match.extra_items: - line = ' ! %s (#%s)' % (item.title, format_index(item)) + line = u' ! %s (#%s)' % (item.title, format_index(item)) if item.length: - line += ' (%s)' % ui.human_seconds_short(item.length) - print_(ui.colorize('yellow', line)) + line += u' (%s)' % ui.human_seconds_short(item.length) + print_(ui.colorize('text_warning', line)) def show_item_change(item, match): @@ -396,22 +394,22 @@ cur_artist, new_artist = ui.colordiff(cur_artist, new_artist) cur_title, new_title = ui.colordiff(cur_title, new_title) - print_("Correcting track tags from:") - print_(" %s - %s" % (cur_artist, cur_title)) - print_("To:") - print_(" %s - %s" % (new_artist, new_title)) + print_(u"Correcting track tags from:") + print_(u" %s - %s" % (cur_artist, cur_title)) + print_(u"To:") + print_(u" %s - %s" % (new_artist, new_title)) else: - print_("Tagging track: %s - %s" % (cur_artist, cur_title)) + print_(u"Tagging track: %s - %s" % (cur_artist, cur_title)) # Data URL. if match.info.data_url: - print_('URL:\n %s' % match.info.data_url) + print_(u'URL:\n %s' % match.info.data_url) # Info line. info = [] # Similarity. - info.append('(Similarity: %s)' % dist_string(match.distance)) + info.append(u'(Similarity: %s)' % dist_string(match.distance)) # Penalties. penalties = penalty_string(match.distance) if penalties: @@ -419,7 +417,7 @@ # Disambiguation. disambig = disambig_string(match.info) if disambig: - info.append(ui.colorize('lightgray', '(%s)' % disambig)) + info.append(ui.colorize('text_highlight_minor', u'(%s)' % disambig)) print_(' '.join(info)) @@ -433,7 +431,7 @@ """ summary_parts = [] if not singleton: - summary_parts.append("{0} items".format(len(items))) + summary_parts.append(u"{0} items".format(len(items))) format_counts = {} for item in items: @@ -442,19 +440,25 @@ # A single format. summary_parts.append(items[0].format) else: - # Enumerate all the formats. - for format, count in format_counts.iteritems(): - summary_parts.append('{0} {1}'.format(format, count)) + # Enumerate all the formats by decreasing frequencies: + for fmt, count in sorted( + format_counts.items(), + key=lambda fmt_and_count: (-fmt_and_count[1], fmt_and_count[0]) + ): + summary_parts.append('{0} {1}'.format(fmt, count)) + + if items: + average_bitrate = sum([item.bitrate for item in items]) / len(items) + total_duration = sum([item.length for item in items]) + total_filesize = sum([item.filesize for item in items]) + summary_parts.append(u'{0}kbps'.format(int(average_bitrate / 1000))) + summary_parts.append(ui.human_seconds_short(total_duration)) + summary_parts.append(ui.human_bytes(total_filesize)) - average_bitrate = sum([item.bitrate for item in items]) / len(items) - total_duration = sum([item.length for item in items]) - summary_parts.append('{0}kbps'.format(int(average_bitrate / 1000))) - summary_parts.append(ui.human_seconds_short(total_duration)) + return u', '.join(summary_parts) - return ', '.join(summary_parts) - -def _summary_judment(rec): +def _summary_judgment(rec): """Determines whether a decision should be made without even asking the user. 
This occurs in quiet mode and when an action is chosen for NONE recommendations. Return an action or None if the user should be @@ -481,14 +485,15 @@ return None if action == importer.action.SKIP: - print_('Skipping.') + print_(u'Skipping.') elif action == importer.action.ASIS: - print_('Importing as-is.') + print_(u'Importing as-is.') return action def choose_candidate(candidates, singleton, rec, cur_artist=None, - cur_album=None, item=None, itemcount=None): + cur_album=None, item=None, itemcount=None, + extra_choices=[]): """Given a sorted list of candidates, ask the user for a selection of which candidate to use. Applies to both full albums and singletons (tracks). Candidates are either AlbumMatch or TrackMatch @@ -496,8 +501,16 @@ `cur_album`, and `itemcount` must be provided. For singletons, `item` must be provided. - Returns the result of the choice, which may SKIP, ASIS, TRACKS, or - MANUAL or a candidate (an AlbumMatch/TrackMatch object). + `extra_choices` is a list of `PromptChoice`s, containg the choices + appended by the plugins after receiving the `before_choose_candidate` + event. If not empty, the choices are appended to the prompt presented + to the user. + + Returns one of the following: + * the result of the choice, which may be SKIP, ASIS, TRACKS, or MANUAL + * a candidate (an AlbumMatch/TrackMatch object) + * the short letter of a `PromptChoice` (if the user selected one of + the `extra_choices`). """ # Sanity check. if singleton: @@ -506,35 +519,41 @@ assert cur_artist is not None assert cur_album is not None + # Build helper variables for extra choices. + extra_opts = tuple(c.long for c in extra_choices) + extra_actions = tuple(c.short for c in extra_choices) + # Zero candidates. if not candidates: if singleton: - print_("No matching recordings found.") - opts = ('Use as-is', 'Skip', 'Enter search', 'enter Id', - 'aBort') + print_(u"No matching recordings found.") + opts = (u'Use as-is', u'Skip', u'Enter search', u'enter Id', + u'aBort') else: - print_("No matching release found for {0} tracks." + print_(u"No matching release found for {0} tracks." .format(itemcount)) - print_('For help, see: ' - 'http://beets.readthedocs.org/en/latest/faq.html#nomatch') - opts = ('Use as-is', 'as Tracks', 'Group albums', 'Skip', - 'Enter search', 'enter Id', 'aBort') - sel = ui.input_options(opts) - if sel == 'u': + print_(u'For help, see: ' + u'http://beets.readthedocs.org/en/latest/faq.html#nomatch') + opts = (u'Use as-is', u'as Tracks', u'Group albums', u'Skip', + u'Enter search', u'enter Id', u'aBort') + sel = ui.input_options(opts + extra_opts) + if sel == u'u': return importer.action.ASIS - elif sel == 't': + elif sel == u't': assert not singleton return importer.action.TRACKS - elif sel == 'e': + elif sel == u'e': return importer.action.MANUAL - elif sel == 's': + elif sel == u's': return importer.action.SKIP - elif sel == 'b': + elif sel == u'b': raise importer.ImportAbort() - elif sel == 'i': + elif sel == u'i': return importer.action.MANUAL_ID - elif sel == 'g': + elif sel == u'g': return importer.action.ALBUMS + elif sel in extra_actions: + return sel else: assert False @@ -576,35 +595,39 @@ # Disambiguation disambig = disambig_string(match.info) if disambig: - line.append(ui.colorize('lightgray', '(%s)' % disambig)) + line.append(ui.colorize('text_highlight_minor', + u'(%s)' % disambig)) - print_(' '.join(line)) + print_(u' '.join(line)) # Ask the user for a choice. 
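A sketch of what the `extra_choices` entries carry (the letter, label, and callback are hypothetical): each `PromptChoice` contributes its `long` label to the prompt, is selected by its `short` letter, and its `callback` is then invoked with the session and task:

    def print_tracks(session, task):
        for item in task.items:  # hypothetical callback body
            print_(u'{0.artist} - {0.title}'.format(item))

    choice = PromptChoice(u'p', u'Print tracks', print_tracks)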
if singleton: - opts = ('Skip', 'Use as-is', 'Enter search', 'enter Id', - 'aBort') + opts = (u'Skip', u'Use as-is', u'Enter search', u'enter Id', + u'aBort') else: - opts = ('Skip', 'Use as-is', 'as Tracks', 'Group albums', - 'Enter search', 'enter Id', 'aBort') - sel = ui.input_options(opts, numrange=(1, len(candidates))) - if sel == 's': + opts = (u'Skip', u'Use as-is', u'as Tracks', u'Group albums', + u'Enter search', u'enter Id', u'aBort') + sel = ui.input_options(opts + extra_opts, + numrange=(1, len(candidates))) + if sel == u's': return importer.action.SKIP - elif sel == 'u': + elif sel == u'u': return importer.action.ASIS - elif sel == 'm': + elif sel == u'm': pass - elif sel == 'e': + elif sel == u'e': return importer.action.MANUAL - elif sel == 't': + elif sel == u't': assert not singleton return importer.action.TRACKS - elif sel == 'b': + elif sel == u'b': raise importer.ImportAbort() - elif sel == 'i': + elif sel == u'i': return importer.action.MANUAL_ID - elif sel == 'g': + elif sel == u'g': return importer.action.ALBUMS + elif sel in extra_actions: + return sel else: # Numerical selection. match = candidates[sel - 1] if sel != 1: @@ -625,53 +648,56 @@ # Ask for confirmation. if singleton: - opts = ('Apply', 'More candidates', 'Skip', 'Use as-is', - 'Enter search', 'enter Id', 'aBort') + opts = (u'Apply', u'More candidates', u'Skip', u'Use as-is', + u'Enter search', u'enter Id', u'aBort') else: - opts = ('Apply', 'More candidates', 'Skip', 'Use as-is', - 'as Tracks', 'Group albums', 'Enter search', 'enter Id', - 'aBort') + opts = (u'Apply', u'More candidates', u'Skip', u'Use as-is', + u'as Tracks', u'Group albums', u'Enter search', + u'enter Id', u'aBort') default = config['import']['default_action'].as_choice({ - 'apply': 'a', - 'skip': 's', - 'asis': 'u', - 'none': None, + u'apply': u'a', + u'skip': u's', + u'asis': u'u', + u'none': None, }) if default is None: require = True - sel = ui.input_options(opts, require=require, default=default) - if sel == 'a': + sel = ui.input_options(opts + extra_opts, require=require, + default=default) + if sel == u'a': return match - elif sel == 'g': + elif sel == u'g': return importer.action.ALBUMS - elif sel == 's': + elif sel == u's': return importer.action.SKIP - elif sel == 'u': + elif sel == u'u': return importer.action.ASIS - elif sel == 't': + elif sel == u't': assert not singleton return importer.action.TRACKS - elif sel == 'e': + elif sel == u'e': return importer.action.MANUAL - elif sel == 'b': + elif sel == u'b': raise importer.ImportAbort() - elif sel == 'i': + elif sel == u'i': return importer.action.MANUAL_ID + elif sel in extra_actions: + return sel def manual_search(singleton): """Input either an artist and album (for full albums) or artist and track name (for singletons) for manual search. """ - artist = input_('Artist:') - name = input_('Track:' if singleton else 'Album:') + artist = input_(u'Artist:') + name = input_(u'Track:' if singleton else u'Album:') return artist.strip(), name.strip() def manual_id(singleton): """Input an ID, either for an album ("release") or a track ("recording"). """ - prompt = u'Enter {0} ID:'.format('recording' if singleton else 'release') + prompt = u'Enter {0} ID:'.format(u'recording' if singleton else u'release') return input_(prompt).strip() @@ -689,7 +715,7 @@ u' ({0} items)'.format(len(task.items))) # Take immediate action if appropriate. 
- action = _summary_judment(task.rec) + action = _summary_judgment(task.rec) if action == importer.action.APPLY: match = task.candidates[0] show_change(task.cur_artist, task.cur_album, match) @@ -700,10 +726,14 @@ # Loop until we have a choice. candidates, rec = task.candidates, task.rec while True: + # Gather extra choices from plugins. + extra_choices = self._get_plugin_choices(task) + extra_ops = {c.short: c.callback for c in extra_choices} + # Ask for a choice from the user. choice = choose_candidate( candidates, False, rec, task.cur_artist, task.cur_album, - itemcount=len(task.items) + itemcount=len(task.items), extra_choices=extra_choices ) # Choose which tags to use. @@ -722,8 +752,14 @@ search_id = manual_id(False) if search_id: _, _, candidates, rec = autotag.tag_album( - task.items, search_id=search_id + task.items, search_ids=search_id.split() ) + elif choice in list(extra_ops.keys()): + # Allow extra ops to automatically set the post-choice. + post_choice = extra_ops[choice](self, task) + if isinstance(post_choice, importer.action): + # MANUAL and MANUAL_ID have no effect, even if returned. + return post_choice else: # We have a candidate! Finish tagging. Here, choice is an # AlbumMatch object. @@ -739,7 +775,7 @@ candidates, rec = task.candidates, task.rec # Take immediate action if appropriate. - action = _summary_judment(task.rec) + action = _summary_judgment(task.rec) if action == importer.action.APPLY: match = candidates[0] show_item_change(task.item, match) @@ -748,8 +784,12 @@ return action while True: + extra_choices = self._get_plugin_choices(task) + extra_ops = {c.short: c.callback for c in extra_choices} + # Ask for a choice. - choice = choose_candidate(candidates, True, rec, item=task.item) + choice = choose_candidate(candidates, True, rec, item=task.item, + extra_choices=extra_choices) if choice in (importer.action.SKIP, importer.action.ASIS): return choice @@ -764,8 +804,14 @@ # Ask for a track ID. search_id = manual_id(True) if search_id: - candidates, rec = autotag.tag_item(task.item, - search_id=search_id) + candidates, rec = autotag.tag_item( + task.item, search_ids=search_id.split()) + elif choice in extra_ops.keys(): + # Allow extra ops to automatically set the post-choice. + post_choice = extra_ops[choice](self, task) + if isinstance(post_choice, importer.action): + # MANUAL and MANUAL_ID have no effect, even if returned. + return post_choice else: # Chose a candidate. assert isinstance(choice, autotag.TrackMatch) @@ -775,37 +821,38 @@ """Decide what to do when a new album or item seems similar to one that's already in the library. """ - log.warn(u"This {0} is already in the library!" - .format("album" if task.is_album else "item")) + log.warn(u"This {0} is already in the library!", + (u"album" if task.is_album else u"item")) if config['import']['quiet']: # In quiet mode, don't prompt -- just skip. log.info(u'Skipping.') - sel = 's' + sel = u's' else: # Print some detail about the existing and new items so the # user can make an informed decision. for duplicate in found_duplicates: - print("Old: " + summarize_items( + print_(u"Old: " + summarize_items( list(duplicate.items()) if task.is_album else [duplicate], not task.is_album, )) - print("New: " + summarize_items( + + print_(u"New: " + summarize_items( task.imported_items(), not task.is_album, )) sel = ui.input_options( - ('Skip new', 'Keep both', 'Remove old') + (u'Skip new', u'Keep both', u'Remove old') ) - if sel == 's': + if sel == u's': # Skip new. 
            task.set_choice(importer.action.SKIP)
-        elif sel == 'k':
+        elif sel == u'k':
             # Keep both. Do nothing; leave the choice intact.
             pass
-        elif sel == 'r':
+        elif sel == u'r':
             # Remove old.
             task.should_remove_duplicates = True
         else:
@@ -813,9 +860,53 @@
     def should_resume(self, path):
         return ui.input_yn(u"Import of the directory:\n{0}\n"
-                           "was interrupted. Resume (Y/n)?"
+                           u"was interrupted. Resume (Y/n)?"
                            .format(displayable_path(path)))

+    def _get_plugin_choices(self, task):
+        """Get the extra choices appended by plugins to the UI prompt.
+
+        The `before_choose_candidate` event is sent to the plugins, with
+        session and task as its parameters. Plugins are responsible for
+        checking the right conditions and returning a list of `PromptChoice`s,
+        which is flattened and checked for conflicts.
+
+        If two or more choices have the same short letter, a warning is
+        emitted and all but one of the choices are discarded, giving
+        preference to the default importer choices.
+
+        Returns a list of `PromptChoice`s.
+        """
+        # Send the before_choose_candidate event and flatten list.
+        extra_choices = list(chain(*plugins.send('before_choose_candidate',
+                                                 session=self, task=task)))
+        # Take into account default options, for duplicate checking.
+        all_choices = [PromptChoice(u'a', u'Apply', None),
+                       PromptChoice(u's', u'Skip', None),
+                       PromptChoice(u'u', u'Use as-is', None),
+                       PromptChoice(u't', u'as Tracks', None),
+                       PromptChoice(u'g', u'Group albums', None),
+                       PromptChoice(u'e', u'Enter search', None),
+                       PromptChoice(u'i', u'enter Id', None),
+                       PromptChoice(u'b', u'aBort', None)] +\
+            extra_choices
+
+        short_letters = [c.short for c in all_choices]
+        if len(short_letters) != len(set(short_letters)):
+            # Duplicate short letter has been found.
+            duplicates = [i for i, count in Counter(short_letters).items()
+                          if count > 1]
+            for short in duplicates:
+                # Keep the first of the choices, removing the rest.
+                dup_choices = [c for c in all_choices if c.short == short]
+                for c in dup_choices[1:]:
+                    log.warn(u"Prompt choice '{0}' removed due to conflict "
+                             u"with '{1}' (short letter: '{2}')",
+                             c.long, dup_choices[0].long, c.short)
+                    extra_choices.remove(c)
+        return extra_choices
+
+
+
 # The import command.
@@ -831,33 +922,26 @@
     # Check parameter consistency.
     if config['import']['quiet'] and config['import']['timid']:
-        raise ui.UserError("can't be both quiet and timid")
+        raise ui.UserError(u"can't be both quiet and timid")

     # Open the log.
     if config['import']['log'].get() is not None:
-        logpath = config['import']['log'].as_filename()
+        logpath = syspath(config['import']['log'].as_filename())
         try:
-            logfile = codecs.open(syspath(logpath), 'a', 'utf8')
+            loghandler = logging.FileHandler(logpath)
         except IOError:
-            raise ui.UserError(u"could not open log file for writing: %s" %
-                               displayable_path(logpath))
-        print(u'import started', time.asctime(), file=logfile)
+            raise ui.UserError(u"could not open log file for writing: "
+                               u"{0}".format(displayable_path(logpath)))
     else:
-        logfile = None
+        loghandler = None

     # Never ask for input in quiet mode.
     if config['import']['resume'].get() == 'ask' and \
             config['import']['quiet']:
         config['import']['resume'] = False

-    session = TerminalImportSession(lib, logfile, paths, query)
-    try:
-        session.run()
-    finally:
-        # If we were logging, close the file.
-        if logfile:
-            print(u'', file=logfile)
-            logfile.close()
+    session = TerminalImportSession(lib, loghandler, paths, query)
+    session.run()

     # Emit event.
plugins.send('import', lib=lib, paths=paths) @@ -878,81 +962,90 @@ query = None paths = args if not paths: - raise ui.UserError('no path specified') + raise ui.UserError(u'no path specified') import_files(lib, paths, query) import_cmd = ui.Subcommand( - 'import', help='import new music', aliases=('imp', 'im') + u'import', help=u'import new music', aliases=(u'imp', u'im') +) +import_cmd.parser.add_option( + u'-c', u'--copy', action='store_true', default=None, + help=u"copy tracks into library directory (default)" ) import_cmd.parser.add_option( - '-c', '--copy', action='store_true', default=None, - help="copy tracks into library directory (default)" + u'-C', u'--nocopy', action='store_false', dest='copy', + help=u"don't copy tracks (opposite of -c)" ) import_cmd.parser.add_option( - '-C', '--nocopy', action='store_false', dest='copy', - help="don't copy tracks (opposite of -c)" + u'-w', u'--write', action='store_true', default=None, + help=u"write new metadata to files' tags (default)" ) import_cmd.parser.add_option( - '-w', '--write', action='store_true', default=None, - help="write new metadata to files' tags (default)" + u'-W', u'--nowrite', action='store_false', dest='write', + help=u"don't write metadata (opposite of -w)" ) import_cmd.parser.add_option( - '-W', '--nowrite', action='store_false', dest='write', - help="don't write metadata (opposite of -w)" + u'-a', u'--autotag', action='store_true', dest='autotag', + help=u"infer tags for imported files (default)" ) import_cmd.parser.add_option( - '-a', '--autotag', action='store_true', dest='autotag', - help="infer tags for imported files (default)" + u'-A', u'--noautotag', action='store_false', dest='autotag', + help=u"don't infer tags for imported files (opposite of -a)" ) import_cmd.parser.add_option( - '-A', '--noautotag', action='store_false', dest='autotag', - help="don't infer tags for imported files (opposite of -a)" + u'-p', u'--resume', action='store_true', default=None, + help=u"resume importing if interrupted" ) import_cmd.parser.add_option( - '-p', '--resume', action='store_true', default=None, - help="resume importing if interrupted" + u'-P', u'--noresume', action='store_false', dest='resume', + help=u"do not try to resume importing" ) import_cmd.parser.add_option( - '-P', '--noresume', action='store_false', dest='resume', - help="do not try to resume importing" + u'-q', u'--quiet', action='store_true', dest='quiet', + help=u"never prompt for input: skip albums instead" ) import_cmd.parser.add_option( - '-q', '--quiet', action='store_true', dest='quiet', - help="never prompt for input: skip albums instead" + u'-l', u'--log', dest='log', + help=u'file to log untaggable albums for later review' ) import_cmd.parser.add_option( - '-l', '--log', dest='log', - help='file to log untaggable albums for later review' + u'-s', u'--singletons', action='store_true', + help=u'import individual tracks instead of full albums' ) import_cmd.parser.add_option( - '-s', '--singletons', action='store_true', - help='import individual tracks instead of full albums' + u'-t', u'--timid', dest='timid', action='store_true', + help=u'always confirm all actions' ) import_cmd.parser.add_option( - '-t', '--timid', dest='timid', action='store_true', - help='always confirm all actions' + u'-L', u'--library', dest='library', action='store_true', + help=u'retag items matching a query' ) import_cmd.parser.add_option( - '-L', '--library', dest='library', action='store_true', - help='retag items matching a query' + u'-i', u'--incremental', dest='incremental', 
action='store_true', + help=u'skip already-imported directories' ) import_cmd.parser.add_option( - '-i', '--incremental', dest='incremental', action='store_true', - help='skip already-imported directories' + u'-I', u'--noincremental', dest='incremental', action='store_false', + help=u'do not skip already-imported directories' ) import_cmd.parser.add_option( - '-I', '--noincremental', dest='incremental', action='store_false', - help='do not skip already-imported directories' + u'--flat', dest='flat', action='store_true', + help=u'import an entire tree as a single album' ) import_cmd.parser.add_option( - '--flat', dest='flat', action='store_true', - help='import an entire tree as a single album' + u'-g', u'--group-albums', dest='group_albums', action='store_true', + help=u'group tracks in a folder into separate albums' ) import_cmd.parser.add_option( - '-g', '--group-albums', dest='group_albums', action='store_true', - help='group tracks in a folder into separate albums' + u'--pretend', dest='pretend', action='store_true', + help=u'just print the files to import' +) +import_cmd.parser.add_option( + u'-S', u'--search-id', dest='search_ids', action='append', + metavar='BACKEND_ID', + help=u'restrict matching to a specific metadata backend ID' ) import_cmd.func = import_func default_commands.append(import_cmd) @@ -960,40 +1053,26 @@ # list: Query and show library contents. -def list_items(lib, query, album, fmt): +def list_items(lib, query, album, fmt=''): """Print out items in lib matching query. If album, then search for albums instead of single items. """ - tmpl = Template(ui._pick_format(album, fmt)) if album: for album in lib.albums(query): - ui.print_obj(album, lib, tmpl) + ui.print_(format(album, fmt)) else: for item in lib.items(query): - ui.print_obj(item, lib, tmpl) + ui.print_(format(item, fmt)) def list_func(lib, opts, args): - if opts.path: - fmt = '$path' - else: - fmt = opts.format - list_items(lib, decargs(args), opts.album, fmt) + list_items(lib, decargs(args), opts.album) -list_cmd = ui.Subcommand('list', help='query the library', aliases=('ls',)) -list_cmd.parser.add_option( - '-a', '--album', action='store_true', - help='show matching albums instead of tracks' -) -list_cmd.parser.add_option( - '-p', '--path', action='store_true', - help='print paths for matched items or albums' -) -list_cmd.parser.add_option( - '-f', '--format', action='store', - help='print with custom format', default=None -) +list_cmd = ui.Subcommand(u'list', help=u'query the library', aliases=(u'ls',)) +list_cmd.parser.usage += u"\n" \ + u'Example: %prog -f \'$album: $title\' artist:beatles' +list_cmd.parser.add_all_common_options() list_cmd.func = list_func default_commands.append(list_cmd) @@ -1012,8 +1091,8 @@ for item in items: # Item deleted? if not os.path.exists(syspath(item.path)): - ui.print_obj(item, lib) - ui.print_(ui.colorize('red', u' deleted')) + ui.print_(format(item)) + ui.print_(ui.colorize('text_error', u' deleted')) if not pretend: item.remove(True) affected_albums.add(item.album_id) @@ -1021,16 +1100,16 @@ # Did the item change since last checked? if item.current_mtime() <= item.mtime: - log.debug(u'skipping {0} because mtime is up to date ({1})' - .format(displayable_path(item.path), item.mtime)) + log.debug(u'skipping {0} because mtime is up to date ({1})', + displayable_path(item.path), item.mtime) continue # Read new data. 
try: item.read() except library.ReadError as exc: - log.error(u'error reading {0}: {1}'.format( - displayable_path(item.path), exc)) + log.error(u'error reading {0}: {1}', + displayable_path(item.path), exc) continue # Special-case album artist when it matches track artist. (Hacky @@ -1040,7 +1119,7 @@ old_item = lib.get_item(item.id) if old_item.albumartist == old_item.artist == item.artist: item.albumartist = old_item.albumartist - item._dirty.discard('albumartist') + item._dirty.discard(u'albumartist') # Check for and display changes. changed = ui.show_model_changes(item, @@ -1072,7 +1151,7 @@ continue album = lib.get_album(album_id) if not album: # Empty albums have already been removed. - log.debug(u'emptied album {0}'.format(album_id)) + log.debug(u'emptied album {0}', album_id) continue first_item = album.items().get() @@ -1083,32 +1162,31 @@ # Move album art (and any inconsistent items). if move and lib.directory in ancestry(first_item.path): - log.debug(u'moving album {0}'.format(album_id)) + log.debug(u'moving album {0}', album_id) album.move() def update_func(lib, opts, args): - update_items(lib, decargs(args), opts.album, opts.move, opts.pretend) + update_items(lib, decargs(args), opts.album, ui.should_move(opts.move), + opts.pretend) update_cmd = ui.Subcommand( - 'update', help='update the library', aliases=('upd', 'up',) + u'update', help=u'update the library', aliases=(u'upd', u'up',) ) +update_cmd.parser.add_album_option() +update_cmd.parser.add_format_option() update_cmd.parser.add_option( - '-a', '--album', action='store_true', - help='match albums instead of tracks' + u'-m', u'--move', action='store_true', dest='move', + help=u"move files in the library directory" ) update_cmd.parser.add_option( - '-M', '--nomove', action='store_false', default=True, dest='move', - help="don't move files in library" + u'-M', u'--nomove', action='store_false', dest='move', + help=u"don't move files in library" ) update_cmd.parser.add_option( - '-p', '--pretend', action='store_true', - help="show all changes but do nothing" -) -update_cmd.parser.add_option( - '-f', '--format', action='store', - help='print with custom format', default=None + u'-p', u'--pretend', action='store_true', + help=u"show all changes but do nothing" ) update_cmd.func = update_func default_commands.append(update_cmd) @@ -1116,30 +1194,33 @@ # remove: Remove items from library, delete files. -def remove_items(lib, query, album, delete): +def remove_items(lib, query, album, delete, force): """Remove items matching query from lib. If album, then match and remove whole albums. If delete, also remove files from disk. """ # Get the matching items. items, albums = _do_query(lib, query, album) - # Prepare confirmation with user. - print_() - if delete: - fmt = u'$path - $title' - prompt = 'Really DELETE %i files (y/n)?' % len(items) - else: - fmt = None - prompt = 'Really remove %i items from the library (y/n)?' % \ - len(items) + # Confirm file removal if not forcing removal. + if not force: + # Prepare confirmation with user. + print_() + if delete: + fmt = u'$path - $title' + prompt = u'Really DELETE %i file%s (y/n)?' % \ + (len(items), 's' if len(items) > 1 else '') + else: + fmt = '' + prompt = u'Really remove %i item%s from the library (y/n)?' % \ + (len(items), 's' if len(items) > 1 else '') - # Show all the items. - for item in items: - ui.print_obj(item, lib, fmt) + # Show all the items. + for item in items: + ui.print_(format(item, fmt)) - # Confirm with user. 
- if not ui.input_yn(prompt, True): - return + # Confirm with user. + if not ui.input_yn(prompt, True): + return # Remove (and possibly delete) items. with lib.transaction(): @@ -1148,20 +1229,21 @@ def remove_func(lib, opts, args): - remove_items(lib, decargs(args), opts.album, opts.delete) + remove_items(lib, decargs(args), opts.album, opts.delete, opts.force) remove_cmd = ui.Subcommand( - 'remove', help='remove matching items from the library', aliases=('rm',) + u'remove', help=u'remove matching items from the library', aliases=(u'rm',) ) remove_cmd.parser.add_option( - "-d", "--delete", action="store_true", - help="also remove files from disk" + u"-d", u"--delete", action="store_true", + help=u"also remove files from disk" ) remove_cmd.parser.add_option( - '-a', '--album', action='store_true', - help='match albums instead of tracks' + u"-f", u"--force", action="store_true", + help=u"do not ask when removing items" ) +remove_cmd.parser.add_album_option() remove_cmd.func = remove_func default_commands.append(remove_cmd) @@ -1181,7 +1263,10 @@ for item in items: if exact: - total_size += os.path.getsize(item.path) + try: + total_size += os.path.getsize(syspath(item.path)) + except OSError as exc: + log.info(u'could not get size of {}: {}', item.path, exc) else: total_size += int(item.length * item.bitrate / 8) total_time += item.length @@ -1191,18 +1276,25 @@ if item.album_id: albums.add(item.album_id) - size_str = '' + ui.human_bytes(total_size) + size_str = u'' + ui.human_bytes(total_size) if exact: - size_str += ' ({0} bytes)'.format(total_size) + size_str += u' ({0} bytes)'.format(total_size) - print_("""Tracks: {0} -Total time: {1} ({2:.2f} seconds) -Total size: {3} -Artists: {4} -Albums: {5} -Album artists: {6}""".format(total_items, ui.human_seconds(total_time), - total_time, size_str, len(artists), len(albums), - len(album_artists))) + print_(u"""Tracks: {0} +Total time: {1}{2} +{3}: {4} +Artists: {5} +Albums: {6} +Album artists: {7}""".format( + total_items, + ui.human_seconds(total_time), + u' ({0:.2f} seconds)'.format(total_time) if exact else '', + u'Total size' if exact else u'Approximate total size', + size_str, + len(artists), + len(albums), + len(album_artists)), + ) def stats_func(lib, opts, args): @@ -1210,11 +1302,11 @@ stats_cmd = ui.Subcommand( - 'stats', help='show statistics about the library or a query' + u'stats', help=u'show statistics about the library or a query' ) stats_cmd.parser.add_option( - '-e', '--exact', action='store_true', - help='get exact file sizes' + u'-e', u'--exact', action='store_true', + help=u'exact size and time' ) stats_cmd.func = stats_func default_commands.append(stats_cmd) @@ -1223,17 +1315,17 @@ # version: Show current beets version. def show_version(lib, opts, args): - print_('beets version %s' % beets.__version__) + print_(u'beets version %s' % beets.__version__) # Show plugins. - names = [p.name for p in plugins.find_plugins()] + names = sorted(p.name for p in plugins.find_plugins()) if names: - print_('plugins:', ', '.join(names)) + print_(u'plugins:', ', '.join(names)) else: - print_('no plugins loaded') + print_(u'no plugins loaded') version_cmd = ui.Subcommand( - 'version', help='output version information' + u'version', help=u'output version information' ) version_cmd.func = show_version default_commands.append(version_cmd) @@ -1260,46 +1352,54 @@ # Apply changes *temporarily*, preview them, and collect modified # objects. - print_('Modifying {0} {1}s.' 
- .format(len(objs), 'album' if album else 'item')) + print_(u'Modifying {0} {1}s.' + .format(len(objs), u'album' if album else u'item')) changed = set() for obj in objs: - obj.update(mods) - for field in dels: - del obj[field] - if ui.show_model_changes(obj): + if print_and_modify(obj, mods, dels): changed.add(obj) # Still something to do? if not changed: - print_('No changes to make.') + print_(u'No changes to make.') return # Confirm action. if confirm: if write and move: - extra = ', move and write tags' + extra = u', move and write tags' elif write: - extra = ' and write tags' + extra = u' and write tags' elif move: - extra = ' and move' + extra = u' and move' else: - extra = '' + extra = u'' - if not ui.input_yn('Really modify%s (Y/n)?' % extra): - return + changed = ui.input_select_objects( + u'Really modify%s' % extra, changed, + lambda o: print_and_modify(o, mods, dels) + ) # Apply changes to database and files with lib.transaction(): for obj in changed: - if move: - cur_path = obj.path - if lib.directory in ancestry(cur_path): # In library? - log.debug(u'moving object {0}' - .format(displayable_path(cur_path))) - obj.move() + obj.try_sync(write, move) - obj.try_sync(write) + +def print_and_modify(obj, mods, dels): + """Print the modifications to an item and return a bool indicating + whether any changes were made. + + `mods` is a dictionary of fields and values to update on the object; + `dels` is a sequence of fields to delete. + """ + obj.update(mods) + for field in dels: + try: + del obj[field] + except KeyError: + pass + return ui.show_model_changes(obj) def modify_parse_args(args): @@ -1324,39 +1424,35 @@ def modify_func(lib, opts, args): query, mods, dels = modify_parse_args(decargs(args)) if not mods and not dels: - raise ui.UserError('no modifications specified') - write = opts.write if opts.write is not None else \ - config['import']['write'].get(bool) - modify_items(lib, mods, dels, query, write, opts.move, opts.album, - not opts.yes) + raise ui.UserError(u'no modifications specified') + modify_items(lib, mods, dels, query, ui.should_write(opts.write), + ui.should_move(opts.move), opts.album, not opts.yes) modify_cmd = ui.Subcommand( - 'modify', help='change metadata fields', aliases=('mod',) -) -modify_cmd.parser.add_option( - '-M', '--nomove', action='store_false', default=True, dest='move', - help="don't move files in library" + u'modify', help=u'change metadata fields', aliases=(u'mod',) ) modify_cmd.parser.add_option( - '-w', '--write', action='store_true', default=None, - help="write new metadata to files' tags (default)" + u'-m', u'--move', action='store_true', dest='move', + help=u"move files in the library directory" ) modify_cmd.parser.add_option( - '-W', '--nowrite', action='store_false', dest='write', - help="don't write metadata (opposite of -w)" + u'-M', u'--nomove', action='store_false', dest='move', + help=u"don't move files in library" ) modify_cmd.parser.add_option( - '-a', '--album', action='store_true', - help='modify whole albums instead of tracks' + u'-w', u'--write', action='store_true', default=None, + help=u"write new metadata to files' tags (default)" ) modify_cmd.parser.add_option( - '-y', '--yes', action='store_true', - help='skip confirmation' + u'-W', u'--nowrite', action='store_false', dest='write', + help=u"don't write metadata (opposite of -w)" ) +modify_cmd.parser.add_album_option() +modify_cmd.parser.add_format_option(target='item') modify_cmd.parser.add_option( - '-f', '--format', action='store', - help='print with custom format', 
default=None + u'-y', u'--yes', action='store_true', + help=u'skip confirmation' ) modify_cmd.func = modify_func default_commands.append(modify_cmd) @@ -1364,7 +1460,7 @@ # move: Move/copy files to the library or a new base directory. -def move_items(lib, dest, query, copy, album): +def move_items(lib, dest, query, copy, album, pretend, confirm=False): """Moves or copies items to a new base directory, given by dest. If dest is None, then the library's base directory is used, making the command "consolidate" files. @@ -1372,14 +1468,38 @@ items, albums = _do_query(lib, query, album, False) objs = albums if album else items - action = 'Copying' if copy else 'Moving' - entity = 'album' if album else 'item' - log.info(u'{0} {1} {2}s.'.format(action, len(objs), entity)) - for obj in objs: - log.debug(u'moving: {0}'.format(util.displayable_path(obj.path))) + # Filter out files that don't need to be moved. + isitemmoved = lambda item: item.path != item.destination(basedir=dest) + isalbummoved = lambda album: any(isitemmoved(i) for i in album.items()) + objs = [o for o in objs if (isalbummoved if album else isitemmoved)(o)] + + action = u'Copying' if copy else u'Moving' + act = u'copy' if copy else u'move' + entity = u'album' if album else u'item' + log.info(u'{0} {1} {2}{3}.', action, len(objs), entity, + u's' if len(objs) != 1 else u'') + if not objs: + return + + if pretend: + if album: + show_path_changes([(item.path, item.destination(basedir=dest)) + for obj in objs for item in obj.items()]) + else: + show_path_changes([(obj.path, obj.destination(basedir=dest)) + for obj in objs]) + else: + if confirm: + objs = ui.input_select_objects( + u'Really %s' % act, objs, + lambda o: show_path_changes( + [(o.path, o.destination(basedir=dest))])) + + for obj in objs: + log.debug(u'moving: {0}', util.displayable_path(obj.path)) - obj.move(copy, basedir=dest) - obj.store() + obj.move(copy, basedir=dest) + obj.store() def move_func(lib, opts, args): @@ -1387,26 +1507,32 @@ if dest is not None: dest = normpath(dest) if not os.path.isdir(dest): - raise ui.UserError('no such directory: %s' % dest) + raise ui.UserError(u'no such directory: %s' % dest) - move_items(lib, dest, decargs(args), opts.copy, opts.album) + move_items(lib, dest, decargs(args), opts.copy, opts.album, opts.pretend, + opts.timid) move_cmd = ui.Subcommand( - 'move', help='move or copy items', aliases=('mv',) + u'move', help=u'move or copy items', aliases=(u'mv',) +) +move_cmd.parser.add_option( + u'-d', u'--dest', metavar='DIR', dest='dest', + help=u'destination directory' ) move_cmd.parser.add_option( - '-d', '--dest', metavar='DIR', dest='dest', - help='destination directory' + u'-c', u'--copy', default=False, action='store_true', + help=u'copy instead of moving' ) move_cmd.parser.add_option( - '-c', '--copy', default=False, action='store_true', - help='copy instead of moving' + u'-p', u'--pretend', default=False, action='store_true', + help=u'show how files would be moved, but don\'t touch anything' ) move_cmd.parser.add_option( - '-a', '--album', default=False, action='store_true', - help='match whole albums instead of tracks' + u'-t', u'--timid', dest='timid', action='store_true', + help=u'always confirm all actions' ) +move_cmd.parser.add_album_option() move_cmd.func = move_func default_commands.append(move_cmd) @@ -1422,39 +1548,38 @@ for item in items: # Item deleted? 
if not os.path.exists(syspath(item.path)): - log.info(u'missing file: {0}'.format( - util.displayable_path(item.path) - )) + log.info(u'missing file: {0}', util.displayable_path(item.path)) continue # Get an Item object reflecting the "clean" (on-disk) state. try: clean_item = library.Item.from_path(item.path) except library.ReadError as exc: - log.error(u'error reading {0}: {1}'.format( - displayable_path(item.path), exc - )) + log.error(u'error reading {0}: {1}', + displayable_path(item.path), exc) continue # Check for and display changes. changed = ui.show_model_changes(item, clean_item, - library.Item._media_fields, force) + library.Item._media_tag_fields, force) if (changed or force) and not pretend: - item.try_sync() + # We use `try_sync` here to keep the mtime up to date in the + # database. + item.try_sync(True, False) def write_func(lib, opts, args): write_items(lib, decargs(args), opts.pretend, opts.force) -write_cmd = ui.Subcommand('write', help='write tag information to files') +write_cmd = ui.Subcommand(u'write', help=u'write tag information to files') write_cmd.parser.add_option( - '-p', '--pretend', action='store_true', - help="show all changes but do nothing" + u'-p', u'--pretend', action='store_true', + help=u"show all changes but do nothing" ) write_cmd.parser.add_option( - '-f', '--force', action='store_true', - help="write tags even if the existing tags match the database" + u'-f', u'--force', action='store_true', + help=u"write tags even if the existing tags match the database" ) write_cmd.func = write_func default_commands.append(write_cmd) @@ -1482,49 +1607,51 @@ filenames.insert(0, user_path) for filename in filenames: - print(filename) + print_(filename) # Open in editor. elif opts.edit: - path = config.user_config_path() - - if 'EDITOR' in os.environ: - editor = os.environ['EDITOR'] - args = [editor, editor, path] - elif platform.system() == 'Darwin': - args = ['open', 'open', '-n', path] - elif platform.system() == 'Windows': - # On windows we can execute arbitrary files. The os will - # take care of starting an appropriate application - args = [path, path] - else: - # Assume Unix - args = ['xdg-open', 'xdg-open', path] - - try: - os.execlp(*args) - except OSError: - raise ui.UserError("Could not edit configuration. Please" - "set the EDITOR environment variable.") + config_edit() # Dump configuration. else: - print(config.dump(full=opts.defaults)) + print_(config.dump(full=opts.defaults, redact=opts.redact)) -config_cmd = ui.Subcommand('config', - help='show or edit the user configuration') +def config_edit(): + """Open a program to edit the user configuration. + An empty config file is created if no existing config file exists. + """ + path = config.user_config_path() + editor = util.editor_command() + try: + if not os.path.isfile(path): + open(path, 'w+').close() + util.interactive_open([path], editor) + except OSError as exc: + message = u"Could not edit configuration: {0}".format(exc) + if not editor: + message += u". 
Please set the EDITOR environment variable" + raise ui.UserError(message) + +config_cmd = ui.Subcommand(u'config', + help=u'show or edit the user configuration') +config_cmd.parser.add_option( + u'-p', u'--paths', action='store_true', + help=u'show files that configuration was loaded from' +) config_cmd.parser.add_option( - '-p', '--paths', action='store_true', - help='show files that configuration was loaded from' + u'-e', u'--edit', action='store_true', + help=u'edit user configuration with $EDITOR' ) config_cmd.parser.add_option( - '-e', '--edit', action='store_true', - help='edit user configuration with $EDITOR' + u'-d', u'--defaults', action='store_true', + help=u'include the default configuration' ) config_cmd.parser.add_option( - '-d', '--defaults', action='store_true', - help='include the default configuration' + u'-c', u'--clear', action='store_false', + dest='redact', default=True, + help=u'do not redact sensitive fields' ) config_cmd.func = config_func default_commands.append(config_cmd) @@ -1534,7 +1661,7 @@ def print_completion(*args): for line in completion_script(default_commands + plugins.commands()): - print(line, end='') + print_(line, end='') if not any(map(os.path.isfile, BASH_COMPLETION_PATHS)): log.warn(u'Warning: Unable to find the bash-completion package. ' u'Command line completion might not work.') @@ -1542,9 +1669,11 @@ BASH_COMPLETION_PATHS = map(syspath, [ u'/etc/bash_completion', u'/usr/share/bash-completion/bash_completion', - u'/usr/share/local/bash-completion/bash_completion', - u'/opt/local/share/bash-completion/bash_completion', # SmartOS - u'/usr/local/etc/bash_completion', # Homebrew + u'/usr/local/share/bash-completion/bash_completion', + # SmartOS + u'/opt/local/share/bash-completion/bash_completion', + # Homebrew (before bash-completion2) + u'/usr/local/etc/bash_completion', ]) @@ -1584,31 +1713,35 @@ # Add global options options['_global'] = { - 'flags': ['-v', '--verbose'], - 'opts': '-l --library -c --config -d --directory -h --help'.split(' ') + 'flags': [u'-v', u'--verbose'], + 'opts': u'-l --library -c --config -d --directory -h --help'.split( + u' ') } # Add flags common to all commands options['_common'] = { - 'flags': ['-h', '--help'] + 'flags': [u'-h', u'--help'] } # Start generating the script - yield "_beet() {\n" + yield u"_beet() {\n" # Command names - yield " local commands='%s'\n" % ' '.join(command_names) - yield "\n" + yield u" local commands='%s'\n" % ' '.join(command_names) + yield u"\n" # Command aliases - yield " local aliases='%s'\n" % ' '.join(aliases.keys()) + yield u" local aliases='%s'\n" % ' '.join(aliases.keys()) for alias, cmd in aliases.items(): - yield " local alias__%s=%s\n" % (alias, cmd) - yield '\n' + yield u" local alias__%s=%s\n" % (alias, cmd) + yield u'\n' # Fields - yield " fields='%s'\n" % ' '.join( - set(library.Item._fields.keys() + library.Album._fields.keys()) + yield u" fields='%s'\n" % ' '.join( + set( + list(library.Item._fields.keys()) + + list(library.Album._fields.keys()) + ) ) # Command options @@ -1616,15 +1749,16 @@ for option_type, option_list in opts.items(): if option_list: option_list = ' '.join(option_list) - yield " local %s__%s='%s'\n" % (option_type, cmd, option_list) + yield u" local %s__%s='%s'\n" % ( + option_type, cmd, option_list) - yield ' _beet_dispatch\n' - yield '}\n' + yield u' _beet_dispatch\n' + yield u'}\n' completion_cmd = ui.Subcommand( 'completion', - help='print shell script that provides command line completion' + help=u'print shell script that provides command line 
completion'
 )
 completion_cmd.func = print_completion
 completion_cmd.hide = True
diff -Nru beets-1.3.8+dfsg/beets/ui/__init__.py beets-1.3.19/beets/ui/__init__.py
--- beets-1.3.8+dfsg/beets/ui/__init__.py 2014-09-14 20:14:35.000000000 +0000
+++ beets-1.3.19/beets/ui/__init__.py 2016-06-26 00:42:09.000000000 +0000
@@ -1,5 +1,6 @@
+# -*- coding: utf-8 -*-
 # This file is part of beets.
-# Copyright 2014, Adrian Sampson.
+# Copyright 2016, Adrian Sampson.
 #
 # Permission is hereby granted, free of charge, to any person obtaining
 # a copy of this software and associated documentation files (the
@@ -16,14 +17,14 @@
 interface. To invoke the CLI, just call beets.ui.main(). The actual
 CLI commands are implemented in the ui.commands module.
 """
-from __future__ import print_function
+
+from __future__ import division, absolute_import, print_function

 import locale
 import optparse
 import textwrap
 import sys
 from difflib import SequenceMatcher
-import logging
 import sqlite3
 import errno
 import re
@@ -31,13 +32,15 @@
 import traceback
 import os.path

+from beets import logging
 from beets import library
 from beets import plugins
 from beets import util
 from beets.util.functemplate import Template
 from beets import config
-from beets.util import confit
+from beets.util import confit, as_string
 from beets.autotag import mb
+from beets.dbcore import query as db_query

 # On Windows platforms, use colorama to support "ANSI" terminal colors.
 if sys.platform == 'win32':
@@ -56,8 +59,8 @@

 PF_KEY_QUERIES = {
-    'comp': 'comp:true',
-    'singleton': 'singleton:true',
+    'comp': u'comp:true',
+    'singleton': u'singleton:true',
 }
@@ -67,16 +70,46 @@
     """


-# Utilities.
+# Encoding utilities.
+
+
+def _in_encoding():
+    """Get the encoding to use for *inputting* strings from the console.
+    """
+    return _stream_encoding(sys.stdin)
+
+
+def _out_encoding():
+    """Get the encoding to use for *outputting* strings to the console.
+    """
+    return _stream_encoding(sys.stdout)

-def _encoding():
-    """Tries to guess the encoding used by the terminal."""
+
+def _stream_encoding(stream, default='utf8'):
+    """A helper for `_in_encoding` and `_out_encoding`: get the stream's
+    preferred encoding, using a configured override or a default
+    fallback if neither is specified.
+    """
     # Configured override?
     encoding = config['terminal_encoding'].get()
     if encoding:
         return encoding

-    # Determine from locale settings.
+    # For testing: When sys.stdout or sys.stdin is a StringIO under the
+    # test harness, it doesn't have an `encoding` attribute. Just use
+    # UTF-8.
+    if not hasattr(stream, 'encoding'):
+        return default
+
+    # Python's guessed output stream encoding, or UTF-8 as a fallback
+    # (e.g., when piped to a file).
+    return stream.encoding or default
+
+
+def _arg_encoding():
+    """Get the encoding for command-line arguments (and other OS
+    locale-sensitive strings).
+    """
     try:
         return locale.getdefaultlocale()[1] or 'utf8'
     except ValueError:
@@ -89,25 +122,75 @@
     """Given a list of command-line argument bytestrings, attempts to
     decode them to Unicode strings.
     """
-    return [s.decode(_encoding()) for s in arglist]
+    return [s.decode(_arg_encoding()) for s in arglist]


-def print_(*strings):
+def print_(*strings, **kwargs):
     """Like print, but rather than raising an error when a character
     is not in the terminal's encoding's character set, just silently
     replaces it.
- """ - if strings: - if isinstance(strings[0], unicode): - txt = u' '.join(strings) - else: - txt = ' '.join(strings) + + If the arguments are strings then they're expected to share the same + type: either bytes or unicode. + + The `end` keyword argument behaves similarly to the built-in `print` + (it defaults to a newline). The value should have the same string + type as the arguments. + """ + end = kwargs.get('end') + + if not strings or isinstance(strings[0], unicode): + txt = u' '.join(strings) + txt += u'\n' if end is None else end else: - txt = u'' + txt = b' '.join(strings) + txt += b'\n' if end is None else end + + # Always send bytes to the stdout stream. if isinstance(txt, unicode): - txt = txt.encode(_encoding(), 'replace') - print(txt) + txt = txt.encode(_out_encoding(), 'replace') + + sys.stdout.write(txt) + +# Configuration wrappers. + +def _bool_fallback(a, b): + """Given a boolean or None, return the original value or a fallback. + """ + if a is None: + assert isinstance(b, bool) + return b + else: + assert isinstance(a, bool) + return a + + +def should_write(write_opt=None): + """Decide whether a command that updates metadata should also write + tags, using the importer configuration as the default. + """ + return _bool_fallback(write_opt, config['import']['write'].get(bool)) + + +def should_move(move_opt=None): + """Decide whether a command that updates metadata should also move + files when they're inside the library, using the importer + configuration as the default. + + Specifically, commands should move files after metadata updates only + when the importer is configured *either* to move *or* to copy files. + They should avoid moving files when the importer is configured not + to touch any filenames. + """ + return _bool_fallback( + move_opt, + config['import']['move'].get(bool) or + config['import']['copy'].get(bool) + ) + + +# Input prompts. def input_(prompt=None): """Like `raw_input`, but decodes the result to a Unicode string. @@ -116,19 +199,17 @@ input cursor. """ # raw_input incorrectly sends prompts to stderr, not stdout, so we - # use print() explicitly to display prompts. + # use print_() explicitly to display prompts. # http://bugs.python.org/issue1927 if prompt: - if isinstance(prompt, unicode): - prompt = prompt.encode(_encoding(), 'replace') - print(prompt, end=' ') + print_(prompt, end=' ') try: resp = raw_input() except EOFError: - raise UserError('stdin stream ended while input required') + raise UserError(u'stdin stream ended while input required') - return resp.decode(sys.stdin.encoding or 'utf8', 'ignore') + return resp.decode(_in_encoding(), 'ignore') def input_options(options, require=False, prompt=None, fallback_prompt=None, @@ -172,7 +253,7 @@ found_letter = letter break else: - raise ValueError('no unambiguous lettering found') + raise ValueError(u'no unambiguous lettering found') letters[found_letter.lower()] = option index = option.index(found_letter) @@ -190,7 +271,7 @@ is_default = False # Colorize the letter shortcut. - show_letter = colorize('turquoise' if is_default else 'blue', + show_letter = colorize('action_default' if is_default else 'action', show_letter) # Insert the highlighted letter back into the word. 
@@ -216,11 +297,11 @@ prompt_part_lengths = [] if numrange: if isinstance(default, int): - default_name = str(default) - default_name = colorize('turquoise', default_name) + default_name = unicode(default) + default_name = colorize('action_default', default_name) tmpl = '# selection (default %s)' prompt_parts.append(tmpl % default_name) - prompt_part_lengths.append(len(tmpl % str(default))) + prompt_part_lengths.append(len(tmpl % unicode(default))) else: prompt_parts.append('# selection') prompt_part_lengths.append(len(prompt_parts[-1])) @@ -255,9 +336,9 @@ # Make a fallback prompt too. This is displayed if the user enters # something that is not recognized. if not fallback_prompt: - fallback_prompt = 'Enter one of ' + fallback_prompt = u'Enter one of ' if numrange: - fallback_prompt += '%i-%i, ' % numrange + fallback_prompt += u'%i-%i, ' % numrange fallback_prompt += ', '.join(display_letters) + ':' resp = input_(prompt) @@ -296,19 +377,52 @@ "yes" unless `require` is `True`, in which case there is no default. """ sel = input_options( - ('y', 'n'), require, prompt, 'Enter Y or N:' + ('y', 'n'), require, prompt, u'Enter Y or N:' ) - return sel == 'y' + return sel == u'y' + + +def input_select_objects(prompt, objs, rep): + """Prompt to user to choose all, none, or some of the given objects. + Return the list of selected objects. + + `prompt` is the prompt string to use for each question (it should be + phrased as an imperative verb). `rep` is a function to call on each + object to print it out when confirming objects individually. + """ + choice = input_options( + (u'y', u'n', u's'), False, + u'%s? (Yes/no/select)' % prompt) + print() # Blank line. + + if choice == u'y': # Yes. + return objs + elif choice == u's': # Select. + out = [] + for obj in objs: + rep(obj) + if input_yn(u'%s? (yes/no)' % prompt, True): + out.append(obj) + print() # go to a new line + return out + + else: # No. + return [] + + +# Human output formatting. def human_bytes(size): """Formats size, a number of bytes, in a human-readable way.""" - suffices = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB', 'HB'] - for suffix in suffices: + powers = [u'', u'K', u'M', u'G', u'T', u'P', u'E', u'Z', u'Y', u'H'] + unit = 'B' + for power in powers: if size < 1024: - return "%3.1f %s" % (size, suffix) + return u"%3.1f %s%s" % (size, power, unit) size /= 1024.0 - return "big" + unit = u'iB' + return u"big" def human_seconds(interval): @@ -316,13 +430,13 @@ interval using English words. """ units = [ - (1, 'second'), - (60, 'minute'), - (60, 'hour'), - (24, 'day'), - (7, 'week'), - (52, 'year'), - (10, 'decade'), + (1, u'second'), + (60, u'minute'), + (60, u'hour'), + (24, u'day'), + (7, u'week'), + (52, u'year'), + (10, u'decade'), ] for i in range(len(units) - 1): increment, suffix = units[i] @@ -335,7 +449,7 @@ increment, suffix = units[-1] interval /= float(increment) - return "%3.1f %ss" % (interval, suffix) + return u"%3.1f %ss" % (interval, suffix) def human_seconds_short(interval): @@ -346,16 +460,45 @@ return u'%i:%02i' % (interval // 60, interval % 60) +# Colorization. + # ANSI terminal colorization code heavily inspired by pygments: # http://dev.pocoo.org/hg/pygments-main/file/b2deea5b5030/pygments/console.py # (pygments is by Tim Hatch, Armin Ronacher, et al.) 
COLOR_ESCAPE = "\x1b[" -DARK_COLORS = ["black", "darkred", "darkgreen", "brown", "darkblue", - "purple", "teal", "lightgray"] -LIGHT_COLORS = ["darkgray", "red", "green", "yellow", "blue", - "fuchsia", "turquoise", "white"] +DARK_COLORS = { + "black": 0, + "darkred": 1, + "darkgreen": 2, + "brown": 3, + "darkyellow": 3, + "darkblue": 4, + "purple": 5, + "darkmagenta": 5, + "teal": 6, + "darkcyan": 6, + "lightgray": 7 +} +LIGHT_COLORS = { + "darkgray": 0, + "red": 1, + "green": 2, + "yellow": 3, + "blue": 4, + "fuchsia": 5, + "magenta": 5, + "turquoise": 6, + "cyan": 6, + "white": 7 +} RESET_COLOR = COLOR_ESCAPE + "39;49;00m" +# These abstract COLOR_NAMES are lazily mapped on to the actual color in COLORS +# as they are defined in the configuration files, see function: colorize +COLOR_NAMES = ['text_success', 'text_warning', 'text_error', 'text_highlight', + 'text_highlight_minor', 'action_default', 'action'] +COLORS = None + def _colorize(color, text): """Returns a string that prints the given text in the given color @@ -363,25 +506,36 @@ in DARK_COLORS or LIGHT_COLORS. """ if color in DARK_COLORS: - escape = COLOR_ESCAPE + "%im" % (DARK_COLORS.index(color) + 30) + escape = COLOR_ESCAPE + "%im" % (DARK_COLORS[color] + 30) elif color in LIGHT_COLORS: - escape = COLOR_ESCAPE + "%i;01m" % (LIGHT_COLORS.index(color) + 30) + escape = COLOR_ESCAPE + "%i;01m" % (LIGHT_COLORS[color] + 30) else: - raise ValueError('no such color %s', color) + raise ValueError(u'no such color %s', color) return escape + text + RESET_COLOR -def colorize(color, text): +def colorize(color_name, text): """Colorize text if colored output is enabled. (Like _colorize but conditional.) """ - if config['color']: + if config['ui']['color']: + global COLORS + if not COLORS: + COLORS = dict((name, config['ui']['colors'][name].get(unicode)) + for name in COLOR_NAMES) + # In case a 3rd party plugin is still passing the actual color ('red') + # instead of the abstract color name ('text_error') + color = COLORS.get(color_name) + if not color: + log.debug(u'Invalid color_name: {0}', color_name) + color = color_name return _colorize(color, text) else: return text -def _colordiff(a, b, highlight='red', minor_highlight='lightgray'): +def _colordiff(a, b, highlight='text_highlight', + minor_highlight='text_highlight_minor'): """Given two values, return the same pair of strings except with their differences highlighted in the specified color. Strings are highlighted intelligently to show differences; other values are @@ -431,40 +585,16 @@ return u''.join(a_out), u''.join(b_out) -def colordiff(a, b, highlight='red'): +def colordiff(a, b, highlight='text_highlight'): """Colorize differences between two values if color is enabled. (Like _colordiff but conditional.) """ - if config['color']: + if config['ui']['color']: return _colordiff(a, b, highlight) else: return unicode(a), unicode(b) -def color_diff_suffix(a, b, highlight='red'): - """Colorize the differing suffix between two strings.""" - a, b = unicode(a), unicode(b) - if not config['color']: - return a, b - - # Fast path. - if a == b: - return a, b - - # Find the longest common prefix. - first_diff = None - for i in range(min(len(a), len(b))): - if a[i] != b[i]: - first_diff = i - break - else: - first_diff = min(len(a), len(b)) - - # Colorize from the first difference on. 
- return (a[:first_diff] + colorize(highlight, a[first_diff:]), - b[:first_diff] + colorize(highlight, b[first_diff:])) - - def get_path_formats(subview=None): """Get the configuration's path formats as a list of query/template pairs. @@ -494,31 +624,6 @@ return replacements -def _pick_format(album, fmt=None): - """Pick a format string for printing Album or Item objects, - falling back to config options and defaults. - """ - if fmt: - return fmt - if album: - return config['list_format_album'].get(unicode) - else: - return config['list_format_item'].get(unicode) - - -def print_obj(obj, lib, fmt=None): - """Print an Album or Item object. If `fmt` is specified, use that - format string. Otherwise, use the configured template. - """ - album = isinstance(obj, library.Album) - fmt = _pick_format(album, fmt) - if isinstance(fmt, Template): - template = fmt - else: - template = Template(fmt) - print_(obj.evaluate_template(template)) - - def term_width(): """Get the width (columns) of the terminal.""" fallback = config['ui']['terminal_width'].get(int) @@ -569,7 +674,8 @@ if isinstance(oldval, basestring): oldstr, newstr = colordiff(oldval, newstr) else: - oldstr, newstr = colorize('red', oldstr), colorize('red', newstr) + oldstr = colorize('text_error', oldstr) + newstr = colorize('text_error', newstr) return u'{0} -> {1}'.format(oldstr, newstr) @@ -605,18 +711,178 @@ changes.append(u' {0}: {1}'.format( field, - colorize('red', new.formatted()[field]) + colorize('text_highlight', new.formatted()[field]) )) # Print changes. if changes or always: - print_obj(old, old._db) + print_(format(old)) if changes: print_(u'\n'.join(changes)) return bool(changes) +def show_path_changes(path_changes): + """Given a list of tuples (source, destination) that indicate the + path changes, log the changes as INFO-level output to the beets log. + The output is guaranteed to be unicode. + + Every pair is shown on a single line if the terminal width permits it, + else it is split over two lines. E.g., + + Source -> Destination + + vs. + + Source + -> Destination + """ + sources, destinations = zip(*path_changes) + + # Ensure unicode output + sources = list(map(util.displayable_path, sources)) + destinations = list(map(util.displayable_path, destinations)) + + # Calculate widths for terminal split + col_width = (term_width() - len(' -> ')) // 2 + max_width = len(max(sources + destinations, key=len)) + + if max_width > col_width: + # Print every change over two lines + for source, dest in zip(sources, destinations): + log.info(u'{0} \n -> {1}', source, dest) + else: + # Print every change on a single line, and add a header + title_pad = max_width - len('Source ') + len(' -> ') + + log.info(u'Source {0} Destination', ' ' * title_pad) + for source, dest in zip(sources, destinations): + pad = max_width - len(source) + log.info(u'{0} {1} -> {2}', source, ' ' * pad, dest) + + +class CommonOptionsParser(optparse.OptionParser, object): + """Offers a simple way to add common formatting options. + + Options available include: + - matching albums instead of tracks: add_album_option() + - showing paths instead of items/albums: add_path_option() + - changing the format of displayed items/albums: add_format_option() + + The last one can have several behaviors: + - against a special target + - with a certain format + - autodetected target with the album option + + Each method is fully documented in the related method. 
+ """ + def __init__(self, *args, **kwargs): + super(CommonOptionsParser, self).__init__(*args, **kwargs) + self._album_flags = False + # this serves both as an indicator that we offer the feature AND allows + # us to check whether it has been specified on the CLI - bypassing the + # fact that arguments may be in any order + + def add_album_option(self, flags=('-a', '--album')): + """Add a -a/--album option to match albums instead of tracks. + + If used then the format option can auto-detect whether we're setting + the format for items or albums. + Sets the album property on the options extracted from the CLI. + """ + album = optparse.Option(*flags, action='store_true', + help=u'match albums instead of tracks') + self.add_option(album) + self._album_flags = set(flags) + + def _set_format(self, option, opt_str, value, parser, target=None, + fmt=None, store_true=False): + """Internal callback that sets the correct format while parsing CLI + arguments. + """ + if store_true: + setattr(parser.values, option.dest, True) + + # Use the explicitly specified format, or the string from the option. + if fmt: + value = fmt + elif value: + value, = decargs([value]) + else: + value = u'' + + parser.values.format = value + if target: + config[target._format_config_key].set(value) + else: + if self._album_flags: + if parser.values.album: + target = library.Album + else: + # the option is either missing either not parsed yet + if self._album_flags & set(parser.rargs): + target = library.Album + else: + target = library.Item + config[target._format_config_key].set(value) + else: + config[library.Item._format_config_key].set(value) + config[library.Album._format_config_key].set(value) + + def add_path_option(self, flags=('-p', '--path')): + """Add a -p/--path option to display the path instead of the default + format. + + By default this affects both items and albums. If add_album_option() + is used then the target will be autodetected. + + Sets the format property to u'$path' on the options extracted from the + CLI. + """ + path = optparse.Option(*flags, nargs=0, action='callback', + callback=self._set_format, + callback_kwargs={'fmt': '$path', + 'store_true': True}, + help=u'print paths for matched items or albums') + self.add_option(path) + + def add_format_option(self, flags=('-f', '--format'), target=None): + """Add -f/--format option to print some LibModel instances with a + custom format. + + `target` is optional and can be one of ``library.Item``, 'item', + ``library.Album`` and 'album'. + + Several behaviors are available: + - if `target` is given then the format is only applied to that + LibModel + - if the album option is used then the target will be autodetected + - otherwise the format is applied to both items and albums. + + Sets the format property on the options extracted from the CLI. + """ + kwargs = {} + if target: + if isinstance(target, basestring): + target = {'item': library.Item, + 'album': library.Album}[target] + kwargs['target'] = target + + opt = optparse.Option(*flags, action='callback', + callback=self._set_format, + callback_kwargs=kwargs, + help=u'print with custom format') + self.add_option(opt) + + def add_all_common_options(self): + """Add album, path and format options. + """ + self.add_album_option() + self.add_path_option() + self.add_format_option() + + # Subcommand parsing infrastructure. # # This is a fairly generic subcommand parser for optparse. It is @@ -634,10 +900,10 @@ the subcommand; aliases are alternate names. 
parser is an OptionParser responsible for parsing the subcommand's options. help is a short description of the command. If no parser is - given, it defaults to a new, empty OptionParser. + given, it defaults to a new, empty CommonOptionsParser. """ self.name = name - self.parser = parser or optparse.OptionParser() + self.parser = parser or CommonOptionsParser() self.aliases = aliases self.help = help self.hide = hide @@ -656,11 +922,11 @@ @root_parser.setter def root_parser(self, root_parser): self._root_parser = root_parser - self.parser.prog = '{0} {1}'.format(root_parser.get_prog_name(), - self.name) + self.parser.prog = '{0} {1}'.format( + as_string(root_parser.get_prog_name()), self.name) -class SubcommandsOptionParser(optparse.OptionParser): +class SubcommandsOptionParser(CommonOptionsParser): """A variant of OptionParser that parses subcommands and their arguments. """ @@ -672,13 +938,13 @@ """ # A more helpful default usage. if 'usage' not in kwargs: - kwargs['usage'] = """ + kwargs['usage'] = u""" %prog COMMAND [ARGS...] %prog help COMMAND""" kwargs['add_help_option'] = False # Super constructor. - optparse.OptionParser.__init__(self, *args, **kwargs) + super(SubcommandsOptionParser, self).__init__(*args, **kwargs) # Our root parser needs to stop on the first unrecognized argument. self.disable_interspersed_args() @@ -695,7 +961,7 @@ # Add the list of subcommands to the help message. def format_help(self, formatter=None): # Get the original help message, to which we will append. - out = optparse.OptionParser.format_help(self, formatter) + out = super(SubcommandsOptionParser, self).format_help(formatter) if formatter is None: formatter = self.formatter @@ -735,7 +1001,8 @@ result.append(name) help_width = formatter.width - help_position help_lines = textwrap.wrap(subcommand.help, help_width) - result.append("%*s%s\n" % (indent_first, "", help_lines[0])) + help_line = help_lines[0] if help_lines else '' + result.append("%*s%s\n" % (indent_first, "", help_line)) result.extend(["%*s%s\n" % (help_position, "", line) for line in help_lines[1:]]) formatter.dedent() @@ -780,7 +1047,7 @@ cmdname = args.pop(0) subcommand = self._subcommand_for_name(cmdname) if not subcommand: - raise UserError("unknown command '{0}'".format(cmdname)) + raise UserError(u"unknown command '{0}'".format(cmdname)) suboptions, subargs = subcommand.parse_args(args) return subcommand, suboptions, subargs @@ -831,7 +1098,8 @@ """Load the plugins specified in the configuration. """ paths = config['pluginpath'].get(confit.StrSeq(split=False)) - paths = map(util.normpath, paths) + paths = list(map(util.normpath, paths)) + log.debug(u'plugin paths: {0}', util.displayable_path(paths)) import beetsplug beetsplug.__path__ = paths + beetsplug.__path__ @@ -855,22 +1123,17 @@ plugins = _load_plugins(config) - # Temporary: Migrate from 1.0-style configuration. - from beets.ui import migrate - migrate.automigrate() - # Get the default subcommands. from beets.ui.commands import default_commands subcommands = list(default_commands) - subcommands.append(migrate.migrate_cmd) subcommands.extend(plugins.commands()) if lib is None: lib = _open_library(config) plugins.send("library_opened", lib=lib) - library.Item._types = plugins.types(library.Item) - library.Album._types = plugins.types(library.Album) + library.Item._types.update(plugins.types(library.Item)) + library.Album._types.update(plugins.types(library.Album)) return subcommands, plugins, lib @@ -888,21 +1151,43 @@ config.set_args(options) # Configure the logger. 
- if config['verbose'].get(bool): - log.setLevel(logging.DEBUG) + if config['verbose'].get(int): + log.set_global_level(logging.DEBUG) else: - log.setLevel(logging.INFO) + log.set_global_level(logging.INFO) + + # Ensure compatibility with old (top-level) color configuration. + # Deprecation msg to motivate user to switch to config['ui']['color]. + if config['color'].exists(): + log.warning(u'Warning: top-level configuration of `color` ' + u'is deprecated. Configure color use under `ui`. ' + u'See documentation for more info.') + config['ui']['color'].set(config['color'].get(bool)) + + # Compatibility from list_format_{item,album} to format_{item,album} + for elem in ('item', 'album'): + old_key = 'list_format_{0}'.format(elem) + if config[old_key].exists(): + new_key = 'format_{0}'.format(elem) + log.warning( + u'Warning: configuration uses "{0}" which is deprecated' + u' in favor of "{1}" now that it affects all commands. ' + u'See changelog & documentation.', + old_key, + new_key, + ) + config[new_key].set(config[old_key]) config_path = config.user_config_path() if os.path.isfile(config_path): - log.debug(u'user configuration: {0}'.format( - util.displayable_path(config_path))) + log.debug(u'user configuration: {0}', + util.displayable_path(config_path)) else: - log.debug(u'no user configuration found at {0}'.format( - util.displayable_path(config_path))) + log.debug(u'no user configuration found at {0}', + util.displayable_path(config_path)) - log.debug(u'data directory: {0}' - .format(util.displayable_path(config.config_dir()))) + log.debug(u'data directory: {0}', + util.displayable_path(config.config_dir())) return config @@ -917,14 +1202,16 @@ get_path_formats(), get_replacements(), ) - except sqlite3.OperationalError: + lib.get_item(0) # Test database connection. + except (sqlite3.OperationalError, sqlite3.DatabaseError): + log.debug(u'{}', traceback.format_exc()) raise UserError(u"database file {0} could not be opened".format( util.displayable_path(dbpath) )) log.debug(u'library database: {0}\n' - u'library directory: {1}' - .format(util.displayable_path(lib.path), - util.displayable_path(lib.directory))) + u'library directory: {1}', + util.displayable_path(lib.path), + util.displayable_path(lib.directory)) return lib @@ -933,20 +1220,32 @@ handling. 
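The compatibility shims above (top-level `color`, the `list_format_*` keys) follow one generic pattern: copy a deprecated key to its replacement and warn. A plain-dict sketch of that idea, independent of confit:

import warnings

DEPRECATED_KEYS = {
    'list_format_item': 'format_item',
    'list_format_album': 'format_album',
}

def migrate_deprecated_keys(config):
    # Copy each deprecated key to its replacement so old configs keep working.
    for old_key, new_key in DEPRECATED_KEYS.items():
        if old_key in config:
            warnings.warn(u'"{0}" is deprecated in favor of "{1}"'
                          .format(old_key, new_key))
            config.setdefault(new_key, config[old_key])

cfg = {'list_format_item': u'$artist - $title'}
migrate_deprecated_keys(cfg)
assert cfg['format_item'] == u'$artist - $title'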
""" parser = SubcommandsOptionParser() + parser.add_format_option(flags=('--format-item',), target=library.Item) + parser.add_format_option(flags=('--format-album',), target=library.Album) parser.add_option('-l', '--library', dest='library', - help='library database file to use') + help=u'library database file to use') parser.add_option('-d', '--directory', dest='directory', - help="destination music directory") - parser.add_option('-v', '--verbose', dest='verbose', action='store_true', - help='print debugging information') + help=u"destination music directory") + parser.add_option('-v', '--verbose', dest='verbose', action='count', + help=u'log more details (use twice for even more)') parser.add_option('-c', '--config', dest='config', - help='path to configuration file') + help=u'path to configuration file') parser.add_option('-h', '--help', dest='help', action='store_true', - help='how this help message and exit') + help=u'show this help message and exit') parser.add_option('--version', dest='version', action='store_true', help=optparse.SUPPRESS_HELP) options, subargs = parser.parse_global_options(args) + + # Special case for the `config --edit` command: bypass _setup so + # that an invalid configuration does not prevent the editor from + # starting. + if subargs and subargs[0] == 'config' \ + and ('-e' in subargs or '--edit' in subargs): + from beets.ui.commands import config_edit + return config_edit() + + test_lib = bool(lib) subcommands, plugins, lib = _setup(options, lib) parser.add_subcommand(*subcommands) @@ -954,6 +1253,9 @@ subcommand.func(lib, suboptions, subargs) plugins.send('cli_exit', lib=lib) + if not test_lib: + # Clean up the library unless it came from the test harness. + lib._close() def main(args=None): @@ -964,7 +1266,7 @@ _raw_main(args) except UserError as exc: message = exc.args[0] if exc.args else None - log.error(u'error: {0}'.format(message)) + log.error(u'error: {0}', message) sys.exit(1) except util.HumanReadableException as exc: exc.log(log) @@ -972,11 +1274,14 @@ except library.FileOperationError as exc: # These errors have reasonable human-readable descriptions, but # we still want to log their tracebacks for debugging. - log.debug(traceback.format_exc()) - log.error(exc) + log.debug('{}', traceback.format_exc()) + log.error('{}', exc) sys.exit(1) except confit.ConfigError as exc: - log.error(u'configuration error: {0}'.format(exc)) + log.error(u'configuration error: {0}', exc) + sys.exit(1) + except db_query.InvalidQueryError as exc: + log.error(u'invalid query: {0}', exc) sys.exit(1) except IOError as exc: if exc.errno == errno.EPIPE: @@ -986,4 +1291,4 @@ raise except KeyboardInterrupt: # Silently ignore ^C except in verbose mode. - log.debug(traceback.format_exc()) + log.debug(u'{}', traceback.format_exc()) diff -Nru beets-1.3.8+dfsg/beets/ui/migrate.py beets-1.3.19/beets/ui/migrate.py --- beets-1.3.8+dfsg/beets/ui/migrate.py 2014-04-12 02:41:18.000000000 +0000 +++ beets-1.3.19/beets/ui/migrate.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,414 +0,0 @@ -# This file is part of beets. -# Copyright 2013, Adrian Sampson. 
-# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. - -"""Conversion from legacy (pre-1.1) configuration to Confit/YAML -configuration. -""" -import os -import ConfigParser -import codecs -import yaml -import logging -import time -import itertools -import re - -import beets -from beets import util -from beets import ui -from beets.util import confit - -CONFIG_PATH_VAR = 'BEETSCONFIG' -DEFAULT_CONFIG_FILENAME_UNIX = '.beetsconfig' -DEFAULT_CONFIG_FILENAME_WINDOWS = 'beetsconfig.ini' -DEFAULT_LIBRARY_FILENAME_UNIX = '.beetsmusic.blb' -DEFAULT_LIBRARY_FILENAME_WINDOWS = 'beetsmusic.blb' -WINDOWS_BASEDIR = os.environ.get('APPDATA') or '~' - -OLD_CONFIG_SUFFIX = '.old' -PLUGIN_NAMES = { - 'rdm': 'random', - 'fuzzy_search': 'fuzzy', -} -AUTO_KEYS = ('automatic', 'autofetch', 'autoembed', 'autoscrub') -IMPORTFEEDS_PREFIX = 'feeds_' -CONFIG_MIGRATED_MESSAGE = u""" -You appear to be upgrading from beets 1.0 (or earlier) to 1.1. Your -configuration file has been migrated automatically to: -{newconfig} -Edit this file to configure beets. You might want to remove your -old-style ".beetsconfig" file now. See the documentation for more -details on the new configuration system: -http://beets.readthedocs.org/page/reference/config.html -""".strip() -DB_MIGRATED_MESSAGE = u'Your database file has also been copied to:\n{newdb}' -YAML_COMMENT = '# Automatically migrated from legacy .beetsconfig.\n\n' - -log = logging.getLogger('beets') - - -# An itertools recipe. -def grouper(n, iterable): - args = [iter(iterable)] * n - return itertools.izip_longest(*args) - - -def _displace(fn): - """Move a file aside using a timestamp suffix so a new file can be - put in its place. - """ - util.move( - fn, - u'{0}.old.{1}'.format(fn, int(time.time())), - True - ) - - -def default_paths(): - """Produces the appropriate default config and library database - paths for the current system. On Unix, this is always in ~. On - Windows, tries ~ first and then $APPDATA for the config and library - files (for backwards compatibility). - """ - windows = os.path.__name__ == 'ntpath' - if windows: - windata = os.environ.get('APPDATA') or '~' - - # Shorthand for joining paths. - def exp(*vals): - return os.path.expanduser(os.path.join(*vals)) - - config = exp('~', DEFAULT_CONFIG_FILENAME_UNIX) - if windows and not os.path.exists(config): - config = exp(windata, DEFAULT_CONFIG_FILENAME_WINDOWS) - - libpath = exp('~', DEFAULT_LIBRARY_FILENAME_UNIX) - if windows and not os.path.exists(libpath): - libpath = exp(windata, DEFAULT_LIBRARY_FILENAME_WINDOWS) - - return config, libpath - - -def get_config(): - """Using the same logic as beets 1.0, locate and read the - .beetsconfig file. Return a ConfigParser instance or None if no - config is found. 
- """ - default_config, default_libpath = default_paths() - if CONFIG_PATH_VAR in os.environ: - configpath = os.path.expanduser(os.environ[CONFIG_PATH_VAR]) - else: - configpath = default_config - - config = ConfigParser.SafeConfigParser() - if os.path.exists(util.syspath(configpath)): - with codecs.open(configpath, 'r', encoding='utf-8') as f: - config.readfp(f) - return config, configpath - else: - return None, configpath - - -def flatten_config(config): - """Given a ConfigParser, flatten the values into a dict-of-dicts - representation where each section gets its own dictionary of values. - """ - out = confit.OrderedDict() - for section in config.sections(): - sec_dict = out[section] = confit.OrderedDict() - for option in config.options(section): - sec_dict[option] = config.get(section, option, True) - return out - - -def transform_value(value): - """Given a string read as the value of a config option, return a - massaged version of that value (possibly with a different type). - """ - # Booleans. - if value.lower() in ('false', 'no', 'off'): - return False - elif value.lower() in ('true', 'yes', 'on'): - return True - - # Integers. - try: - return int(value) - except ValueError: - pass - - # Floats. - try: - return float(value) - except ValueError: - pass - - return value - - -def transform_data(data): - """Given a dict-of-dicts representation of legacy config data, tweak - the data into a new form. This new form is suitable for dumping as - YAML. - """ - out = confit.OrderedDict() - - for section, pairs in data.items(): - if section == 'beets': - # The "main" section. In the new config system, these values - # are in the "root": no section at all. - for key, value in pairs.items(): - value = transform_value(value) - - if key.startswith('import_'): - # Importer config is now under an "import:" key. - if 'import' not in out: - out['import'] = confit.OrderedDict() - out['import'][key[7:]] = value - - elif key == 'plugins': - # Renamed plugins. - plugins = value.split() - new_plugins = [PLUGIN_NAMES.get(p, p) for p in plugins] - out['plugins'] = ' '.join(new_plugins) - - elif key == 'replace': - # YAMLy representation for character replacements. - replacements = confit.OrderedDict() - for pat, repl in grouper(2, value.split()): - if repl == '': - repl = '' - replacements[pat] = repl - out['replace'] = replacements - - elif key == 'pluginpath': - # Used to be a colon-separated string. Now a list. - out['pluginpath'] = value.split(':') - - else: - out[key] = value - - elif pairs: - # Other sections (plugins, etc). - sec_out = out[section] = confit.OrderedDict() - for key, value in pairs.items(): - - # Standardized "auto" option. - if key in AUTO_KEYS: - key = 'auto' - - # Unnecessary : hack in queries. - if section == 'paths': - key = key.replace('_', ':') - - # Changed option names for importfeeds plugin. - if section == 'importfeeds': - if key.startswith(IMPORTFEEDS_PREFIX): - key = key[len(IMPORTFEEDS_PREFIX):] - - sec_out[key] = transform_value(value) - - return out - - -class Dumper(yaml.SafeDumper): - """A PyYAML Dumper that represents OrderedDicts as ordinary mappings - (in order, of course). 
- """ - # From http://pyyaml.org/attachment/ticket/161/use_ordered_dict.py - def represent_mapping(self, tag, mapping, flow_style=None): - value = [] - node = yaml.MappingNode(tag, value, flow_style=flow_style) - if self.alias_key is not None: - self.represented_objects[self.alias_key] = node - best_style = True - if hasattr(mapping, 'items'): - mapping = list(mapping.items()) - for item_key, item_value in mapping: - node_key = self.represent_data(item_key) - node_value = self.represent_data(item_value) - if not (isinstance(node_key, yaml.ScalarNode) and - not node_key.style): - best_style = False - if not (isinstance(node_value, yaml.ScalarNode) and - not node_value.style): - best_style = False - value.append((node_key, node_value)) - if flow_style is None: - if self.default_flow_style is not None: - node.flow_style = self.default_flow_style - else: - node.flow_style = best_style - return node -Dumper.add_representer(confit.OrderedDict, Dumper.represent_dict) - - -def migrate_config(replace=False): - """Migrate a legacy beetsconfig file to a new-style config.yaml file - in an appropriate place. If `replace` is enabled, then any existing - config.yaml will be moved aside. Otherwise, the process is aborted - when the file exists. - """ - - # Load legacy configuration data, if any. - config, configpath = get_config() - if not config: - log.debug(u'no config file found at {0}'.format( - util.displayable_path(configpath) - )) - return - - # Get the new configuration file path and possibly move it out of - # the way. - destfn = os.path.join(beets.config.config_dir(), confit.CONFIG_FILENAME) - if os.path.exists(destfn): - if replace: - log.debug(u'moving old config aside: {0}'.format( - util.displayable_path(destfn) - )) - _displace(destfn) - else: - # File exists and we won't replace it. We're done. - return - - log.debug(u'migrating config file {0}'.format( - util.displayable_path(configpath) - )) - - # Convert the configuration to a data structure ready to be dumped - # as the new Confit file. - data = transform_data(flatten_config(config)) - - # Encode result as YAML. - yaml_out = yaml.dump( - data, - Dumper=Dumper, - default_flow_style=False, - indent=4, - width=1000, - ) - # A ridiculous little hack to add some whitespace between "sections" - # in the YAML output. I hope this doesn't break any YAML syntax. - yaml_out = re.sub(r'(\n\w+:\n [^-\s])', '\n\\1', yaml_out) - yaml_out = YAML_COMMENT + yaml_out - - # Write the data to the new config destination. - log.debug(u'writing migrated config to {0}'.format( - util.displayable_path(destfn) - )) - with open(destfn, 'w') as f: - f.write(yaml_out) - return destfn - - -def migrate_db(replace=False): - """Copy the beets library database file to the new location (e.g., - from ~/.beetsmusic.blb to ~/.config/beets/library.db). - """ - _, srcfn = default_paths() - destfn = beets.config['library'].as_filename() - - if not os.path.exists(srcfn) or srcfn == destfn: - # Old DB does not exist or we're configured to point to the same - # database. Do nothing. 
- return - - if os.path.exists(destfn): - if replace: - log.debug(u'moving old database aside: {0}'.format( - util.displayable_path(destfn) - )) - _displace(destfn) - else: - return - - log.debug(u'copying database from {0} to {1}'.format( - util.displayable_path(srcfn), util.displayable_path(destfn) - )) - util.copy(srcfn, destfn) - return destfn - - -def migrate_state(replace=False): - """Copy the beets runtime state file from the old path (i.e., - ~/.beetsstate) to the new path (i.e., ~/.config/beets/state.pickle). - """ - srcfn = os.path.expanduser(os.path.join('~', '.beetsstate')) - if not os.path.exists(srcfn): - return - - destfn = beets.config['statefile'].as_filename() - if os.path.exists(destfn): - if replace: - _displace(destfn) - else: - return - - log.debug(u'copying state file from {0} to {1}'.format( - util.displayable_path(srcfn), util.displayable_path(destfn) - )) - util.copy(srcfn, destfn) - return destfn - - -# Automatic migration when beets starts. - -def automigrate(): - """Migrate the configuration, database, and state files. If any - migration occurs, print out a notice with some helpful next steps. - """ - config_fn = migrate_config() - db_fn = migrate_db() - migrate_state() - - if config_fn: - ui.print_(ui.colorize('fuchsia', u'MIGRATED CONFIGURATION')) - - ui.print_(CONFIG_MIGRATED_MESSAGE.format( - newconfig=util.displayable_path(config_fn)) - ) - if db_fn: - ui.print_(DB_MIGRATED_MESSAGE.format( - newdb=util.displayable_path(db_fn) - )) - - ui.input_(ui.colorize('fuchsia', u'Press ENTER to continue:')) - ui.print_() - - -# CLI command for explicit migration. - -migrate_cmd = ui.Subcommand('migrate', help='convert legacy config') - - -def migrate_func(lib, opts, args): - """Explicit command for migrating files. Existing files in each - destination are moved aside. - """ - config_fn = migrate_config(replace=True) - if config_fn: - log.info(u'Migrated configuration to: {0}'.format( - util.displayable_path(config_fn) - )) - db_fn = migrate_db(replace=True) - if db_fn: - log.info(u'Migrated library database to: {0}'.format( - util.displayable_path(db_fn) - )) - state_fn = migrate_state(replace=True) - if state_fn: - log.info(u'Migrated state file to: {0}'.format( - util.displayable_path(state_fn) - )) -migrate_cmd.func = migrate_func diff -Nru beets-1.3.8+dfsg/beets/util/artresizer.py beets-1.3.19/beets/util/artresizer.py --- beets-1.3.8+dfsg/beets/util/artresizer.py 2014-04-10 18:18:13.000000000 +0000 +++ beets-1.3.19/beets/util/artresizer.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Fabrice Laporte +# Copyright 2016, Fabrice Laporte # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,11 +16,15 @@ """Abstraction layer to resize images using PIL, ImageMagick, or a public resizing proxy if neither is available. 
""" +from __future__ import division, absolute_import, print_function + import urllib import subprocess import os +import re from tempfile import NamedTemporaryFile -import logging + +from beets import logging from beets import util # Resizing methods @@ -38,7 +43,7 @@ """ return '{0}?{1}'.format(PROXY_URL, urllib.urlencode({ 'url': url.replace('http://', ''), - 'w': str(maxwidth), + 'w': bytes(maxwidth), })) @@ -57,9 +62,8 @@ """ path_out = path_out or temp_file_for(path_in) from PIL import Image - log.debug(u'artresizer: PIL resizing {0} to {1}'.format( - util.displayable_path(path_in), util.displayable_path(path_out) - )) + log.debug(u'artresizer: PIL resizing {0} to {1}', + util.displayable_path(path_in), util.displayable_path(path_out)) try: im = Image.open(util.syspath(path_in)) @@ -68,20 +72,18 @@ im.save(path_out) return path_out except IOError: - log.error(u"PIL cannot create thumbnail for '{0}'".format( - util.displayable_path(path_in) - )) + log.error(u"PIL cannot create thumbnail for '{0}'", + util.displayable_path(path_in)) return path_in def im_resize(maxwidth, path_in, path_out=None): """Resize using ImageMagick's ``convert`` tool. - tool. Return the output path of resized image. + Return the output path of resized image. """ path_out = path_out or temp_file_for(path_in) - log.debug(u'artresizer: ImageMagick resizing {0} to {1}'.format( - util.displayable_path(path_in), util.displayable_path(path_out) - )) + log.debug(u'artresizer: ImageMagick resizing {0} to {1}', + util.displayable_path(path_in), util.displayable_path(path_out)) # "-resize widthxheight>" shrinks images with dimension(s) larger # than the corresponding width and/or height dimension(s). The > @@ -89,13 +91,13 @@ # compatibility. try: util.command_output([ - 'convert', util.syspath(path_in), - '-resize', '{0}x^>'.format(maxwidth), path_out + b'convert', util.syspath(path_in, prefix=False), + b'-resize', b'{0}x^>'.format(maxwidth), + util.syspath(path_out, prefix=False), ]) except subprocess.CalledProcessError: - log.warn(u'artresizer: IM convert failed for {0}'.format( - util.displayable_path(path_in) - )) + log.warn(u'artresizer: IM convert failed for {0}', + util.displayable_path(path_in)) return path_in return path_out @@ -106,21 +108,56 @@ } +def pil_getsize(path_in): + from PIL import Image + try: + im = Image.open(util.syspath(path_in)) + return im.size + except IOError as exc: + log.error(u"PIL could not read file {}: {}", + util.displayable_path(path_in), exc) + + +def im_getsize(path_in): + cmd = [b'identify', b'-format', b'%w %h', + util.syspath(path_in, prefix=False)] + try: + out = util.command_output(cmd) + except subprocess.CalledProcessError as exc: + log.warn(u'ImageMagick size query failed') + log.debug( + u'`convert` exited with (status {}) when ' + u'getting size with command {}:\n{}', + exc.returncode, cmd, exc.output.strip() + ) + return + try: + return tuple(map(int, out.split(b' '))) + except IndexError: + log.warn(u'Could not understand IM output: {0!r}', out) + + +BACKEND_GET_SIZE = { + PIL: pil_getsize, + IMAGEMAGICK: im_getsize, +} + + class Shareable(type): """A pseudo-singleton metaclass that allows both shared and non-shared instances. The ``MyClass.shared`` property holds a lazily-created shared instance of ``MyClass`` while calling ``MyClass()`` to construct a new object works as usual. 
""" - def __init__(cls, name, bases, dict): - super(Shareable, cls).__init__(name, bases, dict) - cls._instance = None + def __init__(self, name, bases, dict): + super(Shareable, self).__init__(name, bases, dict) + self._instance = None @property - def shared(cls): - if cls._instance is None: - cls._instance = cls() - return cls._instance + def shared(self): + if self._instance is None: + self._instance = self() + return self._instance class ArtResizer(object): @@ -128,12 +165,12 @@ """ __metaclass__ = Shareable - def __init__(self, method=None): - """Create a resizer object for the given method or, if none is - specified, with an inferred method. + def __init__(self): + """Create a resizer object with an inferred method. """ - self.method = method or self._guess_method() - log.debug(u"artresizer: method is {0}".format(self.method)) + self.method = self._check_method() + log.debug(u"artresizer: method is {0}", self.method) + self.can_compare = self._can_compare() def resize(self, maxwidth, path_in, path_out=None): """Manipulate an image file according to the method, returning a @@ -141,7 +178,7 @@ temporary file. For WEBPROXY, returns `path_in` unmodified. """ if self.local: - func = BACKEND_FUNCS[self.method] + func = BACKEND_FUNCS[self.method[0]] return func(maxwidth, path_in, path_out) else: return path_in @@ -159,30 +196,65 @@ @property def local(self): """A boolean indicating whether the resizing method is performed - locally (i.e., PIL or IMAGEMAGICK). + locally (i.e., PIL or ImageMagick). """ - return self.method in BACKEND_FUNCS + return self.method[0] in BACKEND_FUNCS - @staticmethod - def _guess_method(): - """Determine which resizing method to use. Returns PIL, - IMAGEMAGICK, or WEBPROXY depending on available dependencies. + def get_size(self, path_in): + """Return the size of an image file as an int couple (width, height) + in pixels. + + Only available locally """ - # Try importing PIL. - try: - __import__('PIL', fromlist=['Image']) - return PIL - except ImportError: - pass - - # Try invoking ImageMagick's "convert". - try: - out = util.command_output(['convert', '--version']) - if 'imagemagick' in out.lower(): - # system32/convert.exe may be interfering - return IMAGEMAGICK - except (subprocess.CalledProcessError, OSError): - pass + if self.local: + func = BACKEND_GET_SIZE[self.method[0]] + return func(path_in) + + def _can_compare(self): + """A boolean indicating whether image comparison is available""" - # Fall back to Web proxy method. - return WEBPROXY + return self.method[0] == IMAGEMAGICK and self.method[1] > (6, 8, 7) + + @staticmethod + def _check_method(): + """Return a tuple indicating an available method and its version.""" + version = get_im_version() + if version: + return IMAGEMAGICK, version + + version = get_pil_version() + if version: + return PIL, version + + return WEBPROXY, (0) + + +def get_im_version(): + """Return Image Magick version or None if it is unavailable + Try invoking ImageMagick's "convert". 
+ """ + try: + out = util.command_output([b'convert', b'--version']) + + if b'imagemagick' in out.lower(): + pattern = br".+ (\d+)\.(\d+)\.(\d+).*" + match = re.search(pattern, out) + if match: + return (int(match.group(1)), + int(match.group(2)), + int(match.group(3))) + return (0,) + + except (subprocess.CalledProcessError, OSError) as exc: + log.debug(u'ImageMagick check `convert --version` failed: {}', exc) + return None + + +def get_pil_version(): + """Return Image Magick version or None if it is unavailable + Try importing PIL.""" + try: + __import__('PIL', fromlist=[str('Image')]) + return (0,) + except ImportError: + return None diff -Nru beets-1.3.8+dfsg/beets/util/bluelet.py beets-1.3.19/beets/util/bluelet.py --- beets-1.3.8+dfsg/beets/util/bluelet.py 2014-04-10 18:18:13.000000000 +0000 +++ beets-1.3.19/beets/util/bluelet.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + """Extremely simple pure-Python implementation of coroutine-style asynchronous socket I/O. Inspired by, but inferior to, Eventlet. Bluelet can also be thought of as a less-terrible replacement for @@ -5,6 +7,8 @@ Bluelet: easy concurrency without all the messy parallelism. """ +from __future__ import division, absolute_import, print_function + import socket import select import sys @@ -550,7 +554,7 @@ and child coroutines run concurrently. """ if not isinstance(coro, types.GeneratorType): - raise ValueError('%s is not a coroutine' % str(coro)) + raise ValueError(u'%s is not a coroutine' % coro) return SpawnEvent(coro) @@ -560,7 +564,7 @@ returns a value using end(), then this event returns that value. """ if not isinstance(coro, types.GeneratorType): - raise ValueError('%s is not a coroutine' % str(coro)) + raise ValueError(u'%s is not a coroutine' % coro) return DelegationEvent(coro) diff -Nru beets-1.3.8+dfsg/beets/util/confit.py beets-1.3.19/beets/util/confit.py --- beets-1.3.8+dfsg/beets/util/confit.py 2014-09-14 00:19:04.000000000 +0000 +++ beets-1.3.19/beets/util/confit.py 2016-06-26 00:42:09.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of Confit. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,19 +15,16 @@ """Worry-free YAML configuration files. """ -from __future__ import unicode_literals +from __future__ import division, absolute_import, print_function + import platform import os import pkgutil import sys import yaml -import types import collections import re -try: - from collections import OrderedDict -except ImportError: - from ordereddict import OrderedDict +from collections import OrderedDict UNIX_DIR_VAR = 'XDG_CONFIG_HOME' UNIX_DIR_FALLBACK = '~/.config' @@ -40,14 +38,15 @@ YAML_TAB_PROBLEM = "found character '\\t' that cannot start any token" +REDACTED_TOMBSTONE = 'REDACTED' + # Utilities. 
PY3 = sys.version_info[0] == 3 -STRING = str if PY3 else unicode -BASESTRING = str if PY3 else basestring -NUMERIC_TYPES = (int, float) if PY3 else (int, float, long) -TYPE_TYPES = (type,) if PY3 else (type, types.ClassType) +STRING = str if PY3 else unicode # noqa +BASESTRING = str if PY3 else basestring # noqa +NUMERIC_TYPES = (int, float) if PY3 else (int, float, long) # noqa def iter_first(sequence): @@ -56,10 +55,7 @@ """ it = iter(sequence) try: - if PY3: - return next(it) - else: - return it.next() + return next(it) except StopIteration: raise ValueError() @@ -96,17 +92,17 @@ self.filename = filename self.reason = reason - message = 'file {0} could not be read'.format(filename) + message = u'file {0} could not be read'.format(filename) if isinstance(reason, yaml.scanner.ScannerError) and \ reason.problem == YAML_TAB_PROBLEM: # Special-case error message for tab indentation in YAML markup. - message += ': found tab character at line {0}, column {1}'.format( + message += u': found tab character at line {0}, column {1}'.format( reason.problem_mark.line + 1, reason.problem_mark.column + 1, ) elif reason: # Generic error message uses exception's message. - message += ': {0}'.format(reason) + message += u': {0}'.format(reason) super(ConfigReadError, self).__init__(message) @@ -120,19 +116,19 @@ def __init__(self, value, filename=None, default=False): super(ConfigSource, self).__init__(value) if filename is not None and not isinstance(filename, BASESTRING): - raise TypeError('filename must be a string or None') + raise TypeError(u'filename must be a string or None') self.filename = filename self.default = default def __repr__(self): - return 'ConfigSource({0}, {1}, {2})'.format( - super(ConfigSource, self).__repr__(), - repr(self.filename), - repr(self.default) + return 'ConfigSource({0!r}, {1!r}, {2!r})'.format( + super(ConfigSource, self), + self.filename, + self.default, ) @classmethod - def of(self, value): + def of(cls, value): """Given either a dictionary or a `ConfigSource` object, return a `ConfigSource` object. This lets a function accept either type of object as an argument. @@ -142,7 +138,7 @@ elif isinstance(value, dict): return ConfigSource(value) else: - raise TypeError('source value must be a dict') + raise TypeError(u'source value must be a dict') class ConfigView(object): @@ -177,7 +173,7 @@ try: return iter_first(pairs) except ValueError: - raise NotFoundError("{0} not found".format(self.name)) + raise NotFoundError(u"{0} not found".format(self.name)) def exists(self): """Determine whether the view has a setting in any source. @@ -208,7 +204,31 @@ raise NotImplementedError def __repr__(self): - return '' % self.name + return '<{}: {}>'.format(self.__class__.__name__, self.name) + + def __iter__(self): + """Iterate over the keys of a dictionary view or the *subviews* + of a list view. + """ + # Try getting the keys, if this is a dictionary view. + try: + keys = self.keys() + for key in keys: + yield key + + except ConfigTypeError: + # Otherwise, try iterating over a list. + collection = self.get() + if not isinstance(collection, (list, tuple)): + raise ConfigTypeError( + u'{0} must be a dictionary or a list, not {1}'.format( + self.name, type(collection).__name__ + ) + ) + + # Yield all the indices in the list. + for index in range(len(collection)): + yield self[index] def __getitem__(self, key): """Get a subview of this view.""" @@ -239,14 +259,17 @@ # just say ``bool(view)`` or use ``view`` in a conditional. 
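A short usage sketch of the view and redaction machinery above; only names that appear in this file are used, and the configuration keys ('ui', 'acoustid', 'apikey') are arbitrary samples:

from beets.util import confit

source = confit.ConfigSource.of({'ui': {'color': True},
                                 'acoustid': {'apikey': 'XXXXXXXX'}})
root = confit.RootView([source])

print(root['ui']['color'].get(bool))       # -> True

# Mark the key as sensitive; flatten(redact=True) then masks it.
root['acoustid']['apikey'].redact = True
flat = root.flatten(redact=True)
print(flat['acoustid']['apikey'])          # -> 'REDACTED'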
def __str__(self): - """Gets the value for this view as a byte string.""" - return str(self.get()) + """Get the value for this view as a bytestring. + """ + if PY3: + return self.__unicode__() + else: + return bytes(self.get()) def __unicode__(self): - """Gets the value for this view as a unicode string. (Python 2 - only.) + """Get the value for this view as a Unicode string. """ - return unicode(self.get()) + return STRING(self.get()) def __nonzero__(self): """Gets the value for this view as a boolean. (Python 2 only.) @@ -276,7 +299,7 @@ cur_keys = dic.keys() except AttributeError: raise ConfigTypeError( - '{0} must be a dict, not {1}'.format( + u'{0} must be a dict, not {1}'.format( self.name, type(dic).__name__ ) ) @@ -317,7 +340,7 @@ it = iter(collection) except TypeError: raise ConfigTypeError( - '{0} must be an iterable, not {1}'.format( + u'{0} must be an iterable, not {1}'.format( self.name, type(collection).__name__ ) ) @@ -326,17 +349,23 @@ # Validation and conversion. - def flatten(self): + def flatten(self, redact=False): """Create a hierarchy of OrderedDicts containing the data from this view, recursively reifying all views to get their represented values. + + If `redact` is set, then sensitive values are replaced with + the string "REDACTED". """ od = OrderedDict() for key, view in self.items(): - try: - od[key] = view.flatten() - except ConfigTypeError: - od[key] = view.get() + if redact and view.redact: + od[key] = REDACTED_TOMBSTONE + else: + try: + od[key] = view.flatten(redact=redact) + except ConfigTypeError: + od[key] = view.get() return od def get(self, template=None): @@ -354,7 +383,7 @@ """ return as_template(template).value(self, template) - # Old validation methods (deprecated). + # Shortcuts def as_filename(self): return self.get(Filename()) @@ -368,6 +397,30 @@ def as_str_seq(self): return self.get(StrSeq()) + # Redaction. + + @property + def redact(self): + """Whether the view contains sensitive information and should be + redacted from output. + """ + return () in self.get_redactions() + + @redact.setter + def redact(self, flag): + self.set_redaction((), flag) + + def set_redaction(self, path, flag): + """Add or remove a redaction for a key path, which should be an + iterable of keys. + """ + raise NotImplementedError() + + def get_redactions(self): + """Get the set of currently-redacted sub-key-paths at this view. + """ + raise NotImplementedError() + class RootView(ConfigView): """The base of a view hierarchy. This view keeps track of the @@ -380,6 +433,7 @@ """ self.sources = list(sources) self.name = ROOT_NAME + self.redactions = set() def add(self, obj): self.sources.append(ConfigSource.of(obj)) @@ -391,12 +445,24 @@ return ((dict(s), s) for s in self.sources) def clear(self): - """Remove all sources from this configuration.""" + """Remove all sources (and redactions) from this + configuration. + """ del self.sources[:] + self.redactions.clear() def root(self): return self + def set_redaction(self, path, flag): + if flag: + self.redactions.add(path) + elif path in self.redactions: + self.redactions.remove(path) + + def get_redactions(self): + return self.redactions + class Subview(ConfigView): """A subview accessed via a subscript of a parent view.""" @@ -414,11 +480,14 @@ if not isinstance(self.key, int): self.name += '.' 
if isinstance(self.key, int): - self.name += '#{0}'.format(self.key) - elif isinstance(self.key, BASESTRING): - self.name += '{0}'.format(self.key) + self.name += u'#{0}'.format(self.key) + elif isinstance(self.key, (bytes, BASESTRING)): + if isinstance(self.key, STRING): + self.name += self.key + else: + self.name += self.key.decode('utf8') else: - self.name += '{0}'.format(repr(self.key)) + self.name += repr(self.key) def resolve(self): for collection, source in self.parent.resolve(): @@ -433,7 +502,7 @@ except TypeError: # Not subscriptable. raise ConfigTypeError( - "{0} must be a collection, not {1}".format( + u"{0} must be a collection, not {1}".format( self.parent.name, type(collection).__name__ ) ) @@ -448,6 +517,13 @@ def root(self): return self.parent.root() + def set_redaction(self, path, flag): + self.parent.set_redaction((self.key,) + path, flag) + + def get_redactions(self): + return (kp[1:] for kp in self.parent.get_redactions() + if kp and kp[0] == self.key) + # Config file paths, including platform-specific paths and in-package # defaults. @@ -536,7 +612,7 @@ else: raise yaml.constructor.ConstructorError( None, None, - 'expected a mapping node, but found %s' % node.id, + u'expected a mapping node, but found %s' % node.id, node.start_mark ) @@ -547,7 +623,7 @@ hash(key) except TypeError as exc: raise yaml.constructor.ConstructorError( - 'while constructing a mapping', + u'while constructing a mapping', node.start_mark, 'found unacceptable key (%s)' % exc, key_node.start_mark ) @@ -595,11 +671,11 @@ for item_key, item_value in mapping: node_key = self.represent_data(item_key) node_value = self.represent_data(item_value) - if not (isinstance(node_key, yaml.ScalarNode) - and not node_key.style): + if not (isinstance(node_key, yaml.ScalarNode) and + not node_key.style): best_style = False - if not (isinstance(node_value, yaml.ScalarNode) - and not node_value.style): + if not (isinstance(node_value, yaml.ScalarNode) and + not node_value.style): best_style = False value.append((node_key, node_value)) if flow_style is None: @@ -625,9 +701,9 @@ """Represent bool as 'yes' or 'no' instead of 'true' or 'false'. """ if data: - value = 'yes' + value = u'yes' else: - value = 'no' + value = u'no' return self.represent_scalar('tag:yaml.org,2002:bool', value) def represent_none(self, data): @@ -752,7 +828,7 @@ appdir = os.environ[self._env_var] appdir = os.path.abspath(os.path.expanduser(appdir)) if os.path.isfile(appdir): - raise ConfigError('{0} must be a directory'.format( + raise ConfigError(u'{0} must be a directory'.format( self._env_var )) @@ -776,7 +852,7 @@ filename = os.path.abspath(filename) self.set(ConfigSource(load_yaml(filename), filename)) - def dump(self, full=True): + def dump(self, full=True, redact=False): """Dump the Configuration object to a YAML file. The order of the keys is determined from the default @@ -788,13 +864,17 @@ :type filename: unicode :param full: Dump settings that don't differ from the defaults as well + :param redact: Remove sensitive information (views with the `redact` + flag set) from the output """ if full: - out_dict = self.flatten() + out_dict = self.flatten(redact=redact) else: # Exclude defaults when flattening. 
sources = [s for s in self.sources if not s.default] - out_dict = RootView(sources).flatten() + temp_root = RootView(sources) + temp_root.redactions = self.redactions + out_dict = temp_root.flatten(redact=redact) yaml_out = yaml.dump(out_dict, Dumper=Dumper, default_flow_style=None, indent=4, @@ -806,7 +886,7 @@ if source.default: default_source = source break - if default_source: + if default_source and default_source.filename: with open(default_source.filename, 'r') as fp: default_data = fp.read() yaml_out = restore_yaml_comments(yaml_out, default_data) @@ -853,7 +933,7 @@ def clear(self): """Remove all sources from this configuration.""" - del self.sources[:] + super(LazyConfig, self).clear() self._lazy_suffix = [] self._lazy_prefix = [] @@ -901,7 +981,7 @@ return self.convert(value, view) elif self.default is REQUIRED: # Missing required value. This is an error. - raise NotFoundError("{0} not found".format(view.name)) + raise NotFoundError(u"{0} not found".format(view.name)) else: # Missing value, but not required. return self.default @@ -926,7 +1006,7 @@ """ exc_class = ConfigTypeError if type_error else ConfigValueError raise exc_class( - '{0}: {1}'.format(view.name, message) + u'{0}: {1}'.format(view.name, message) ) def __repr__(self): @@ -947,7 +1027,7 @@ elif isinstance(value, float): return int(value) else: - self.fail('must be a number', view, True) + self.fail(u'must be a number', view, True) class Number(Template): @@ -960,7 +1040,7 @@ return value else: self.fail( - 'must be numeric, not {0}'.format(type(value).__name__), + u'must be numeric, not {0}'.format(type(value).__name__), view, True ) @@ -1005,18 +1085,29 @@ if pattern: self.regex = re.compile(pattern) + def __repr__(self): + args = [] + + if self.default is not REQUIRED: + args.append(repr(self.default)) + + if self.pattern is not None: + args.append('pattern=' + repr(self.pattern)) + + return 'String({0})'.format(', '.join(args)) + def convert(self, value, view): """Check that the value is a string and matches the pattern. """ if isinstance(value, BASESTRING): if self.pattern and not self.regex.match(value): self.fail( - "must match the pattern {0}".format(self.pattern), + u"must match the pattern {0}".format(self.pattern), view ) return value else: - self.fail('must be a string', view, True) + self.fail(u'must be a string', view, True) class Choice(Template): @@ -1037,7 +1128,7 @@ """ if value not in self.choices: self.fail( - 'must be one of {0}, not {1}'.format( + u'must be one of {0}, not {1}'.format( repr(list(self.choices)), repr(value) ), view @@ -1052,6 +1143,67 @@ return 'Choice({0!r})'.format(self.choices) +class OneOf(Template): + """A template that permits values complying to one of the given templates. + """ + def __init__(self, allowed, default=REQUIRED): + super(OneOf, self).__init__(default) + self.allowed = list(allowed) + + def __repr__(self): + args = [] + + if self.allowed is not None: + args.append('allowed=' + repr(self.allowed)) + + if self.default is not REQUIRED: + args.append(repr(self.default)) + + return 'OneOf({0})'.format(', '.join(args)) + + def value(self, view, template): + self.template = template + return super(OneOf, self).value(view, template) + + def convert(self, value, view): + """Ensure that the value follows at least one template. 
+ """ + is_mapping = isinstance(self.template, MappingTemplate) + + for candidate in self.allowed: + try: + if is_mapping: + if isinstance(candidate, Filename) and \ + candidate.relative_to: + next_template = candidate.template_with_relatives( + view, + self.template + ) + + next_template.subtemplates[view.key] = as_template( + candidate + ) + else: + next_template = MappingTemplate({view.key: candidate}) + + return view.parent.get(next_template)[view.key] + else: + return view.get(candidate) + except ConfigTemplateError: + raise + except ConfigError: + pass + except ValueError as exc: + raise ConfigTemplateError(exc) + + self.fail( + u'must be one of {0}, not {1}'.format( + repr(self.allowed), repr(value) + ), + view + ) + + class StrSeq(Template): """A template for values that are lists of strings. @@ -1081,17 +1233,17 @@ try: value = list(value) except TypeError: - self.fail('must be a whitespace-separated string or a list', + self.fail(u'must be a whitespace-separated string or a list', view, True) def convert(x): - if isinstance(x, unicode): + if isinstance(x, STRING): return x - elif isinstance(x, BASESTRING): + elif isinstance(x, bytes): return x.decode('utf8', 'ignore') else: - self.fail('must be a list of strings', view, True) - return map(convert, value) + self.fail(u'must be a list of strings', view, True) + return list(map(convert, value)) class Filename(Template): @@ -1105,12 +1257,19 @@ they are relative to the current working directory. This helps attain the expected behavior when using command-line options. """ - def __init__(self, default=REQUIRED, cwd=None, relative_to=None): - """ `relative_to` is the name of a sibling value that is + def __init__(self, default=REQUIRED, cwd=None, relative_to=None, + in_app_dir=False): + """`relative_to` is the name of a sibling value that is being validated at the same time. + + `in_app_dir` indicates whether the path should be resolved + inside the application's config directory (even when the setting + does not come from a file). """ super(Filename, self).__init__(default) - self.cwd, self.relative_to = cwd, relative_to + self.cwd = cwd + self.relative_to = relative_to + self.in_app_dir = in_app_dir def __repr__(self): args = [] @@ -1124,25 +1283,28 @@ if self.relative_to is not None: args.append('relative_to=' + repr(self.relative_to)) + if self.in_app_dir: + args.append('in_app_dir=True') + return 'Filename({0})'.format(', '.join(args)) def resolve_relative_to(self, view, template): if not isinstance(template, (collections.Mapping, MappingTemplate)): # disallow config.get(Filename(relative_to='foo')) raise ConfigTemplateError( - 'relative_to may only be used when getting multiple values.' + u'relative_to may only be used when getting multiple values.' 
) elif self.relative_to == view.key: raise ConfigTemplateError( - '{0} is relative to itself'.format(view.name) + u'{0} is relative to itself'.format(view.name) ) elif self.relative_to not in view.parent.keys(): # self.relative_to is not in the config self.fail( ( - 'needs sibling value "{0}" to expand relative path' + u'needs sibling value "{0}" to expand relative path' ).format(self.relative_to), view ) @@ -1164,12 +1326,12 @@ if next_relative in template.subtemplates: # we encountered this config key previously raise ConfigTemplateError(( - '{0} and {1} are recursively relative' + u'{0} and {1} are recursively relative' ).format(view.name, self.relative_to)) else: raise ConfigTemplateError(( - 'missing template for {0}, needed to expand {1}\'s' + - 'relative path' + u'missing template for {0}, needed to expand {1}\'s' + + u'relative path' ).format(self.relative_to, view.name)) next_template.subtemplates[next_relative] = rel_to_template @@ -1179,9 +1341,9 @@ def value(self, view, template=None): path, source = view.first() - if not isinstance(path, BASESTRING): + if not isinstance(path, (bytes, BASESTRING)): self.fail( - 'must be a filename, not {0}'.format(type(path).__name__), + u'must be a filename, not {0}'.format(type(path).__name__), view, True ) @@ -1198,7 +1360,7 @@ path, ) - elif source.filename: + elif source.filename or self.in_app_dir: # From defaults: relative to the app's directory. path = os.path.join(view.root().config_dir(), path) @@ -1219,7 +1381,7 @@ def convert(self, value, view): if not isinstance(value, self.typ): self.fail( - 'must be a {0}, not {1}'.format( + u'must be a {0}, not {1}'.format( self.typ.__name__, type(value).__name__, ), @@ -1257,6 +1419,11 @@ return String() elif isinstance(value, BASESTRING): return String(value) + elif isinstance(value, set): + # convert to list to avoid hash related problems + return Choice(list(value)) + elif isinstance(value, list): + return OneOf(value) elif value is float: return Number() elif value is None: @@ -1268,4 +1435,4 @@ elif isinstance(value, type): return TypeTemplate(value) else: - raise ValueError('cannot convert to template: {0!r}'.format(value)) + raise ValueError(u'cannot convert to template: {0!r}'.format(value)) diff -Nru beets-1.3.8+dfsg/beets/util/enumeration.py beets-1.3.19/beets/util/enumeration.py --- beets-1.3.8+dfsg/beets/util/enumeration.py 2014-04-10 18:18:13.000000000 +0000 +++ beets-1.3.19/beets/util/enumeration.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,6 +13,8 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. +from __future__ import division, absolute_import, print_function + from enum import Enum diff -Nru beets-1.3.8+dfsg/beets/util/functemplate.py beets-1.3.19/beets/util/functemplate.py --- beets-1.3.8+dfsg/beets/util/functemplate.py 2014-04-14 03:38:32.000000000 +0000 +++ beets-1.3.19/beets/util/functemplate.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. 
# # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -25,13 +26,16 @@ This is sort of like a tiny, horrible degeneration of a real templating engine like Jinja2 or Mustache. """ -from __future__ import print_function + +from __future__ import division, absolute_import, print_function import re import ast import dis import types +from .confit import NUMERIC_TYPES + SYMBOL_DELIM = u'$' FUNC_DELIM = u'%' GROUP_OPEN = u'{' @@ -70,13 +74,13 @@ """ if val is None: return ast.Name('None', ast.Load()) - elif isinstance(val, (int, float, long)): + elif isinstance(val, NUMERIC_TYPES): return ast.Num(val) elif isinstance(val, bool): - return ast.Name(str(val), ast.Load()) + return ast.Name(bytes(val), ast.Load()) elif isinstance(val, basestring): return ast.Str(val) - raise TypeError('no literal for {0}'.format(type(val))) + raise TypeError(u'no literal for {0}'.format(type(val))) def ex_varassign(name, expr): @@ -110,7 +114,7 @@ bytecode of the compiled function. """ func_def = ast.FunctionDef( - name, + name.encode('utf8'), ast.arguments( [ast.Name(n, ast.Param()) for n in arg_names], None, None, @@ -132,7 +136,7 @@ dis.dis(const) the_locals = {} - exec prog in {}, the_locals + exec(prog, {}, the_locals) return the_locals[name] @@ -289,7 +293,7 @@ # Common parsing resources. special_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_OPEN, GROUP_CLOSE, ARG_SEP, ESCAPE_CHAR) - special_char_re = re.compile(ur'[%s]|$' % + special_char_re = re.compile(r'[%s]|$' % u''.join(re.escape(c) for c in special_chars)) def parse_expression(self): @@ -307,8 +311,8 @@ # A non-special character. Skip to the next special # character, treating the interstice as literal text. next_pos = ( - self.special_char_re.search(self.string[self.pos:]).start() - + self.pos + self.special_char_re.search( + self.string[self.pos:]).start() + self.pos ) text_parts.append(self.string[self.pos:next_pos]) self.pos = next_pos @@ -477,7 +481,7 @@ Updates ``pos``. """ remainder = self.string[self.pos:] - ident = re.match(ur'\w*', remainder).group(0) + ident = re.match(r'\w*', remainder).group(0) self.pos += len(ident) return ident @@ -532,9 +536,9 @@ argnames = [] for varname in varnames: - argnames.append(VARIABLE_PREFIX.encode('utf8') + varname) + argnames.append(VARIABLE_PREFIX + varname) for funcname in funcnames: - argnames.append(FUNCTION_PREFIX.encode('utf8') + funcname) + argnames.append(FUNCTION_PREFIX + funcname) func = compile_func( argnames, @@ -568,4 +572,4 @@ 'from __main__ import _tmpl, _vars, _funcs', number=10000) print(comp_time) - print('Speedup:', interp_time / comp_time) + print(u'Speedup:', interp_time / comp_time) diff -Nru beets-1.3.8+dfsg/beets/util/hidden.py beets-1.3.19/beets/util/hidden.py --- beets-1.3.8+dfsg/beets/util/hidden.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beets/util/hidden.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Simple library to work out if a file is hidden on different platforms.""" +from __future__ import division, absolute_import, print_function + +import os +import stat +import ctypes +import sys + + +def _is_hidden_osx(path): + """Return whether or not a file is hidden on OS X. + + This uses os.lstat to work out if a file has the "hidden" flag. + """ + file_stat = os.lstat(path) + + if hasattr(file_stat, 'st_flags') and hasattr(stat, 'UF_HIDDEN'): + return bool(file_stat.st_flags & stat.UF_HIDDEN) + else: + return False + + +def _is_hidden_win(path): + """Return whether or not a file is hidden on Windows. + + This uses GetFileAttributes to work out if a file has the "hidden" flag + (FILE_ATTRIBUTE_HIDDEN). + """ + # FILE_ATTRIBUTE_HIDDEN = 2 (0x2) from GetFileAttributes documentation. + hidden_mask = 2 + + # Retrieve the attributes for the file. + attrs = ctypes.windll.kernel32.GetFileAttributesW(path) + + # Ensure we have valid attribues and compare them against the mask. + return attrs >= 0 and attrs & hidden_mask + + +def _is_hidden_dot(path): + """Return whether or not a file starts with a dot. + + Files starting with a dot are seen as "hidden" files on Unix-based OSes. + """ + return os.path.basename(path).startswith('.') + + +def is_hidden(path): + """Return whether or not a file is hidden. + + This method works differently depending on the platform it is called on. + + On OS X, it uses both the result of `is_hidden_osx` and `is_hidden_dot` to + work out if a file is hidden. + + On Windows, it uses the result of `is_hidden_win` to work out if a file is + hidden. + + On any other operating systems (i.e. Linux), it uses `is_hidden_dot` to + work out if a file is hidden. + """ + # Convert the path to unicode if it is not already. + if not isinstance(path, unicode): + path = path.decode('utf-8') + + # Run platform specific functions depending on the platform + if sys.platform == 'darwin': + return _is_hidden_osx(path) or _is_hidden_dot(path) + elif sys.platform == 'win32': + return _is_hidden_win(path) + else: + return _is_hidden_dot(path) + +__all__ = ['is_hidden'] diff -Nru beets-1.3.8+dfsg/beets/util/__init__.py beets-1.3.19/beets/util/__init__.py --- beets-1.3.8+dfsg/beets/util/__init__.py 2014-08-19 04:25:31.000000000 +0000 +++ beets-1.3.19/beets/util/__init__.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -13,17 +14,19 @@ # included in all copies or substantial portions of the Software. 
"""Miscellaneous utility functions.""" -from __future__ import division +from __future__ import division, absolute_import, print_function import os import sys import re import shutil import fnmatch -from collections import defaultdict +from collections import Counter import traceback import subprocess import platform +import shlex +from beets.util import hidden MAX_FILENAME_LENGTH = 200 @@ -54,17 +57,17 @@ def _gerund(self): """Generate a (likely) gerund form of the English verb. """ - if ' ' in self.verb: + if u' ' in self.verb: return self.verb - gerund = self.verb[:-1] if self.verb.endswith('e') else self.verb - gerund += 'ing' + gerund = self.verb[:-1] if self.verb.endswith(u'e') else self.verb + gerund += u'ing' return gerund def _reasonstr(self): """Get the reason as a string.""" if isinstance(self.reason, unicode): return self.reason - elif isinstance(self.reason, basestring): # Byte string. + elif isinstance(self.reason, bytes): return self.reason.decode('utf8', 'ignore') elif hasattr(self.reason, 'strerror'): # i.e., EnvironmentError return self.reason.strerror @@ -83,7 +86,7 @@ """ if self.tb: logger.debug(self.tb) - logger.error(u'{0}: {1}'.format(self.error_kind, self.args[0])) + logger.error(u'{0}: {1}', self.error_kind, self.args[0]) class FilesystemError(HumanReadableException): @@ -149,14 +152,15 @@ return out -def sorted_walk(path, ignore=(), logger=None): +def sorted_walk(path, ignore=(), ignore_hidden=False, logger=None): """Like `os.walk`, but yields things in case-insensitive sorted, breadth-first order. Directory and file names matching any glob pattern in `ignore` are skipped. If `logger` is provided, then warning messages are logged there when a directory cannot be listed. """ - # Make sure the path isn't a Unicode string. + # Make sure the pathes aren't Unicode strings. path = bytestring_path(path) + ignore = [bytestring_path(i) for i in ignore] # Get all the directories and files at this level. try: @@ -183,10 +187,11 @@ # Add to output as either a file or a directory. cur = os.path.join(path, base) - if os.path.isdir(syspath(cur)): - dirs.append(base) - else: - files.append(base) + if (ignore_hidden and not hidden.is_hidden(cur)) or not ignore_hidden: + if os.path.isdir(syspath(cur)): + dirs.append(base) + else: + files.append(base) # Sort lists (case-insensitive) and yield the current level. dirs.sort(key=bytes.lower) @@ -197,7 +202,7 @@ for base in dirs: cur = os.path.join(path, base) # yield from sorted_walk(...) - for res in sorted_walk(cur, ignore, logger): + for res in sorted_walk(cur, ignore, ignore_hidden, logger): yield res @@ -260,6 +265,7 @@ if not os.path.exists(directory): # Directory gone already. continue + clutter = [bytestring_path(c) for c in clutter] if fnmatch_all(os.listdir(directory), clutter): # Directory contains only clutter (or nothing). try: @@ -310,11 +316,11 @@ def bytestring_path(path): - """Given a path, which is either a str or a unicode, returns a str + """Given a path, which is either a bytes or a unicode, returns a str path (ensuring that we never deal with Unicode pathnames). """ # Pass through bytestrings. - if isinstance(path, str): + if isinstance(path, bytes): return path # On Windows, remove the magic prefix added by `syspath`. This makes @@ -330,6 +336,9 @@ return path.encode('utf8') +PATH_SEP = bytestring_path(os.sep) + + def displayable_path(path, separator=u'; '): """Attempts to decode a bytestring path to a unicode object for the purpose of displaying it to the user. 
If the `path` argument is a @@ -339,7 +348,7 @@ return separator.join(displayable_path(p) for p in path) elif isinstance(path, unicode): return path - elif not isinstance(path, str): + elif not isinstance(path, bytes): # A non-string object: just get its unicode representation. return unicode(path) @@ -412,7 +421,7 @@ path = syspath(path) dest = syspath(dest) if not replace and os.path.exists(dest): - raise FilesystemError('file exists', 'copy', (path, dest)) + raise FilesystemError(u'file exists', 'copy', (path, dest)) try: shutil.copyfile(path, dest) except (OSError, IOError) as exc: @@ -433,7 +442,7 @@ path = syspath(path) dest = syspath(dest) if os.path.exists(dest) and not replace: - raise FilesystemError('file exists', 'rename', (path, dest), + raise FilesystemError(u'file exists', 'rename', (path, dest), traceback.format_exc()) # First, try renaming the file. @@ -449,6 +458,26 @@ traceback.format_exc()) +def link(path, dest, replace=False): + """Create a symbolic link from path to `dest`. Raises an OSError if + `dest` already exists, unless `replace` is True. Does nothing if + `path` == `dest`.""" + if (samefile(path, dest)): + return + + path = syspath(path) + dest = syspath(dest) + if os.path.exists(dest) and not replace: + raise FilesystemError(u'file exists', 'rename', (path, dest), + traceback.format_exc()) + try: + os.symlink(path, dest) + except OSError: + raise FilesystemError(u'Operating system does not support symbolic ' + u'links.', 'link', (path, dest), + traceback.format_exc()) + + def unique_path(path): """Returns a version of ``path`` that does not exist on the filesystem. Specifically, if ``path` itself already exists, then @@ -458,7 +487,7 @@ return path base, ext = os.path.splitext(path) - match = re.search(r'\.(\d)+$', base) + match = re.search(br'\.(\d)+$', base) if match: num = int(match.group(1)) base = base[:match.start()] @@ -466,7 +495,7 @@ num = 0 while True: num += 1 - new_path = '%s.%i%s' % (base, num, ext) + new_path = b'%s.%i%s' % (base, num, ext) if not os.path.exists(new_path): return new_path @@ -475,12 +504,12 @@ # shares, which are sufficiently common as to cause frequent problems. # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx CHAR_REPLACE = [ - (re.compile(ur'[\\/]'), u'_'), # / and \ -- forbidden everywhere. - (re.compile(ur'^\.'), u'_'), # Leading dot (hidden files on Unix). - (re.compile(ur'[\x00-\x1f]'), u''), # Control characters. - (re.compile(ur'[<>:"\?\*\|]'), u'_'), # Windows "reserved characters". - (re.compile(ur'\.$'), u'_'), # Trailing dots. - (re.compile(ur'\s+$'), u''), # Trailing whitespace. + (re.compile(r'[\\/]'), u'_'), # / and \ -- forbidden everywhere. + (re.compile(r'^\.'), u'_'), # Leading dot (hidden files on Unix). + (re.compile(r'[\x00-\x1f]'), u''), # Control characters. + (re.compile(r'[<>:"\?\*\|]'), u'_'), # Windows "reserved characters". + (re.compile(r'\.$'), u'_'), # Trailing dots. + (re.compile(r'\s+$'), u''), # Trailing whitespace. ] @@ -522,12 +551,81 @@ return os.path.join(*out) +def _legalize_stage(path, replacements, length, extension, fragment): + """Perform a single round of path legalization steps + (sanitation/replacement, encoding from Unicode to bytes, + extension-appending, and truncation). Return the path (Unicode if + `fragment` is set, `bytes` otherwise) and whether truncation was + required. + """ + # Perform an initial sanitization including user replacements. + path = sanitize_path(path, replacements) + + # Encode for the filesystem. 
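
Among the helpers revised above, unique_path() now builds its numbered candidates as byte strings, and the new link() function creates symlinks with the same FilesystemError semantics as copy() and move(). A small sketch of unique_path() behaviour; the temporary file only exists so the example can run as-is:

    import os
    import tempfile
    from beets import util

    tmp = tempfile.NamedTemporaryFile(suffix='.mp3', delete=False)
    tmp.close()
    # The file exists, so unique_path() returns the first free ".N" variant.
    candidate = util.unique_path(util.bytestring_path(tmp.name))
    print(util.displayable_path(candidate))   # e.g. ".../tmpXYZ.1.mp3"
    os.remove(tmp.name)
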
+ if not fragment: + path = bytestring_path(path) + + # Preserve extension. + path += extension.lower() + + # Truncate too-long components. + pre_truncate_path = path + path = truncate_path(path, length) + + return path, path != pre_truncate_path + + +def legalize_path(path, replacements, length, extension, fragment): + """Given a path-like Unicode string, produce a legal path. Return + the path and a flag indicating whether some replacements had to be + ignored (see below). + + The legalization process (see `_legalize_stage`) consists of + applying the sanitation rules in `replacements`, encoding the string + to bytes (unless `fragment` is set), truncating components to + `length`, appending the `extension`. + + This function performs up to three calls to `_legalize_stage` in + case truncation conflicts with replacements (as can happen when + truncation creates whitespace at the end of the string, for + example). The limited number of iterations iterations avoids the + possibility of an infinite loop of sanitation and truncation + operations, which could be caused by replacement rules that make the + string longer. The flag returned from this function indicates that + the path has to be truncated twice (indicating that replacements + made the string longer again after it was truncated); the + application should probably log some sort of warning. + """ + + if fragment: + # Outputting Unicode. + extension = extension.decode('utf8', 'ignore') + + first_stage_path, _ = _legalize_stage( + path, replacements, length, extension, fragment + ) + + # Convert back to Unicode with extension removed. + first_stage_path, _ = os.path.splitext(displayable_path(first_stage_path)) + + # Re-sanitize following truncation (including user replacements). + second_stage_path, retruncated = _legalize_stage( + first_stage_path, replacements, length, extension, fragment + ) + + # If the path was once again truncated, discard user replacements + # and run through one last legalization stage. + if retruncated: + second_stage_path, _ = _legalize_stage( + first_stage_path, None, length, extension, fragment + ) + + return second_stage_path, retruncated + + def str2bool(value): """Returns a boolean reflecting a human-entered string.""" - if value.lower() in ('yes', '1', 'true', 't', 'y'): - return True - else: - return False + return value.lower() in (u'yes', u'1', u'true', u't', u'y') def as_string(value): @@ -537,58 +635,22 @@ if value is None: return u'' elif isinstance(value, buffer): - return str(value).decode('utf8', 'ignore') - elif isinstance(value, str): + return bytes(value).decode('utf8', 'ignore') + elif isinstance(value, bytes): return value.decode('utf8', 'ignore') else: return unicode(value) -def levenshtein(s1, s2): - """A nice DP edit distance implementation from Wikibooks: - http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/ - Levenshtein_distance#Python - """ - if len(s1) < len(s2): - return levenshtein(s2, s1) - if not s1: - return len(s2) - - previous_row = xrange(len(s2) + 1) - for i, c1 in enumerate(s1): - current_row = [i + 1] - for j, c2 in enumerate(s2): - insertions = previous_row[j + 1] + 1 - deletions = current_row[j] + 1 - substitutions = previous_row[j] + (c1 != c2) - current_row.append(min(insertions, deletions, substitutions)) - previous_row = current_row - - return previous_row[-1] - - def plurality(objs): - """Given a sequence of comparable objects, returns the object that - is most common in the set and the frequency of that object. 
The + """Given a sequence of hashble objects, returns the object that + is most common in the set and the its number of appearance. The sequence must contain at least one object. """ - # Calculate frequencies. - freqs = defaultdict(int) - for obj in objs: - freqs[obj] += 1 - - if not freqs: - raise ValueError('sequence must be non-empty') - - # Find object with maximum frequency. - max_freq = 0 - res = None - for obj, freq in freqs.items(): - if freq > max_freq: - max_freq = freq - res = obj - - return res, max_freq + c = Counter(objs) + if not c: + raise ValueError(u'sequence must be non-empty') + return c.most_common(1)[0] def cpu_count(): @@ -604,8 +666,8 @@ num = 0 elif sys.platform == 'darwin': try: - num = int(command_output(['sysctl', '-n', 'hw.ncpu'])) - except ValueError: + num = int(command_output([b'/usr/sbin/sysctl', b'-n', b'hw.ncpu'])) + except (ValueError, OSError, subprocess.CalledProcessError): num = 0 else: try: @@ -621,25 +683,31 @@ def command_output(cmd, shell=False): """Runs the command and returns its output after it has exited. - ``cmd`` is a list of arguments starting with the command names. If - ``shell`` is true, ``cmd`` is assumed to be a string and passed to a + ``cmd`` is a list of byte string arguments starting with the command names. + If ``shell`` is true, ``cmd`` is assumed to be a string and passed to a shell to execute. If the process exits with a non-zero return code ``subprocess.CalledProcessError`` is raised. May also raise ``OSError``. - This replaces `subprocess.check_output`, which isn't available in - Python 2.6 and which can have problems if lots of output is sent to - stderr. - """ - with open(os.devnull, 'wb') as devnull: - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=devnull, - close_fds=platform.system() != 'Windows', - shell=shell) - stdout, _ = proc.communicate() + This replaces `subprocess.check_output` which can have problems if lots of + output is sent to stderr. + """ + proc = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=platform.system() != 'Windows', + shell=shell + ) + stdout, stderr = proc.communicate() if proc.returncode: - raise subprocess.CalledProcessError(proc.returncode, cmd) + raise subprocess.CalledProcessError( + returncode=proc.returncode, + cmd=b' '.join(cmd), + output=stdout + stderr, + ) return stdout @@ -658,3 +726,142 @@ return min(res[9], limit) else: return limit + + +def open_anything(): + """Return the system command that dispatches execution to the correct + program. + """ + sys_name = platform.system() + if sys_name == 'Darwin': + base_cmd = 'open' + elif sys_name == 'Windows': + base_cmd = 'start' + else: # Assume Unix + base_cmd = 'xdg-open' + return base_cmd + + +def editor_command(): + """Get a command for opening a text file. + + Use the `EDITOR` environment variable by default. If it is not + present, fall back to `open_anything()`, the platform-specific tool + for opening files in general. + """ + editor = os.environ.get('EDITOR') + if editor: + return editor + return open_anything() + + +def shlex_split(s): + """Split a Unicode or bytes string according to shell lexing rules. + + Raise `ValueError` if the string is not a well-formed shell string. + This is a workaround for a bug in some versions of Python. + """ + if isinstance(s, bytes): + # Shlex works fine. + return shlex.split(s) + + elif isinstance(s, unicode): + # Work around a Python bug. 
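
plurality() above is now a thin wrapper around collections.Counter but keeps its original contract: return the most common object and how often it occurs. A toy check with made-up genre values:

    from collections import Counter
    from beets import util

    objs = [u'rock', u'rock', u'jazz']
    obj, freq = util.plurality(objs)
    assert (obj, freq) == (u'rock', 2)
    assert (obj, freq) == Counter(objs).most_common(1)[0]
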
+ # http://bugs.python.org/issue6988 + bs = s.encode('utf8') + return [c.decode('utf8') for c in shlex.split(bs)] + + else: + raise TypeError(u'shlex_split called with non-string') + + +def interactive_open(targets, command): + """Open the files in `targets` by `exec`ing a new `command`, given + as a Unicode string. (The new program takes over, and Python + execution ends: this does not fork a subprocess.) + + Can raise `OSError`. + """ + assert command + + # Split the command string into its arguments. + try: + args = shlex_split(command) + except ValueError: # Malformed shell tokens. + args = [command] + + args.insert(0, args[0]) # for argv[0] + + args += targets + + return os.execlp(*args) + + +def _windows_long_path_name(short_path): + """Use Windows' `GetLongPathNameW` via ctypes to get the canonical, + long path given a short filename. + """ + if not isinstance(short_path, unicode): + short_path = short_path.decode(_fsencoding()) + + import ctypes + buf = ctypes.create_unicode_buffer(260) + get_long_path_name_w = ctypes.windll.kernel32.GetLongPathNameW + return_value = get_long_path_name_w(short_path, buf, 260) + + if return_value == 0 or return_value > 260: + # An error occurred + return short_path + else: + long_path = buf.value + # GetLongPathNameW does not change the case of the drive + # letter. + if len(long_path) > 1 and long_path[1] == ':': + long_path = long_path[0].upper() + long_path[1:] + return long_path + + +def case_sensitive(path): + """Check whether the filesystem at the given path is case sensitive. + + To work best, the path should point to a file or a directory. If the path + does not exist, assume a case sensitive file system on every platform + except Windows. + """ + # A fallback in case the path does not exist. + if not os.path.exists(syspath(path)): + # By default, the case sensitivity depends on the platform. + return platform.system() != 'Windows' + + # If an upper-case version of the path exists but a lower-case + # version does not, then the filesystem must be case-sensitive. + # (Otherwise, we have more work to do.) + if not (os.path.exists(syspath(path.lower())) and + os.path.exists(syspath(path.upper()))): + return True + + # Both versions of the path exist on the file system. Check whether + # they refer to different files by their inodes. Alas, + # `os.path.samefile` is only available on Unix systems on Python 2. + if platform.system() != 'Windows': + return not os.path.samefile(syspath(path.lower()), + syspath(path.upper())) + + # On Windows, we check whether the canonical, long filenames for the + # files are the same. + lower = _windows_long_path_name(path.lower()) + upper = _windows_long_path_name(path.upper()) + return lower != upper + + +def raw_seconds_short(string): + """Formats a human-readable M:SS string as a float (number of seconds). + + Raises ValueError if the conversion cannot take place due to `string` not + being in the right format. + """ + match = re.match(r'^(\d+):([0-5]\d)$', string) + if not match: + raise ValueError(u'String not in M:SS format') + minutes, seconds = map(int, match.groups()) + return float(minutes * 60 + seconds) diff -Nru beets-1.3.8+dfsg/beets/util/pipeline.py beets-1.3.19/beets/util/pipeline.py --- beets-1.3.8+dfsg/beets/util/pipeline.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/beets/util/pipeline.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. 
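
The raw_seconds_short() helper added to beets/util/__init__.py above turns an M:SS string into a float number of seconds and rejects anything else with ValueError. A quick sketch with made-up durations:

    from beets import util

    assert util.raw_seconds_short(u'3:25') == 205.0
    try:
        util.raw_seconds_short(u'3:95')   # seconds must be 00-59
    except ValueError as exc:
        print(exc)                        # "String not in M:SS format"
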
# # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -30,7 +31,8 @@ To do so, pass an iterable of coroutines to the Pipeline constructor in place of any single coroutine. """ -from __future__ import print_function + +from __future__ import division, absolute_import, print_function import Queue from threading import Thread, Lock @@ -246,7 +248,7 @@ # Get the value from the generator. try: - msg = self.coro.next() + msg = next(self.coro) except StopIteration: break @@ -279,7 +281,7 @@ def run(self): try: # Prime the coroutine. - self.coro.next() + next(self.coro) while True: with self.abort_lock: @@ -324,7 +326,7 @@ def run(self): # Prime the coroutine. - self.coro.next() + next(self.coro) try: while True: @@ -359,7 +361,7 @@ be at least two stages. """ if len(stages) < 2: - raise ValueError('pipeline must have at least two stages') + raise ValueError(u'pipeline must have at least two stages') self.stages = [] for stage in stages: if isinstance(stage, (list, tuple)): @@ -442,7 +444,7 @@ # "Prime" the coroutines. for coro in coros[1:]: - coro.next() + next(coro) # Begin the pipeline. for out in coros[0]: @@ -464,14 +466,14 @@ # in parallel. def produce(): for i in range(5): - print('generating %i' % i) + print(u'generating %i' % i) time.sleep(1) yield i def work(): num = yield while True: - print('processing %i' % num) + print(u'processing %i' % num) time.sleep(2) num = yield num * 2 @@ -479,7 +481,7 @@ while True: num = yield time.sleep(1) - print('received %i' % num) + print(u'received %i' % num) ts_start = time.time() Pipeline([produce(), work(), consume()]).run_sequential() @@ -488,22 +490,22 @@ ts_par = time.time() Pipeline([produce(), (work(), work()), consume()]).run_parallel() ts_end = time.time() - print('Sequential time:', ts_seq - ts_start) - print('Parallel time:', ts_par - ts_seq) - print('Multiply-parallel time:', ts_end - ts_par) + print(u'Sequential time:', ts_seq - ts_start) + print(u'Parallel time:', ts_par - ts_seq) + print(u'Multiply-parallel time:', ts_end - ts_par) print() # Test a pipeline that raises an exception. def exc_produce(): for i in range(10): - print('generating %i' % i) + print(u'generating %i' % i) time.sleep(1) yield i def exc_work(): num = yield while True: - print('processing %i' % num) + print(u'processing %i' % num) time.sleep(3) if num == 3: raise Exception() @@ -512,6 +514,6 @@ def exc_consume(): while True: num = yield - print('received %i' % num) + print(u'received %i' % num) Pipeline([exc_produce(), exc_work(), exc_consume()]).run_parallel(1) diff -Nru beets-1.3.8+dfsg/beets/vfs.py beets-1.3.19/beets/vfs.py --- beets-1.3.8+dfsg/beets/vfs.py 2014-04-10 18:18:13.000000000 +0000 +++ beets-1.3.19/beets/vfs.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,6 +16,8 @@ """A simple utility for constructing filesystem-like trees from beets libraries. 
""" +from __future__ import division, absolute_import, print_function + from collections import namedtuple from beets import util diff -Nru beets-1.3.8+dfsg/beets.egg-info/pbr.json beets-1.3.19/beets.egg-info/pbr.json --- beets-1.3.8+dfsg/beets.egg-info/pbr.json 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beets.egg-info/pbr.json 2016-06-26 00:52:50.000000000 +0000 @@ -0,0 +1 @@ +{"is_release": false, "git_version": "9d66e2c"} \ No newline at end of file diff -Nru beets-1.3.8+dfsg/beets.egg-info/PKG-INFO beets-1.3.19/beets.egg-info/PKG-INFO --- beets-1.3.8+dfsg/beets.egg-info/PKG-INFO 2014-09-18 02:18:16.000000000 +0000 +++ beets-1.3.19/beets.egg-info/PKG-INFO 2016-06-26 00:52:50.000000000 +0000 @@ -1,19 +1,23 @@ Metadata-Version: 1.1 Name: beets -Version: 1.3.8 +Version: 1.3.19 Summary: music tagger and library organizer -Home-page: http://beets.radbox.org/ +Home-page: http://beets.io/ Author: Adrian Sampson Author-email: adrian@radbox.org License: MIT -Description: .. image:: https://travis-ci.org/sampsyo/beets.svg?branch=master - :target: https://travis-ci.org/sampsyo/beets +Description: .. image:: http://img.shields.io/pypi/v/beets.svg + :target: https://pypi.python.org/pypi/beets - .. image:: http://img.shields.io/coveralls/sampsyo/beets.svg - :target: https://coveralls.io/r/sampsyo/beets + .. image:: https://img.shields.io/pypi/dw/beets.svg + :target: https://pypi.python.org/pypi/beets#downloads + + .. image:: http://img.shields.io/codecov/c/github/beetbox/beets.svg + :target: https://codecov.io/github/beetbox/beets + + .. image:: https://travis-ci.org/beetbox/beets.svg?branch=master + :target: https://travis-ci.org/beetbox/beets - .. image:: http://img.shields.io/pypi/v/beets.svg - :target: https://pypi.python.org/pypi/beets Beets is the media library management system for obsessive-compulsive music geeks. @@ -39,7 +43,7 @@ - Fetch or calculate all the metadata you could possibly need: `album art`_, `lyrics`_, `genres`_, `tempos`_, `ReplayGain`_ levels, or `acoustic fingerprints`_. - - Get metadata from `MusicBrainz`_, `Discogs`_, or `Beatport`_. Or guess + - Get metadata from `MusicBrainz`_, `Discogs`_, and `Beatport`_. Or guess metadata using songs' filenames or their acoustic fingerprints. - `Transcode audio`_ to any format you like. - Check your library for `duplicate tracks and albums`_ or for `albums that @@ -68,16 +72,16 @@ http://beets.readthedocs.org/page/plugins/duplicates.html .. _Transcode audio: http://beets.readthedocs.org/page/plugins/convert.html - .. _Beatport: http://www.beatport.com/ .. _Discogs: http://www.discogs.com/ .. _acoustic fingerprints: http://beets.readthedocs.org/page/plugins/chroma.html .. _ReplayGain: http://beets.readthedocs.org/page/plugins/replaygain.html - .. _tempos: http://beets.readthedocs.org/page/plugins/echonest_tempo.html + .. _tempos: http://beets.readthedocs.org/page/plugins/acousticbrainz.html .. _genres: http://beets.readthedocs.org/page/plugins/lastgenre.html .. _album art: http://beets.readthedocs.org/page/plugins/fetchart.html .. _lyrics: http://beets.readthedocs.org/page/plugins/lyrics.html .. _MusicBrainz: http://musicbrainz.org/ + .. _Beatport: https://www.beatport.com Read More --------- @@ -88,7 +92,7 @@ You can install beets by typing ``pip install beets``. Then check out the `Getting Started`_ guide. - .. _its Web site: http://beets.radbox.org/ + .. _its Web site: http://beets.io/ .. _Getting Started: http://beets.readthedocs.org/page/guides/main.html .. 
_@b33ts: http://twitter.com/b33ts/ @@ -108,5 +112,4 @@ Classifier: Environment :: Console Classifier: Environment :: Web Environment Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 diff -Nru beets-1.3.8+dfsg/beets.egg-info/requires.txt beets-1.3.19/beets.egg-info/requires.txt --- beets-1.3.8+dfsg/beets.egg-info/requires.txt 2014-09-18 02:18:16.000000000 +0000 +++ beets-1.3.19/beets.egg-info/requires.txt 2016-06-26 00:52:50.000000000 +0000 @@ -1,24 +1,19 @@ -enum34 -mutagen>=1.23 +enum34>=1.0.4 +mutagen>=1.27 munkres unidecode musicbrainzngs>=0.4 pyyaml +jellyfish [beatport] -requests +requests-oauthlib>=0.6.1 [chroma] pyacoustid [discogs] -discogs-client>=2.0.0 - -[echonest] -pyechonest - -[echonest_tempo] -pyechonest +discogs-client>=2.1.0 [fetchart] requests @@ -29,8 +24,16 @@ [lastgenre] pylast +[metasync] +dbus-python + [mpdstats] -python-mpd +python-mpd2 + +[thumbnails] +pathlib +pyxdg [web] flask +flask-cors diff -Nru beets-1.3.8+dfsg/beets.egg-info/SOURCES.txt beets-1.3.19/beets.egg-info/SOURCES.txt --- beets-1.3.8+dfsg/beets.egg-info/SOURCES.txt 2014-09-18 02:18:17.000000000 +0000 +++ beets-1.3.19/beets.egg-info/SOURCES.txt 2016-06-26 00:52:53.000000000 +0000 @@ -4,9 +4,11 @@ setup.cfg setup.py beets/__init__.py +beets/art.py beets/config_default.yaml beets/importer.py beets/library.py +beets/logging.py beets/mediafile.py beets/plugins.py beets/vfs.py @@ -14,6 +16,7 @@ beets.egg-info/SOURCES.txt beets.egg-info/dependency_links.txt beets.egg-info/entry_points.txt +beets.egg-info/pbr.json beets.egg-info/requires.txt beets.egg-info/top_level.txt beets/autotag/__init__.py @@ -28,43 +31,55 @@ beets/ui/__init__.py beets/ui/commands.py beets/ui/completion_base.sh -beets/ui/migrate.py beets/util/__init__.py beets/util/artresizer.py beets/util/bluelet.py beets/util/confit.py beets/util/enumeration.py beets/util/functemplate.py +beets/util/hidden.py beets/util/pipeline.py beetsplug/__init__.py +beetsplug/acousticbrainz.py +beetsplug/badfiles.py beetsplug/beatport.py beetsplug/bench.py beetsplug/bpm.py beetsplug/bucket.py beetsplug/chroma.py beetsplug/convert.py +beetsplug/cue.py beetsplug/discogs.py beetsplug/duplicates.py -beetsplug/echonest.py -beetsplug/echonest_tempo.py +beetsplug/edit.py beetsplug/embedart.py +beetsplug/embyupdate.py +beetsplug/export.py beetsplug/fetchart.py +beetsplug/filefilter.py +beetsplug/freedesktop.py beetsplug/fromfilename.py beetsplug/ftintitle.py beetsplug/fuzzy.py +beetsplug/hook.py beetsplug/ihate.py beetsplug/importadded.py beetsplug/importfeeds.py beetsplug/info.py beetsplug/inline.py +beetsplug/ipfs.py beetsplug/keyfinder.py +beetsplug/lastimport.py beetsplug/lyrics.py beetsplug/mbcollection.py +beetsplug/mbsubmit.py beetsplug/mbsync.py beetsplug/missing.py beetsplug/mpdstats.py beetsplug/mpdupdate.py +beetsplug/permissions.py beetsplug/play.py +beetsplug/plexupdate.py beetsplug/random.py beetsplug/replaygain.py beetsplug/rewrite.py @@ -72,6 +87,7 @@ beetsplug/smartplaylist.py beetsplug/spotify.py beetsplug/the.py +beetsplug/thumbnails.py beetsplug/types.py beetsplug/zero.py beetsplug/bpd/__init__.py @@ -79,6 +95,9 @@ beetsplug/lastgenre/__init__.py beetsplug/lastgenre/genres-tree.yaml beetsplug/lastgenre/genres.txt +beetsplug/metasync/__init__.py +beetsplug/metasync/amarok.py +beetsplug/metasync/itunes.py beetsplug/web/__init__.py beetsplug/web/static/backbone.js beetsplug/web/static/beets.css @@ -91,6 +110,7 @@ docs/conf.py docs/faq.rst docs/index.rst 
+docs/serve.py docs/dev/api.rst docs/dev/index.rst docs/dev/media_file.rst @@ -98,8 +118,9 @@ docs/guides/advanced.rst docs/guides/index.rst docs/guides/main.rst -docs/guides/migration.rst docs/guides/tagger.rst +docs/plugins/acousticbrainz.rst +docs/plugins/badfiles.rst docs/plugins/beatport.rst docs/plugins/beetsweb.png docs/plugins/bpd.rst @@ -109,28 +130,38 @@ docs/plugins/convert.rst docs/plugins/discogs.rst docs/plugins/duplicates.rst -docs/plugins/echonest.rst -docs/plugins/echonest_tempo.rst +docs/plugins/edit.rst docs/plugins/embedart.rst +docs/plugins/embyupdate.rst +docs/plugins/export.rst docs/plugins/fetchart.rst +docs/plugins/filefilter.rst +docs/plugins/freedesktop.rst docs/plugins/fromfilename.rst docs/plugins/ftintitle.rst docs/plugins/fuzzy.rst +docs/plugins/hook.rst docs/plugins/ihate.rst docs/plugins/importadded.rst docs/plugins/importfeeds.rst docs/plugins/index.rst docs/plugins/info.rst docs/plugins/inline.rst +docs/plugins/ipfs.rst docs/plugins/keyfinder.rst docs/plugins/lastgenre.rst +docs/plugins/lastimport.rst docs/plugins/lyrics.rst docs/plugins/mbcollection.rst +docs/plugins/mbsubmit.rst docs/plugins/mbsync.rst +docs/plugins/metasync.rst docs/plugins/missing.rst docs/plugins/mpdstats.rst docs/plugins/mpdupdate.rst +docs/plugins/permissions.rst docs/plugins/play.rst +docs/plugins/plexupdate.rst docs/plugins/random.rst docs/plugins/replaygain.rst docs/plugins/rewrite.rst @@ -138,6 +169,7 @@ docs/plugins/smartplaylist.rst docs/plugins/spotify.rst docs/plugins/the.rst +docs/plugins/thumbnails.rst docs/plugins/types.rst docs/plugins/web.rst docs/plugins/zero.rst @@ -151,7 +183,7 @@ test/__init__.py test/_common.py test/helper.py -test/lyrics_sources.py +test/lyrics_download_samples.py test/test_art.py test/test_autotag.py test/test_bucket.py @@ -159,42 +191,66 @@ test/test_convert.py test/test_datequery.py test/test_dbcore.py -test/test_echonest.py +test/test_edit.py test/test_embedart.py +test/test_embyupdate.py test/test_fetchart.py +test/test_filefilter.py test/test_files.py +test/test_ftintitle.py +test/test_hidden.py +test/test_hook.py test/test_ihate.py +test/test_importadded.py test/test_importer.py test/test_importfeeds.py test/test_info.py +test/test_ipfs.py test/test_keyfinder.py test/test_lastgenre.py test/test_library.py +test/test_logging.py test/test_lyrics.py test/test_mb.py +test/test_mbsubmit.py test/test_mbsync.py test/test_mediafile.py test/test_mediafile_edge.py +test/test_metasync.py +test/test_mpdstats.py +test/test_permissions.py test/test_pipeline.py +test/test_play.py test/test_player.py +test/test_plexupdate.py test/test_plugins.py test/test_query.py test/test_replaygain.py +test/test_smartplaylist.py test/test_sort.py test/test_spotify.py test/test_template.py test/test_the.py +test/test_thumbnails.py test/test_types_plugin.py test/test_ui.py +test/test_ui_commands.py test/test_ui_importer.py +test/test_ui_init.py +test/test_util.py test/test_vfs.py test/test_web.py test/test_zero.py test/testall.py +test/rsrc/abbey-different.jpg +test/rsrc/abbey-similar.jpg +test/rsrc/abbey.jpg test/rsrc/archive.rar test/rsrc/bpm.mp3 +test/rsrc/convert_stub.py test/rsrc/coverart.ogg test/rsrc/date.mp3 +test/rsrc/date_with_slashes.ogg test/rsrc/discc.ogg test/rsrc/empty.aiff test/rsrc/empty.alac.m4a @@ -222,21 +278,29 @@ test/rsrc/image-2x3.jpg test/rsrc/image-2x3.png test/rsrc/image-2x3.tiff +test/rsrc/image.ape test/rsrc/image.flac test/rsrc/image.m4a test/rsrc/image.mp3 test/rsrc/image.ogg test/rsrc/image.wma +test/rsrc/image_unknown_type.mp3 
+test/rsrc/itunes_library_unix.xml +test/rsrc/itunes_library_windows.xml test/rsrc/lyricstext.yaml test/rsrc/min.flac test/rsrc/min.m4a test/rsrc/min.mp3 test/rsrc/oldape.ape +test/rsrc/only-magic-bytes.jpg test/rsrc/partial.flac test/rsrc/partial.m4a test/rsrc/partial.mp3 +test/rsrc/pure.wma +test/rsrc/soundcheck-nonascii.m4a test/rsrc/space_time.mp3 test/rsrc/t_time.m4a +test/rsrc/test_completion.sh test/rsrc/unicode’d.mp3 test/rsrc/unparseable.aiff test/rsrc/unparseable.alac.m4a @@ -251,22 +315,6 @@ test/rsrc/unparseable.wv test/rsrc/year.ogg test/rsrc/beetsplug/test.py -test/rsrc/beetsplug/test.pyc -test/rsrc/lyrics/absolutelyricscom/ladymadonna.txt -test/rsrc/lyrics/azlyricscom/ladymadonnahtml.txt -test/rsrc/lyrics/chartlyricscom/LadyMadonnaaspx.txt -test/rsrc/lyrics/elyricsworldcom/ladymadonnalyricsbeatleshtml.txt -test/rsrc/lyrics/lacoccinellenet/550512html.txt -test/rsrc/lyrics/lyrics007com/Lady20Madonna20Lyricshtml.txt -test/rsrc/lyrics/lyricscom/ladymadonnalyricsthebeatleshtml.txt -test/rsrc/lyrics/lyricsmaniacom/heyitsoklyricslillywoodandtheprickhtml.txt -test/rsrc/lyrics/lyricsontopcom/jazznblueslyricshtml.txt -test/rsrc/lyrics/lyricswikiacom/TheBeatlesLadyMadonna.txt -test/rsrc/lyrics/metrolyricscom/bestforlastlyricsadelehtml.txt -test/rsrc/lyrics/parolesnet/parolesheyitsok.txt -test/rsrc/lyrics/reggaelyricsinfo/icouldbeatmyself.txt -test/rsrc/lyrics/releaselyricscom/thebeatlesladymadonna.txt -test/rsrc/lyrics/smartlyricscom/Song18148TheBeatlesLadyMadonnalyricsaspx.txt -test/rsrc/lyrics/songlyricscom/ladymadonnalyrics.txt -test/rsrc/lyrics/sweetslyricscom/761696The20Beatles2020Lady20Madonnahtml.txt -test/rsrc/lyrics/sweetslyricscom/best-for-last-lyrics-adelehtml.txt.html \ No newline at end of file +test/rsrc/lyrics/examplecom/beetssong.txt +test/rsrc/spotify/missing_request.json +test/rsrc/spotify/track_request.json \ No newline at end of file diff -Nru beets-1.3.8+dfsg/beets.egg-info/top_level.txt beets-1.3.19/beets.egg-info/top_level.txt --- beets-1.3.8+dfsg/beets.egg-info/top_level.txt 2014-09-18 02:18:16.000000000 +0000 +++ beets-1.3.19/beets.egg-info/top_level.txt 2016-06-26 00:52:50.000000000 +0000 @@ -1,2 +1,2 @@ -beetsplug beets +beetsplug diff -Nru beets-1.3.8+dfsg/beetsplug/acousticbrainz.py beets-1.3.19/beetsplug/acousticbrainz.py --- beets-1.3.8+dfsg/beetsplug/acousticbrainz.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beetsplug/acousticbrainz.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,165 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2015-2016, Ohm Patel. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Fetch various AcousticBrainz metadata using MBID. 
+""" +from __future__ import division, absolute_import, print_function + +import requests +import operator + +from beets import plugins, ui +from functools import reduce + +ACOUSTIC_BASE = "https://acousticbrainz.org/" +LEVELS = ["/low-level", "/high-level"] + + +class AcousticPlugin(plugins.BeetsPlugin): + def __init__(self): + super(AcousticPlugin, self).__init__() + + self.config.add({'auto': True}) + if self.config['auto']: + self.register_listener('import_task_files', + self.import_task_files) + + def commands(self): + cmd = ui.Subcommand('acousticbrainz', + help=u"fetch metadata from AcousticBrainz") + + def func(lib, opts, args): + items = lib.items(ui.decargs(args)) + fetch_info(self._log, items, ui.should_write()) + + cmd.func = func + return [cmd] + + def import_task_files(self, session, task): + """Function is called upon beet import. + """ + + items = task.imported_items() + fetch_info(self._log, items, False) + + +def fetch_info(log, items, write): + """Get data from AcousticBrainz for the items. + """ + + def get_value(*map_path): + try: + return reduce(operator.getitem, map_path, data) + except KeyError: + log.debug(u'Invalid Path: {}', map_path) + + for item in items: + if item.mb_trackid: + log.info(u'getting data for: {}', item) + + # Fetch the data from the AB API. + urls = [generate_url(item.mb_trackid, path) for path in LEVELS] + log.debug(u'fetching URLs: {}', ' '.join(urls)) + try: + res = [requests.get(url) for url in urls] + except requests.RequestException as exc: + log.info(u'request error: {}', exc) + continue + + # Check for missing tracks. + if any(r.status_code == 404 for r in res): + log.info(u'recording ID {} not found', item.mb_trackid) + continue + + # Parse the JSON response. + try: + data = res[0].json() + data.update(res[1].json()) + except ValueError: + log.debug(u'Invalid Response: {} & {}', [r.text for r in res]) + + # Get each field and assign it on the item. + item.danceable = get_value( + "highlevel", "danceability", "all", "danceable", + ) + item.gender = get_value( + "highlevel", "gender", "value", + ) + item.genre_rosamerica = get_value( + "highlevel", "genre_rosamerica", "value" + ) + item.mood_acoustic = get_value( + "highlevel", "mood_acoustic", "all", "acoustic" + ) + item.mood_aggressive = get_value( + "highlevel", "mood_aggressive", "all", "aggressive" + ) + item.mood_electronic = get_value( + "highlevel", "mood_electronic", "all", "electronic" + ) + item.mood_happy = get_value( + "highlevel", "mood_happy", "all", "happy" + ) + item.mood_party = get_value( + "highlevel", "mood_party", "all", "party" + ) + item.mood_relaxed = get_value( + "highlevel", "mood_relaxed", "all", "relaxed" + ) + item.mood_sad = get_value( + "highlevel", "mood_sad", "all", "sad" + ) + item.rhythm = get_value( + "highlevel", "ismir04_rhythm", "value" + ) + item.tonal = get_value( + "highlevel", "tonal_atonal", "all", "tonal" + ) + item.voice_instrumental = get_value( + "highlevel", "voice_instrumental", "value" + ) + item.average_loudness = get_value( + "lowlevel", "average_loudness" + ) + item.chords_changes_rate = get_value( + "tonal", "chords_changes_rate" + ) + item.chords_key = get_value( + "tonal", "chords_key" + ) + item.chords_number_rate = get_value( + "tonal", "chords_number_rate" + ) + item.chords_scale = get_value( + "tonal", "chords_scale" + ) + item.initial_key = '{} {}'.format( + get_value("tonal", "key_key"), + get_value("tonal", "key_scale") + ) + item.key_strength = get_value( + "tonal", "key_strength" + ) + + # Store the data. 
+ item.store() + if write: + item.try_write() + + +def generate_url(mbid, level): + """Generates AcousticBrainz end point url for given MBID. + """ + return ACOUSTIC_BASE + mbid + level diff -Nru beets-1.3.8+dfsg/beetsplug/badfiles.py beets-1.3.19/beetsplug/badfiles.py --- beets-1.3.8+dfsg/beetsplug/badfiles.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beetsplug/badfiles.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, François-Xavier Thomas. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Use command-line tools to check for audio file corruption. +""" + +from __future__ import division, absolute_import, print_function + +from beets.plugins import BeetsPlugin +from beets.ui import Subcommand +from beets.util import displayable_path, confit +from beets import ui +from subprocess import check_output, CalledProcessError, list2cmdline, STDOUT +import shlex +import os +import errno +import sys + + +class BadFiles(BeetsPlugin): + def run_command(self, cmd): + self._log.debug(u"running command: {}", + displayable_path(list2cmdline(cmd))) + try: + output = check_output(cmd, stderr=STDOUT) + errors = 0 + status = 0 + except CalledProcessError as e: + output = e.output + errors = 1 + status = e.returncode + except OSError as e: + if e.errno == errno.ENOENT: + ui.print_(u"command not found: {}".format(cmd[0])) + sys.exit(1) + else: + raise + output = output.decode(sys.getfilesystemencoding()) + return status, errors, [line for line in output.split("\n") if line] + + def check_mp3val(self, path): + status, errors, output = self.run_command(["mp3val", path]) + if status == 0: + output = [line for line in output if line.startswith("WARNING:")] + errors = len(output) + return status, errors, output + + def check_flac(self, path): + return self.run_command(["flac", "-wst", path]) + + def check_custom(self, command): + def checker(path): + cmd = shlex.split(command) + cmd.append(path) + return self.run_command(cmd) + return checker + + def get_checker(self, ext): + ext = ext.lower() + try: + command = self.config['commands'].get(dict).get(ext) + except confit.NotFoundError: + command = None + if command: + return self.check_custom(command) + elif ext == "mp3": + return self.check_mp3val + elif ext == "flac": + return self.check_flac + + def check_bad(self, lib, opts, args): + for item in lib.items(ui.decargs(args)): + + # First, check whether the path exists. If not, the user + # should probably run `beet update` to cleanup your library. 
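
For extensions without a built-in checker, get_checker() falls back to a user-configured command via check_custom(), which splits the configured string with shlex and appends the file path. A sketch of the argument list it hands to run_command(); the 'oggz-validate' tool and the path are illustrative assumptions, not plugin defaults:

    import shlex

    command = 'oggz-validate'   # hypothetical entry in the plugin's 'commands' config
    path = '/music/track.ogg'   # illustrative file path
    cmd = shlex.split(command)
    cmd.append(path)
    print(cmd)                  # ['oggz-validate', '/music/track.ogg']
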
+ dpath = displayable_path(item.path) + self._log.debug(u"checking path: {}", dpath) + if not os.path.exists(item.path): + ui.print_(u"{}: file does not exist".format( + ui.colorize('text_error', dpath))) + + # Run the checker against the file if one is found + ext = os.path.splitext(item.path)[1][1:] + checker = self.get_checker(ext) + if not checker: + continue + path = item.path + if not isinstance(path, unicode): + path = item.path.decode(sys.getfilesystemencoding()) + status, errors, output = checker(path) + if status > 0: + ui.print_(u"{}: checker exited withs status {}" + .format(ui.colorize('text_error', dpath), status)) + for line in output: + ui.print_(" {}".format(displayable_path(line))) + elif errors > 0: + ui.print_(u"{}: checker found {} errors or warnings" + .format(ui.colorize('text_warning', dpath), errors)) + for line in output: + ui.print_(u" {}".format(displayable_path(line))) + else: + ui.print_(u"{}: ok".format(ui.colorize('text_success', dpath))) + + def commands(self): + bad_command = Subcommand('bad', + help=u'check for corrupt or missing files') + bad_command.func = self.check_bad + return [bad_command] diff -Nru beets-1.3.8+dfsg/beetsplug/beatport.py beets-1.3.19/beetsplug/beatport.py --- beets-1.3.8+dfsg/beetsplug/beatport.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/beetsplug/beatport.py 2016-06-26 00:42:09.000000000 +0000 @@ -1,5 +1,5 @@ # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,16 +14,25 @@ """Adds Beatport release and track search support to the autotagger """ -import logging +from __future__ import division, absolute_import, print_function + +import json import re from datetime import datetime, timedelta -import requests +from requests_oauthlib import OAuth1Session +from requests_oauthlib.oauth1_session import (TokenRequestDenied, TokenMissing, + VerifierMissing) +import beets +import beets.ui from beets.autotag.hooks import AlbumInfo, TrackInfo, Distance from beets.plugins import BeetsPlugin +from beets.util import confit + -log = logging.getLogger('beets') +AUTH_ERRORS = (TokenRequestDenied, TokenMissing, VerifierMissing) +USER_AGENT = u'beets/{0} +http://beets.io/'.format(beets.__version__) class BeatportAPIError(Exception): @@ -45,15 +54,140 @@ for x in data['genres']] -class BeatportAPI(object): - API_BASE = 'http://api.beatport.com/' +class BeatportClient(object): + _api_base = 'https://oauth-api.beatport.com' - @classmethod - def get(cls, endpoint, **kwargs): + def __init__(self, c_key, c_secret, auth_key=None, auth_secret=None): + """ Initiate the client with OAuth information. + + For the initial authentication with the backend `auth_key` and + `auth_secret` can be `None`. Use `get_authorize_url` and + `get_access_token` to obtain them for subsequent uses of the API. + + :param c_key: OAuth1 client key + :param c_secret: OAuth1 client secret + :param auth_key: OAuth1 resource owner key + :param auth_secret: OAuth1 resource owner secret + """ + self.api = OAuth1Session( + client_key=c_key, client_secret=c_secret, + resource_owner_key=auth_key, + resource_owner_secret=auth_secret, + callback_uri='oob') + self.api.headers = {'User-Agent': USER_AGENT} + + def get_authorize_url(self): + """ Generate the URL for the user to authorize the application. 
+ + Retrieves a request token from the Beatport API and returns the + corresponding authorization URL on their end that the user has + to visit. + + This is the first step of the initial authorization process with the + API. Once the user has visited the URL, call + :py:method:`get_access_token` with the displayed data to complete + the process. + + :returns: Authorization URL for the user to visit + :rtype: unicode + """ + self.api.fetch_request_token( + self._make_url('/identity/1/oauth/request-token')) + return self.api.authorization_url( + self._make_url('/identity/1/oauth/authorize')) + + def get_access_token(self, auth_data): + """ Obtain the final access token and secret for the API. + + :param auth_data: URL-encoded authorization data as displayed at + the authorization url (obtained via + :py:meth:`get_authorize_url`) after signing in + :type auth_data: unicode + :returns: OAuth resource owner key and secret + :rtype: (unicode, unicode) tuple + """ + self.api.parse_authorization_response( + "http://beets.io/auth?" + auth_data) + access_data = self.api.fetch_access_token( + self._make_url('/identity/1/oauth/access-token')) + return access_data['oauth_token'], access_data['oauth_token_secret'] + + def search(self, query, release_type='release', details=True): + """ Perform a search of the Beatport catalogue. + + :param query: Query string + :param release_type: Type of releases to search for, can be + 'release' or 'track' + :param details: Retrieve additional information about the + search results. Currently this will fetch + the tracklist for releases and do nothing for + tracks + :returns: Search results + :rtype: generator that yields + py:class:`BeatportRelease` or + :py:class:`BeatportTrack` + """ + response = self._get('catalog/3/search', + query=query, perPage=5, + facets=['fieldType:{0}'.format(release_type)]) + for item in response: + if release_type == 'release': + if details: + release = self.get_release(item['id']) + else: + release = BeatportRelease(item) + yield release + elif release_type == 'track': + yield BeatportTrack(item) + + def get_release(self, beatport_id): + """ Get information about a single release. + + :param beatport_id: Beatport ID of the release + :returns: The matching release + :rtype: :py:class:`BeatportRelease` + """ + response = self._get('/catalog/3/releases', id=beatport_id) + release = BeatportRelease(response[0]) + release.tracks = self.get_release_tracks(beatport_id) + return release + + def get_release_tracks(self, beatport_id): + """ Get all tracks for a given release. + + :param beatport_id: Beatport ID of the release + :returns: Tracks in the matching release + :rtype: list of :py:class:`BeatportTrack` + """ + response = self._get('/catalog/3/tracks', releaseId=beatport_id) + return [BeatportTrack(t) for t in response] + + def get_track(self, beatport_id): + """ Get information about a single track. + + :param beatport_id: Beatport ID of the track + :returns: The matching track + :rtype: :py:class:`BeatportTrack` + """ + response = self._get('/catalog/3/tracks', id=beatport_id) + return BeatportTrack(response[0]) + + def _make_url(self, endpoint): + """ Get complete URL for a given API endpoint. """ + if not endpoint.startswith('/'): + endpoint = '/' + endpoint + return self._api_base + endpoint + + def _get(self, endpoint, **kwargs): + """ Perform a GET request on a given API endpoint. + + Automatically extracts result data from the response and converts HTTP + exceptions into :py:class:`BeatportAPIError` objects. 
+ """ try: - response = requests.get(cls.API_BASE + endpoint, params=kwargs) + response = self.api.get(self._make_url(endpoint), params=kwargs) except Exception as e: - raise BeatportAPIError("Error connection to Beatport API: {}" + raise BeatportAPIError("Error connecting to Beatport API: {}" .format(e.message)) if not response: raise BeatportAPIError( @@ -62,35 +196,7 @@ return response.json()['results'] -class BeatportSearch(object): - query = None - release_type = None - - def __unicode__(self): - return u''.format( - self.release_type, self.query, len(self.results)) - - def __init__(self, query, release_type='release', details=True): - self.results = [] - self.query = query - self.release_type = release_type - response = BeatportAPI.get('catalog/3/search', query=query, - facets=['fieldType:{0}' - .format(release_type)], - perPage=5) - for item in response: - if release_type == 'release': - release = BeatportRelease(item) - if details: - release.get_tracks() - self.results.append(release) - elif release_type == 'track': - self.results.append(BeatportTrack(item)) - - class BeatportRelease(BeatportObject): - API_ENDPOINT = 'catalog/3/beatport/release' - def __unicode__(self): if len(self.artists) < 4: artist_str = ", ".join(x[1] for x in self.artists) @@ -102,6 +208,9 @@ self.catalog_number, ) + def __repr__(self): + return unicode(self).encode('utf8') + def __init__(self, data): BeatportObject.__init__(self, data) if 'catalogNumber' in data: @@ -114,26 +223,15 @@ self.url = "http://beatport.com/release/{0}/{1}".format( data['slug'], data['id']) - @classmethod - def from_id(cls, beatport_id): - response = BeatportAPI.get(cls.API_ENDPOINT, id=beatport_id) - release = BeatportRelease(response['release']) - release.tracks = [BeatportTrack(x) for x in response['tracks']] - return release - - def get_tracks(self): - response = BeatportAPI.get(self.API_ENDPOINT, id=self.beatport_id) - self.tracks = [BeatportTrack(x) for x in response['tracks']] - class BeatportTrack(BeatportObject): - API_ENDPOINT = 'catalog/3/beatport/track' - def __unicode__(self): artist_str = ", ".join(x[1] for x in self.artists) - return u"".format(artist_str, - self.name, - self.mix_name) + return (u"" + .format(artist_str, self.name, self.mix_name)) + + def __repr__(self): + return unicode(self).encode('utf8') def __init__(self, data): BeatportObject.__init__(self, data) @@ -152,18 +250,69 @@ self.url = "http://beatport.com/track/{0}/{1}".format(data['slug'], data['id']) - @classmethod - def from_id(cls, beatport_id): - response = BeatportAPI.get(cls.API_ENDPOINT, id=beatport_id) - return BeatportTrack(response['track']) - class BeatportPlugin(BeetsPlugin): def __init__(self): super(BeatportPlugin, self).__init__() self.config.add({ + 'apikey': '57713c3906af6f5def151b33601389176b37b429', + 'apisecret': 'b3fe08c93c80aefd749fe871a16cd2bb32e2b954', + 'tokenfile': 'beatport_token.json', 'source_weight': 0.5, }) + self.config['apikey'].redact = True + self.config['apisecret'].redact = True + self.client = None + self.register_listener('import_begin', self.setup) + + def setup(self, session=None): + c_key = self.config['apikey'].get(unicode) + c_secret = self.config['apisecret'].get(unicode) + + # Get the OAuth token from a file or log in. + try: + with open(self._tokenfile()) as f: + tokendata = json.load(f) + except IOError: + # No token yet. Generate one. 
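
setup() caches the OAuth credentials in a small JSON token file and only runs the interactive authenticate() flow when that file cannot be read. The round trip it relies on, with placeholder values and the file written to the current directory here (beets itself stores it in its configuration directory via confit.Filename(in_app_dir=True)):

    import json
    import os

    tokenfile = 'beatport_token.json'

    # First run: authenticate() would produce these values from the OAuth dance.
    with open(tokenfile, 'w') as f:
        json.dump({'token': 'TOKEN', 'secret': 'SECRET'}, f)

    # Later runs: setup() simply reloads them.
    with open(tokenfile) as f:
        tokendata = json.load(f)
    token, secret = tokendata['token'], tokendata['secret']

    os.remove(tokenfile)        # cleanup for this sketch only
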
+ token, secret = self.authenticate(c_key, c_secret) + else: + token = tokendata['token'] + secret = tokendata['secret'] + + self.client = BeatportClient(c_key, c_secret, token, secret) + + def authenticate(self, c_key, c_secret): + # Get the link for the OAuth page. + auth_client = BeatportClient(c_key, c_secret) + try: + url = auth_client.get_authorize_url() + except AUTH_ERRORS as e: + self._log.debug(u'authentication error: {0}', e) + raise beets.ui.UserError(u'communication with Beatport failed') + + beets.ui.print_(u"To authenticate with Beatport, visit:") + beets.ui.print_(url) + + # Ask for the verifier data and validate it. + data = beets.ui.input_(u"Enter the string displayed in your browser:") + try: + token, secret = auth_client.get_access_token(data) + except AUTH_ERRORS as e: + self._log.debug(u'authentication error: {0}', e) + raise beets.ui.UserError(u'Beatport token request failed') + + # Save the token for later use. + self._log.debug(u'Beatport token {0}, secret {1}', token, secret) + with open(self._tokenfile(), 'w') as f: + json.dump({'token': token, 'secret': secret}, f) + + return token, secret + + def _tokenfile(self): + """Get the path to the JSON file for storing the OAuth token. + """ + return self.config['tokenfile'].get(confit.Filename(in_app_dir=True)) def album_distance(self, items, album_info, mapping): """Returns the beatport source weight and the maximum source weight @@ -194,7 +343,7 @@ try: return self._get_releases(query) except BeatportAPIError as e: - log.debug(u'Beatport API Error: {0} (query: {1})'.format(e, query)) + self._log.debug(u'API Error: {0} (query: {1})', e, query) return [] def item_candidates(self, item, artist, title): @@ -205,18 +354,18 @@ try: return self._get_tracks(query) except BeatportAPIError as e: - log.debug(u'Beatport API Error: {0} (query: {1})'.format(e, query)) + self._log.debug(u'API Error: {0} (query: {1})', e, query) return [] def album_for_id(self, release_id): """Fetches a release by its Beatport ID and returns an AlbumInfo object or None if the release is not found. """ - log.debug(u'Searching Beatport for release {0}'.format(release_id)) + self._log.debug(u'Searching for release {0}', release_id) match = re.search(r'(^|beatport\.com/release/.+/)(\d+)$', release_id) if not match: return None - release = BeatportRelease.from_id(match.group(2)) + release = self.client.get_release(match.group(2)) album = self._get_album_info(release) return album @@ -224,11 +373,11 @@ """Fetches a track by its Beatport ID and returns a TrackInfo object or None if the track is not found. """ - log.debug(u'Searching Beatport for track {0}'.format(str(track_id))) + self._log.debug(u'Searching for track {0}', track_id) match = re.search(r'(^|beatport\.com/track/.+/)(\d+)$', track_id) if not match: return None - bp_track = BeatportTrack.from_id(match.group(2)) + bp_track = self.client.get_track(match.group(2)) track = self._get_track_info(bp_track) return track @@ -244,7 +393,7 @@ # can also negate an otherwise positive result. query = re.sub(r'\b(CD|disc)\s*\d+', '', query, re.I) albums = [self._get_album_info(x) - for x in BeatportSearch(query).results] + for x in self.client.search(query)] return albums def _get_album_info(self, release): @@ -302,6 +451,6 @@ def _get_tracks(self, query): """Returns a list of TrackInfo objects for a Beatport query. 
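
album_for_id() and track_for_id() accept either a bare Beatport ID or a full release/track URL and pull out the numeric ID with a regular expression. The same pattern applied to two sample inputs (the URL is made up):

    import re

    pattern = r'(^|beatport\.com/release/.+/)(\d+)$'
    for release_id in (u'1234567',
                       u'https://www.beatport.com/release/some-album/1234567'):
        match = re.search(pattern, release_id)
        print(match.group(2) if match else None)   # u'1234567' both times
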
""" - bp_tracks = BeatportSearch(query, release_type='track').results + bp_tracks = self.client.search(query, release_type='track') tracks = [self._get_track_info(x) for x in bp_tracks] return tracks diff -Nru beets-1.3.8+dfsg/beetsplug/bench.py beets-1.3.19/beetsplug/bench.py --- beets-1.3.8+dfsg/beetsplug/bench.py 2014-04-14 00:45:25.000000000 +0000 +++ beets-1.3.19/beetsplug/bench.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,7 +15,8 @@ """Some simple performance benchmarks for beets. """ -from __future__ import print_function + +from __future__ import division, absolute_import, print_function from beets.plugins import BeetsPlugin from beets import ui @@ -73,7 +75,7 @@ # Run the match. def _run_match(): - match.tag_album(items, search_id=album_id) + match.tag_album(items, search_ids=[album_id]) if prof: cProfile.runctx('_run_match()', {}, {'_run_match': _run_match}, 'match.prof') diff -Nru beets-1.3.8+dfsg/beetsplug/bpd/gstplayer.py beets-1.3.19/beetsplug/bpd/gstplayer.py --- beets-1.3.8+dfsg/beetsplug/bpd/gstplayer.py 2014-04-14 00:39:49.000000000 +0000 +++ beets-1.3.19/beetsplug/bpd/gstplayer.py 2016-06-20 17:09:11.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,19 +16,27 @@ """A wrapper for the GStreamer Python bindings that exposes a simple music player. """ -from __future__ import print_function + +from __future__ import division, absolute_import, print_function import sys import time -import gobject import thread import os import copy import urllib +from beets import ui + +import gi +from gi.repository import GLib, Gst + +gi.require_version('Gst', '1.0') + +Gst.init(None) -import pygst -pygst.require('0.10') -import gst + +class QueryError(Exception): + pass class GstPlayer(object): @@ -55,8 +64,19 @@ # Set up the Gstreamer player. From the pygst tutorial: # http://pygstdocs.berlios.de/pygst-tutorial/playbin.html - self.player = gst.element_factory_make("playbin2", "player") - fakesink = gst.element_factory_make("fakesink", "fakesink") + #### + # Updated to GStreamer 1.0 with: + # https://wiki.ubuntu.com/Novacut/GStreamer1.0 + self.player = Gst.ElementFactory.make("playbin", "player") + + if self.player is None: + raise ui.UserError("Could not create playbin") + + fakesink = Gst.ElementFactory.make("fakesink", "fakesink") + + if fakesink is None: + raise ui.UserError("Could not create fakesink") + self.player.set_property("video-sink", fakesink) bus = self.player.get_bus() bus.add_signal_watch() @@ -72,23 +92,23 @@ """Returns the current state flag of the playbin.""" # gst's get_state function returns a 3-tuple; we just want the # status flag in position 1. 
- return self.player.get_state()[1] + return self.player.get_state(Gst.CLOCK_TIME_NONE)[1] def _handle_message(self, bus, message): """Callback for status updates from GStreamer.""" - if message.type == gst.MESSAGE_EOS: + if message.type == Gst.MessageType.EOS: # file finished playing - self.player.set_state(gst.STATE_NULL) + self.player.set_state(Gst.State.NULL) self.playing = False self.cached_time = None if self.finished_callback: self.finished_callback() - elif message.type == gst.MESSAGE_ERROR: + elif message.type == Gst.MessageType.ERROR: # error - self.player.set_state(gst.STATE_NULL) + self.player.set_state(Gst.State.NULL) err, debug = message.parse_error() - print("Error: " + str(err)) + print(u"Error: {0}".format(err)) self.playing = False def _set_volume(self, volume): @@ -107,27 +127,27 @@ """Immediately begin playing the audio file at the given path. """ - self.player.set_state(gst.STATE_NULL) + self.player.set_state(Gst.State.NULL) if isinstance(path, unicode): path = path.encode('utf8') uri = 'file://' + urllib.quote(path) self.player.set_property("uri", uri) - self.player.set_state(gst.STATE_PLAYING) + self.player.set_state(Gst.State.PLAYING) self.playing = True def play(self): """If paused, resume playback.""" - if self._get_state() == gst.STATE_PAUSED: - self.player.set_state(gst.STATE_PLAYING) + if self._get_state() == Gst.State.PAUSED: + self.player.set_state(Gst.State.PLAYING) self.playing = True def pause(self): """Pause playback.""" - self.player.set_state(gst.STATE_PAUSED) + self.player.set_state(Gst.State.PAUSED) def stop(self): """Halt playback.""" - self.player.set_state(gst.STATE_NULL) + self.player.set_state(Gst.State.NULL) self.playing = False self.cached_time = None @@ -137,12 +157,13 @@ Call this function before trying to play any music with play_file() or play(). """ + # If we don't use the MainLoop, messages are never sent. - gobject.threads_init() def start(): - loop = gobject.MainLoop() + loop = GLib.MainLoop() loop.run() + thread.start_new_thread(start, ()) def time(self): @@ -150,14 +171,22 @@ values are integers in seconds. If no stream is available, returns (0, 0). """ - fmt = gst.Format(gst.FORMAT_TIME) + fmt = Gst.Format(Gst.Format.TIME) try: - pos = self.player.query_position(fmt, None)[0] / (10 ** 9) - length = self.player.query_duration(fmt, None)[0] / (10 ** 9) + posq = self.player.query_position(fmt) + if not posq[0]: + raise QueryError("query_position failed") + pos = posq[1] / (10 ** 9) + + lengthq = self.player.query_duration(fmt) + if not lengthq[0]: + raise QueryError("query_duration failed") + length = lengthq[1] / (10 ** 9) + self.cached_time = (pos, length) return (pos, length) - except gst.QueryError: + except QueryError: # Stream not ready. For small gaps of time, for instance # after seeking, the time values are unavailable. For this # reason, we cache recent. @@ -173,9 +202,9 @@ self.stop() return - fmt = gst.Format(gst.FORMAT_TIME) + fmt = Gst.Format(Gst.Format.TIME) ns = position * 10 ** 9 # convert to nanoseconds - self.player.seek_simple(fmt, gst.SEEK_FLAG_FLUSH, ns) + self.player.seek_simple(fmt, Gst.SeekFlags.FLUSH, ns) # save new cached time self.cached_time = (position, cur_len) @@ -206,12 +235,14 @@ def next_song(): my_paths.pop(0) p.play_file(my_paths[0]) + p = GstPlayer(next_song) p.run() p.play_file(my_paths[0]) while my_paths: time.sleep(1) + if __name__ == '__main__': # A very simple command-line player. Just give it names of audio # files on the command line; these are all played in sequence. 
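
Under GStreamer 1.0, positions and durations are reported in nanoseconds and the query methods return an (ok, value) pair instead of raising gst.QueryError, which is what the new QueryError class and the cached_time fallback above accommodate. A rough sketch; on an idle player with no URI loaded these calls simply report failure, so real code falls back to cached values:

    import gi
    gi.require_version('Gst', '1.0')
    from gi.repository import Gst

    Gst.init(None)
    player = Gst.ElementFactory.make("playbin", "player")

    # Seek to an illustrative 30-second mark; times are nanoseconds.
    player.seek_simple(Gst.Format.TIME, Gst.SeekFlags.FLUSH, 30 * 10 ** 9)

    ok, pos_ns = player.query_position(Gst.Format.TIME)
    if ok:
        print(pos_ns // 10 ** 9)   # position in whole seconds
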
diff -Nru beets-1.3.8+dfsg/beetsplug/bpd/__init__.py beets-1.3.19/beetsplug/bpd/__init__.py --- beets-1.3.8+dfsg/beetsplug/bpd/__init__.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/beetsplug/bpd/__init__.py 2016-06-20 17:09:11.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -16,18 +17,19 @@ Beets library. Attempts to implement a compatible protocol to allow use of the wide range of MPD clients. """ -from __future__ import print_function + +from __future__ import division, absolute_import, print_function import re from string import Template import traceback -import logging import random import time import beets from beets.plugins import BeetsPlugin import beets.ui +from beets import logging from beets import vfs from beets.util import bluelet from beets.library import Item @@ -121,9 +123,9 @@ pass return NewBPDError -ArgumentTypeError = make_bpd_error(ERROR_ARG, 'invalid type for argument') -ArgumentIndexError = make_bpd_error(ERROR_ARG, 'argument out of range') -ArgumentNotFoundError = make_bpd_error(ERROR_NO_EXIST, 'argument not found') +ArgumentTypeError = make_bpd_error(ERROR_ARG, u'invalid type for argument') +ArgumentIndexError = make_bpd_error(ERROR_ARG, u'argument out of range') +ArgumentNotFoundError = make_bpd_error(ERROR_NO_EXIST, u'argument not found') def cast_arg(t, val): @@ -266,7 +268,7 @@ conn.authenticated = True else: conn.authenticated = False - raise BPDError(ERROR_PASSWORD, 'incorrect password') + raise BPDError(ERROR_PASSWORD, u'incorrect password') def cmd_commands(self, conn): """Lists the commands available to the user.""" @@ -570,7 +572,7 @@ if isinstance(lines, basestring): lines = [lines] out = NEWLINE.join(lines) + NEWLINE - log.debug(out[:-1]) # Don't log trailing newline. + log.debug('{}', out[:-1]) # Don't log trailing newline. if isinstance(out, unicode): out = out.encode('utf8') return self.sock.sendall(out) @@ -601,7 +603,7 @@ line = line.strip() if not line: break - log.debug(line) + log.debug('{}', line) if clist is not None: # Command list already opened. @@ -637,8 +639,8 @@ """A command issued by the client for processing by the server. """ - command_re = re.compile(r'^([^ \t]+)[ \t]*') - arg_re = re.compile(r'"((?:\\"|[^"])+)"|([^ \t"]+)') + command_re = re.compile(br'^([^ \t]+)[ \t]*') + arg_re = re.compile(br'"((?:\\"|[^"])+)"|([^ \t"]+)') def __init__(self, s): """Creates a new `Command` from the given string, `s`, parsing @@ -653,7 +655,7 @@ if match[0]: # Quoted argument. arg = match[0] - arg = arg.replace('\\"', '"').replace('\\\\', '\\') + arg = arg.replace(b'\\"', b'"').replace(b'\\\\', b'\\') else: # Unquoted argument. arg = match[1] @@ -696,7 +698,7 @@ except Exception as e: # An "unintentional" error. Hide it from the client. - log.error(traceback.format_exc(e)) + log.error('{}', traceback.format_exc(e)) raise BPDError(ERROR_SYSTEM, u'server error', self.name) @@ -804,9 +806,9 @@ """ # Path is ignored. Also, the real MPD does this asynchronously; # this is done inline. - print('Building directory tree...') + print(u'Building directory tree...') self.tree = vfs.libtree(self.lib) - print('... done.') + print(u'... done.') self.updated_time = time.time() # Path (directory tree) browsing. 
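Editorial note (not part of the patch): the remaining hunks in this file, and in most of the plugins below, follow the same logging overhaul. Module-level 'beets' loggers are dropped in favour of each plugin's self._log, and message arguments are passed separately in str.format style so they are only interpolated when a record is actually emitted. A rough sketch of the convention, using a hypothetical plugin name:

    from beets.plugins import BeetsPlugin

    class ExamplePlugin(BeetsPlugin):
        """Hypothetical plugin illustrating the per-plugin logger."""

        def __init__(self):
            super(ExamplePlugin, self).__init__()
            # self._log is provided by BeetsPlugin; no getLogger() call needed.
            self._log.debug(u'example plugin loaded')

        def process(self, item):
            # Arguments use str.format-style fields and are interpolated
            # lazily, only if this record is actually emitted.
            self._log.info(u'processing {0}', item)
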
@@ -845,7 +847,7 @@ node = self._resolve_path(path) if isinstance(node, int): # Trying to list a track. - raise BPDError(ERROR_ARG, 'this is not a directory') + raise BPDError(ERROR_ARG, u'this is not a directory') else: for name, itemid in iter(sorted(node.files.items())): item = self.lib.get_item(itemid) @@ -1149,38 +1151,43 @@ 'host': u'', 'port': 6600, 'password': u'', + 'volume': VOLUME_MAX, }) + self.config['password'].redact = True - def start_bpd(self, lib, host, port, password, debug): + def start_bpd(self, lib, host, port, password, volume, debug): """Starts a BPD server.""" - if debug: - log.setLevel(logging.DEBUG) + if debug: # FIXME this should be managed by BeetsPlugin + self._log.setLevel(logging.DEBUG) else: - log.setLevel(logging.WARNING) + self._log.setLevel(logging.WARNING) try: - Server(lib, host, port, password).run() + server = Server(lib, host, port, password) + server.cmd_setvol(None, volume) + server.run() except NoGstreamerError: global_log.error(u'Gstreamer Python bindings not found.') - global_log.error(u'Install "python-gst0.10", "py27-gst-python", ' + global_log.error(u'Install "gstreamer1.0" and "python-gi"' u'or similar package to use BPD.') def commands(self): cmd = beets.ui.Subcommand( - 'bpd', help='run an MPD-compatible music player server' + 'bpd', help=u'run an MPD-compatible music player server' ) cmd.parser.add_option( '-d', '--debug', action='store_true', - help='dump all MPD traffic to stdout' + help=u'dump all MPD traffic to stdout' ) def func(lib, opts, args): host = args.pop(0) if args else self.config['host'].get(unicode) port = args.pop(0) if args else self.config['port'].get(int) if args: - raise beets.ui.UserError('too many arguments') + raise beets.ui.UserError(u'too many arguments') password = self.config['password'].get(unicode) + volume = self.config['volume'].get(int) debug = opts.debug or False - self.start_bpd(lib, host, int(port), password, debug) + self.start_bpd(lib, host, int(port), password, volume, debug) cmd.func = func return [cmd] diff -Nru beets-1.3.8+dfsg/beetsplug/bpm.py beets-1.3.19/beetsplug/bpm.py --- beets-1.3.8+dfsg/beetsplug/bpm.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/beetsplug/bpm.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. 
-# Copyright 2014, aroquen +# Copyright 2016, aroquen # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,14 +15,13 @@ """Determine BPM by pressing a key to the rhythm.""" +from __future__ import division, absolute_import, print_function + import time -import logging from beets import ui from beets.plugins import BeetsPlugin -log = logging.getLogger('beets') - def bpm(max_strokes): """Returns average BPM (possibly of a playing song) @@ -58,8 +58,8 @@ def commands(self): cmd = ui.Subcommand('bpm', - help='determine bpm of a song by pressing \ - a key to the rhythm') + help=u'determine bpm of a song by pressing ' + u'a key to the rhythm') cmd.func = self.command return [cmd] @@ -69,19 +69,19 @@ def get_bpm(self, items, write=False): overwrite = self.config['overwrite'].get(bool) if len(items) > 1: - raise ValueError('Can only get bpm of one song at time') + raise ValueError(u'Can only get bpm of one song at time') item = items[0] if item['bpm']: - log.info(u'Found bpm {0}'.format(item['bpm'])) + self._log.info(u'Found bpm {0}', item['bpm']) if not overwrite: return - log.info(u'Press Enter {0} times to the rhythm or Ctrl-D ' - u'to exit'.format(self.config['max_strokes'].get(int))) + self._log.info(u'Press Enter {0} times to the rhythm or Ctrl-D ' + u'to exit', self.config['max_strokes'].get(int)) new_bpm = bpm(self.config['max_strokes'].get(int)) item['bpm'] = int(new_bpm) if write: item.try_write() item.store() - log.info(u'Added new bpm {0}'.format(item['bpm'])) + self._log.info(u'Added new bpm {0}', item['bpm']) diff -Nru beets-1.3.8+dfsg/beetsplug/bucket.py beets-1.3.19/beetsplug/bucket.py --- beets-1.3.8+dfsg/beetsplug/bucket.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/beetsplug/bucket.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Fabrice Laporte. +# Copyright 2016, Fabrice Laporte. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,14 +16,17 @@ """Provides the %bucket{} function for path formatting. """ +from __future__ import division, absolute_import, print_function + from datetime import datetime -import logging import re import string from itertools import tee, izip + from beets import plugins, ui -log = logging.getLogger('beets') + +ASCII_DIGITS = string.digits + string.ascii_lowercase class BucketError(Exception): @@ -44,7 +48,7 @@ """Convert string to a 4 digits year """ if yearfrom < 100: - raise BucketError("%d must be expressed on 4 digits" % yearfrom) + raise BucketError(u"%d must be expressed on 4 digits" % yearfrom) # if two digits only, pick closest year that ends by these two # digits starting from yearfrom @@ -57,12 +61,12 @@ years = [int(x) for x in re.findall('\d+', span_str)] if not years: - raise ui.UserError("invalid range defined for year bucket '%s': no " - "year found" % span_str) + raise ui.UserError(u"invalid range defined for year bucket '%s': no " + u"year found" % span_str) try: years = [normalize_year(x, years[0]) for x in years] except BucketError as exc: - raise ui.UserError("invalid range defined for year bucket '%s': %s" % + raise ui.UserError(u"invalid range defined for year bucket '%s': %s" % (span_str, exc)) res = {'from': years[0], 'str': span_str} @@ -117,13 +121,10 @@ def str2fmt(s): """Deduces formatting syntax from a span string. 
""" - regex = re.compile("(?P\D*)(?P\d+)(?P\D*)" - "(?P\d*)(?P\D*)") + regex = re.compile(r"(?P\D*)(?P\d+)(?P\D*)" + r"(?P\d*)(?P\D*)") m = re.match(regex, s) - def year_format(year): - return '%%0%dd' % len(year) - res = {'fromnchars': len(m.group('fromyear')), 'tonchars': len(m.group('toyear'))} res['fmt'] = "%s%%s%s%s%s" % (m.group('bef'), @@ -139,6 +140,7 @@ args = (str(yearfrom)[-fromnchars:]) if tonchars: args = (str(yearfrom)[-fromnchars:], str(yearto)[-tonchars:]) + return fmt % args @@ -157,23 +159,23 @@ [from...to] """ spans = [] - ASCII_DIGITS = string.digits + string.ascii_lowercase + for elem in alpha_spans_str: if elem in alpha_regexs: spans.append(re.compile(alpha_regexs[elem])) else: bucket = sorted([x for x in elem.lower() if x.isalnum()]) if bucket: - beginIdx = ASCII_DIGITS.index(bucket[0]) - endIdx = ASCII_DIGITS.index(bucket[-1]) + begin_index = ASCII_DIGITS.index(bucket[0]) + end_index = ASCII_DIGITS.index(bucket[-1]) else: - raise ui.UserError("invalid range defined for alpha bucket " - "'%s': no alphanumeric character found" % + raise ui.UserError(u"invalid range defined for alpha bucket " + u"'%s': no alphanumeric character found" % elem) spans.append( re.compile( - "^[" + ASCII_DIGITS[beginIdx:endIdx + 1] + - ASCII_DIGITS[beginIdx:endIdx + 1].upper() + "]" + "^[" + ASCII_DIGITS[begin_index:end_index + 1] + + ASCII_DIGITS[begin_index:end_index + 1].upper() + "]" ) ) return spans diff -Nru beets-1.3.8+dfsg/beetsplug/chroma.py beets-1.3.19/beetsplug/chroma.py --- beets-1.3.8+dfsg/beetsplug/chroma.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/beetsplug/chroma.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,6 +16,8 @@ """Adds Chromaprint/Acoustid acoustic fingerprinting support to the autotagger. Requires the pyacoustid library. """ +from __future__ import division, absolute_import, print_function + from beets import plugins from beets import ui from beets import util @@ -22,15 +25,14 @@ from beets.util import confit from beets.autotag import hooks import acoustid -import logging from collections import defaultdict API_KEY = '1vOwZtEn' SCORE_THRESH = 0.5 TRACK_ID_WEIGHT = 10.0 COMMON_REL_THRESH = 0.6 # How many tracks must have an album in common? - -log = logging.getLogger('beets') +MAX_RECORDINGS = 5 +MAX_RELEASES = 5 # Stores the Acoustid match information for each track. This is # populated when an import task begins and then used when searching for @@ -46,40 +48,49 @@ _acoustids = {} -def acoustid_match(path): +def prefix(it, count): + """Truncate an iterable to at most `count` items. + """ + for i, v in enumerate(it): + if i >= count: + break + yield v + + +def acoustid_match(log, path): """Gets metadata for a file from Acoustid and populates the _matches, _fingerprints, and _acoustids dictionaries accordingly. 
""" try: duration, fp = acoustid.fingerprint_file(util.syspath(path)) except acoustid.FingerprintGenerationError as exc: - log.error(u'fingerprinting of {0} failed: {1}' - .format(util.displayable_path(repr(path)), str(exc))) + log.error(u'fingerprinting of {0} failed: {1}', + util.displayable_path(repr(path)), exc) return None _fingerprints[path] = fp try: res = acoustid.lookup(API_KEY, fp, duration, meta='recordings releases') except acoustid.AcoustidError as exc: - log.debug(u'fingerprint matching {0} failed: {1}' - .format(util.displayable_path(repr(path)), str(exc))) + log.debug(u'fingerprint matching {0} failed: {1}', + util.displayable_path(repr(path)), exc) return None - log.debug(u'chroma: fingerprinted {0}' - .format(util.displayable_path(repr(path)))) + log.debug(u'chroma: fingerprinted {0}', + util.displayable_path(repr(path))) # Ensure the response is usable and parse it. if res['status'] != 'ok' or not res.get('results'): - log.debug(u'chroma: no match found') + log.debug(u'no match found') return None result = res['results'][0] # Best match. if result['score'] < SCORE_THRESH: - log.debug(u'chroma: no results above threshold') + log.debug(u'no results above threshold') return None _acoustids[path] = result['id'] # Get recording and releases from the result. if not result.get('recordings'): - log.debug(u'chroma: no recordings found') + log.debug(u'no recordings found') return None recording_ids = [] release_ids = [] @@ -88,7 +99,8 @@ if 'releases' in recording: release_ids += [rel['id'] for rel in recording['releases']] - log.debug(u'chroma: matched recordings {0}'.format(recording_ids)) + log.debug(u'matched recordings {0} on releases {1}', + recording_ids, release_ids) _matches[path] = recording_ids, release_ids @@ -121,9 +133,14 @@ self.config.add({ 'auto': True, }) + config['acoustid']['apikey'].redact = True if self.config['auto']: - self.register_listener('import_task_start', fingerprint_task) + self.register_listener('import_task_start', self.fingerprint_task) + self.register_listener('import_task_apply', apply_acoustid_metadata) + + def fingerprint_task(self, task, session): + return fingerprint_task(self._log, task, session) def track_distance(self, item, info): dist = hooks.Distance() @@ -137,12 +154,12 @@ def candidates(self, items, artist, album, va_likely): albums = [] - for relid in _all_releases(items): + for relid in prefix(_all_releases(items), MAX_RELEASES): album = hooks.album_for_mbid(relid) if album: albums.append(album) - log.debug(u'acoustid album candidates: {0}'.format(len(albums))) + self._log.debug(u'acoustid album candidates: {0}', len(albums)) return albums def item_candidates(self, item, artist, title): @@ -151,34 +168,33 @@ recording_ids, _ = _matches[item.path] tracks = [] - for recording_id in recording_ids: + for recording_id in prefix(recording_ids, MAX_RECORDINGS): track = hooks.track_for_mbid(recording_id) if track: tracks.append(track) - log.debug(u'acoustid item candidates: {0}'.format(len(tracks))) + self._log.debug(u'acoustid item candidates: {0}', len(tracks)) return tracks def commands(self): submit_cmd = ui.Subcommand('submit', - help='submit Acoustid fingerprints') + help=u'submit Acoustid fingerprints') def submit_cmd_func(lib, opts, args): try: apikey = config['acoustid']['apikey'].get(unicode) except confit.NotFoundError: - raise ui.UserError('no Acoustid user API key provided') - submit_items(apikey, lib.items(ui.decargs(args))) + raise ui.UserError(u'no Acoustid user API key provided') + submit_items(self._log, apikey, 
lib.items(ui.decargs(args))) submit_cmd.func = submit_cmd_func fingerprint_cmd = ui.Subcommand( 'fingerprint', - help='generate fingerprints for items without them' + help=u'generate fingerprints for items without them' ) def fingerprint_cmd_func(lib, opts, args): for item in lib.items(ui.decargs(args)): - fingerprint_item(item, - write=config['import']['write'].get(bool)) + fingerprint_item(self._log, item, write=ui.should_write()) fingerprint_cmd.func = fingerprint_cmd_func return [submit_cmd, fingerprint_cmd] @@ -187,16 +203,15 @@ # Hooks into import process. -def fingerprint_task(task, session): +def fingerprint_task(log, task, session): """Fingerprint each item in the task for later use during the autotagging candidate search. """ items = task.items if task.is_album else [task.item] for item in items: - acoustid_match(item.path) + acoustid_match(log, item.path) -@AcoustidPlugin.listen('import_task_apply') def apply_acoustid_metadata(task, session): """Apply Acoustid metadata (fingerprint and ID) to the task's items. """ @@ -210,22 +225,22 @@ # UI commands. -def submit_items(userkey, items, chunksize=64): +def submit_items(log, userkey, items, chunksize=64): """Submit fingerprints for the items to the Acoustid server. """ data = [] # The running list of dictionaries to submit. def submit_chunk(): """Submit the current accumulated fingerprint data.""" - log.info(u'submitting {0} fingerprints'.format(len(data))) + log.info(u'submitting {0} fingerprints', len(data)) try: acoustid.submit(API_KEY, userkey, data) except acoustid.AcoustidError as exc: - log.warn(u'acoustid submission error: {0}'.format(exc)) + log.warn(u'acoustid submission error: {0}', exc) del data[:] for item in items: - fp = fingerprint_item(item) + fp = fingerprint_item(log, item) # Construct a submission dictionary for this item. item_data = { @@ -257,7 +272,7 @@ submit_chunk() -def fingerprint_item(item, write=False): +def fingerprint_item(log, item, write=False): """Get the fingerprint for an Item. If the item already has a fingerprint, it is not regenerated. If fingerprint generation fails, return None. If the items are associated with a library, they are @@ -266,34 +281,28 @@ """ # Get a fingerprint and length for this track. 
if not item.length: - log.info(u'{0}: no duration available'.format( - util.displayable_path(item.path) - )) + log.info(u'{0}: no duration available', + util.displayable_path(item.path)) elif item.acoustid_fingerprint: if write: - log.info(u'{0}: fingerprint exists, skipping'.format( - util.displayable_path(item.path) - )) + log.info(u'{0}: fingerprint exists, skipping', + util.displayable_path(item.path)) else: - log.info(u'{0}: using existing fingerprint'.format( - util.displayable_path(item.path) - )) + log.info(u'{0}: using existing fingerprint', + util.displayable_path(item.path)) return item.acoustid_fingerprint else: - log.info(u'{0}: fingerprinting'.format( - util.displayable_path(item.path) - )) + log.info(u'{0}: fingerprinting', + util.displayable_path(item.path)) try: _, fp = acoustid.fingerprint_file(item.path) item.acoustid_fingerprint = fp if write: - log.info(u'{0}: writing fingerprint'.format( - util.displayable_path(item.path) - )) + log.info(u'{0}: writing fingerprint', + util.displayable_path(item.path)) item.try_write() if item._db: item.store() return item.acoustid_fingerprint except acoustid.FingerprintGenerationError as exc: - log.info(u'fingerprint generation failed: {0}' - .format(exc)) + log.info(u'fingerprint generation failed: {0}', exc) diff -Nru beets-1.3.8+dfsg/beetsplug/convert.py beets-1.3.19/beetsplug/convert.py --- beets-1.3.8+dfsg/beetsplug/convert.py 2014-09-15 23:57:56.000000000 +0000 +++ beets-1.3.19/beetsplug/convert.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Jakob Schnitzer. +# Copyright 2016, Jakob Schnitzer. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,20 +15,21 @@ """Converts tracks or albums to external directory """ -import logging +from __future__ import division, absolute_import, print_function + import os import threading import subprocess import tempfile +import shlex from string import Template -import pipes from beets import ui, util, plugins, config from beets.plugins import BeetsPlugin -from beetsplug.embedart import embed_item from beets.util.confit import ConfigTypeError +from beets import art +from beets.util.artresizer import ArtResizer -log = logging.getLogger('beets') _fs_lock = threading.Lock() _temp_files = [] # Keep track of temporary transcoded files for deletion. @@ -37,7 +39,7 @@ u'vorbis': u'ogg', } -LOSSLESS_FORMATS = ['ape', 'flac', 'alac', 'wav'] +LOSSLESS_FORMATS = ['ape', 'flac', 'alac', 'wav', 'aiff'] def replace_ext(path, ext): @@ -45,28 +47,29 @@ The new extension must not contain a leading dot. """ - return os.path.splitext(path)[0] + '.' + ext + ext_dot = util.bytestring_path('.' + ext) + return os.path.splitext(path)[0] + ext_dot -def get_format(format=None): - """Return the command tempate and the extension from the config. +def get_format(fmt=None): + """Return the command template and the extension from the config. 
""" - if not format: - format = config['convert']['format'].get(unicode).lower() - format = ALIASES.get(format, format) + if not fmt: + fmt = config['convert']['format'].get(unicode).lower() + fmt = ALIASES.get(fmt, fmt) try: - format_info = config['convert']['formats'][format].get(dict) + format_info = config['convert']['formats'][fmt].get(dict) command = format_info['command'] - extension = format_info['extension'] + extension = format_info.get('extension', fmt) except KeyError: raise ui.UserError( - u'convert: format {0} needs "command" and "extension" fields' - .format(format) + u'convert: format {0} needs the "command" field' + .format(fmt) ) except ConfigTypeError: - command = config['convert']['formats'][format].get(str) - extension = format + command = config['convert']['formats'][fmt].get(bytes) + extension = fmt # Convenience and backwards-compatibility shortcuts. keys = config['convert'].keys() @@ -83,50 +86,7 @@ return (command.encode('utf8'), extension.encode('utf8')) -def encode(command, source, dest, pretend=False): - """Encode `source` to `dest` using command template `command`. - - Raises `subprocess.CalledProcessError` if the command exited with a - non-zero status code. - """ - quiet = config['convert']['quiet'].get() - - if not quiet and not pretend: - log.info(u'Encoding {0}'.format(util.displayable_path(source))) - - command = Template(command).safe_substitute({ - 'source': pipes.quote(source), - 'dest': pipes.quote(dest), - }) - - log.debug(u'convert: executing: {0}' - .format(util.displayable_path(command))) - - if pretend: - log.info(command) - return - - try: - util.command_output(command, shell=True) - except subprocess.CalledProcessError: - # Something went wrong (probably Ctrl+C), remove temporary files - log.info(u'Encoding {0} failed. Cleaning up...' - .format(util.displayable_path(source))) - util.remove(dest) - util.prune_dirs(os.path.dirname(dest)) - raise - except OSError as exc: - raise ui.UserError( - u"convert: could invoke '{0}': {0}".format(command, exc) - ) - - if not quiet and not pretend: - log.info(u'Finished encoding {0}'.format( - util.displayable_path(source)) - ) - - -def should_transcode(item, format): +def should_transcode(item, fmt): """Determine whether the item should be transcoded as part of conversion (i.e., its bitrate is high or it has the wrong format). """ @@ -134,158 +94,10 @@ not (item.format.lower() in LOSSLESS_FORMATS): return False maxbr = config['convert']['max_bitrate'].get(int) - return format.lower() != item.format.lower() or \ + return fmt.lower() != item.format.lower() or \ item.bitrate >= 1000 * maxbr -def convert_item(dest_dir, keep_new, path_formats, format, pretend=False): - command, ext = get_format(format) - item, original, converted = None, None, None - while True: - item = yield (item, original, converted) - dest = item.destination(basedir=dest_dir, path_formats=path_formats) - - # When keeping the new file in the library, we first move the - # current (pristine) file to the destination. We'll then copy it - # back to its old path or transcode it to a new path. - if keep_new: - original = dest - converted = item.path - else: - original = item.path - converted = dest - - # Ensure that only one thread tries to create directories at a - # time. (The existence check is not atomic with the directory - # creation inside this function.) 
- if not pretend: - with _fs_lock: - util.mkdirall(dest) - - if os.path.exists(util.syspath(dest)): - log.info(u'Skipping {0} (target file exists)'.format( - util.displayable_path(item.path) - )) - continue - - if keep_new: - if pretend: - log.info(u'mv {0} {1}'.format( - util.displayable_path(item.path), - util.displayable_path(original), - )) - else: - log.info(u'Moving to {0}'.format( - util.displayable_path(original)) - ) - util.move(item.path, original) - - if should_transcode(item, format): - converted = replace_ext(converted, ext) - try: - encode(command, original, converted, pretend) - except subprocess.CalledProcessError: - continue - else: - if pretend: - log.info(u'cp {0} {1}'.format( - util.displayable_path(original), - util.displayable_path(converted), - )) - else: - # No transcoding necessary. - log.info(u'Copying {0}'.format( - util.displayable_path(item.path)) - ) - util.copy(original, converted) - - if pretend: - continue - - # Write tags from the database to the converted file. - item.try_write(path=converted) - - if keep_new: - # If we're keeping the transcoded file, read it again (after - # writing) to get new bitrate, duration, etc. - item.path = converted - item.read() - item.store() # Store new path and audio data. - - if config['convert']['embed']: - album = item.get_album() - if album and album.artpath: - embed_item(item, album.artpath, itempath=converted) - - if keep_new: - plugins.send('after_convert', item=item, - dest=dest, keepnew=True) - else: - plugins.send('after_convert', item=item, - dest=converted, keepnew=False) - - -def convert_on_import(lib, item): - """Transcode a file automatically after it is imported into the - library. - """ - format = config['convert']['format'].get(unicode).lower() - if should_transcode(item, format): - command, ext = get_format() - fd, dest = tempfile.mkstemp('.' + ext) - os.close(fd) - _temp_files.append(dest) # Delete the transcode later. - try: - encode(command, item.path, dest) - except subprocess.CalledProcessError: - return - item.path = dest - item.write() - item.read() # Load new audio information data. - item.store() - - -def convert_func(lib, opts, args): - if not opts.dest: - opts.dest = config['convert']['dest'].get() - if not opts.dest: - raise ui.UserError('no convert destination set') - opts.dest = util.bytestring_path(opts.dest) - - if not opts.threads: - opts.threads = config['convert']['threads'].get(int) - - if config['convert']['paths']: - path_formats = ui.get_path_formats(config['convert']['paths']) - else: - path_formats = ui.get_path_formats() - - if not opts.format: - opts.format = config['convert']['format'].get(unicode).lower() - - pretend = opts.pretend if opts.pretend is not None else \ - config['convert']['pretend'].get(bool) - - if not pretend: - ui.commands.list_items(lib, ui.decargs(args), opts.album, None) - - if not (opts.yes or ui.input_yn("Convert? 
(Y/n)")): - return - - if opts.album: - items = (i for a in lib.albums(ui.decargs(args)) for i in a.items()) - else: - items = iter(lib.items(ui.decargs(args))) - convert = [convert_item(opts.dest, - opts.keep_new, - path_formats, - opts.format, - pretend) - for _ in range(opts.threads)] - pipe = util.pipeline.Pipeline([items, convert]) - pipe.run_parallel() - - class ConvertPlugin(BeetsPlugin): def __init__(self): super(ConvertPlugin, self).__init__() @@ -309,50 +121,334 @@ u'opus': u'ffmpeg -i $source -y -vn -acodec libopus -ab 96k $dest', u'ogg': - u'ffmpeg -i $source -y -vn -acodec libvorbis -aq 2 $dest', + u'ffmpeg -i $source -y -vn -acodec libvorbis -aq 3 $dest', u'wma': u'ffmpeg -i $source -y -vn -acodec wmav2 -vn $dest', }, u'max_bitrate': 500, u'auto': False, + u'tmpdir': None, u'quiet': False, u'embed': True, u'paths': {}, u'never_convert_lossy_files': False, + u'copy_album_art': False, + u'album_art_maxwidth': 0, }) self.import_stages = [self.auto_convert] + self.register_listener('import_task_files', self._cleanup) + def commands(self): - cmd = ui.Subcommand('convert', help='convert to external location') + cmd = ui.Subcommand('convert', help=u'convert to external location') cmd.parser.add_option('-p', '--pretend', action='store_true', - help='show actions but do nothing') - cmd.parser.add_option('-a', '--album', action='store_true', - help='choose albums instead of tracks') + help=u'show actions but do nothing') cmd.parser.add_option('-t', '--threads', action='store', type='int', - help='change the number of threads, \ + help=u'change the number of threads, \ defaults to maximum available processors') cmd.parser.add_option('-k', '--keep-new', action='store_true', - dest='keep_new', help='keep only the converted \ + dest='keep_new', help=u'keep only the converted \ and move the old files') cmd.parser.add_option('-d', '--dest', action='store', - help='set the destination directory') + help=u'set the destination directory') cmd.parser.add_option('-f', '--format', action='store', dest='format', - help='set the destination directory') - cmd.parser.add_option('-y', '--yes', action='store', dest='yes', - help='do not ask for confirmation') - cmd.func = convert_func + help=u'set the target format of the tracks') + cmd.parser.add_option('-y', '--yes', action='store_true', dest='yes', + help=u'do not ask for confirmation') + cmd.parser.add_album_option() + cmd.func = self.convert_func return [cmd] def auto_convert(self, config, task): if self.config['auto']: for item in task.imported_items(): - convert_on_import(config.lib, item) + self.convert_on_import(config.lib, item) + + # Utilities converted from functions to methods on logging overhaul + + def encode(self, command, source, dest, pretend=False): + """Encode `source` to `dest` using command template `command`. + + Raises `subprocess.CalledProcessError` if the command exited with a + non-zero status code. + """ + # The paths and arguments must be bytes. + assert isinstance(command, bytes) + assert isinstance(source, bytes) + assert isinstance(dest, bytes) + + quiet = self.config['quiet'].get(bool) + + if not quiet and not pretend: + self._log.info(u'Encoding {0}', util.displayable_path(source)) + + # Substitute $source and $dest in the argument list. 
+ args = shlex.split(command) + for i, arg in enumerate(args): + args[i] = Template(arg).safe_substitute({ + 'source': source, + 'dest': dest, + }) + + if pretend: + self._log.info(u' '.join(ui.decargs(args))) + return + + try: + util.command_output(args) + except subprocess.CalledProcessError as exc: + # Something went wrong (probably Ctrl+C), remove temporary files + self._log.info(u'Encoding {0} failed. Cleaning up...', + util.displayable_path(source)) + self._log.debug(u'Command {0} exited with status {1}: {2}', + args, + exc.returncode, + exc.output) + util.remove(dest) + util.prune_dirs(os.path.dirname(dest)) + raise + except OSError as exc: + raise ui.UserError( + u"convert: couldn't invoke '{0}': {1}".format( + u' '.join(ui.decargs(args)), exc + ) + ) + + if not quiet and not pretend: + self._log.info(u'Finished encoding {0}', + util.displayable_path(source)) + + def convert_item(self, dest_dir, keep_new, path_formats, fmt, + pretend=False): + """A pipeline thread that converts `Item` objects from a + library. + """ + command, ext = get_format(fmt) + item, original, converted = None, None, None + while True: + item = yield (item, original, converted) + dest = item.destination(basedir=dest_dir, + path_formats=path_formats) + + # When keeping the new file in the library, we first move the + # current (pristine) file to the destination. We'll then copy it + # back to its old path or transcode it to a new path. + if keep_new: + original = dest + converted = item.path + if should_transcode(item, fmt): + converted = replace_ext(converted, ext) + else: + original = item.path + if should_transcode(item, fmt): + dest = replace_ext(dest, ext) + converted = dest + + # Ensure that only one thread tries to create directories at a + # time. (The existence check is not atomic with the directory + # creation inside this function.) + if not pretend: + with _fs_lock: + util.mkdirall(dest) + + if os.path.exists(util.syspath(dest)): + self._log.info(u'Skipping {0} (target file exists)', + util.displayable_path(item.path)) + continue + + if keep_new: + if pretend: + self._log.info(u'mv {0} {1}', + util.displayable_path(item.path), + util.displayable_path(original)) + else: + self._log.info(u'Moving to {0}', + util.displayable_path(original)) + util.move(item.path, original) + + if should_transcode(item, fmt): + try: + self.encode(command, original, converted, pretend) + except subprocess.CalledProcessError: + continue + else: + if pretend: + self._log.info(u'cp {0} {1}', + util.displayable_path(original), + util.displayable_path(converted)) + else: + # No transcoding necessary. + self._log.info(u'Copying {0}', + util.displayable_path(item.path)) + util.copy(original, converted) + + if pretend: + continue + + # Write tags from the database to the converted file. + item.try_write(path=converted) + + if keep_new: + # If we're keeping the transcoded file, read it again (after + # writing) to get new bitrate, duration, etc. + item.path = converted + item.read() + item.store() # Store new path and audio data. 
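Editorial note (not part of the patch): the encode() method above replaces the old shell pipeline with shlex.split() plus per-argument Template substitution, so $source and $dest never pass through a shell and need no quoting. A small sketch of that substitution step, for illustration only; the helper name is made up:

    import shlex
    from string import Template

    def build_args(command, source, dest):
        # Split the command template once, then substitute each argument
        # individually; paths are inserted verbatim, with no shell quoting.
        args = shlex.split(command)
        return [Template(arg).safe_substitute({'source': source, 'dest': dest})
                for arg in args]

    # e.g.: build_args(u'ffmpeg -i $source -y -vn $dest', 'in.flac', 'out.ogg')
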
+ + if self.config['embed']: + album = item.get_album() + if album and album.artpath: + self._log.debug(u'embedding album art from {}', + util.displayable_path(album.artpath)) + art.embed_item(self._log, item, album.artpath, + itempath=converted) + + if keep_new: + plugins.send('after_convert', item=item, + dest=dest, keepnew=True) + else: + plugins.send('after_convert', item=item, + dest=converted, keepnew=False) + + def copy_album_art(self, album, dest_dir, path_formats, pretend=False): + """Copies or converts the associated cover art of the album. Album must + have at least one track. + """ + if not album or not album.artpath: + return + + album_item = album.items().get() + # Album shouldn't be empty. + if not album_item: + return + + # Get the destination of the first item (track) of the album, we use + # this function to format the path accordingly to path_formats. + dest = album_item.destination(basedir=dest_dir, + path_formats=path_formats) + # Remove item from the path. + dest = os.path.join(*util.components(dest)[:-1]) + + dest = album.art_destination(album.artpath, item_dir=dest) + if album.artpath == dest: + return + + if not pretend: + util.mkdirall(dest) + + if os.path.exists(util.syspath(dest)): + self._log.info(u'Skipping {0} (target file exists)', + util.displayable_path(album.artpath)) + return + + # Decide whether we need to resize the cover-art image. + resize = False + maxwidth = None + if self.config['album_art_maxwidth']: + maxwidth = self.config['album_art_maxwidth'].get(int) + size = ArtResizer.shared.get_size(album.artpath) + self._log.debug('image size: {}', size) + if size: + resize = size[0] > maxwidth + else: + self._log.warning(u'Could not get size of image (please see ' + u'documentation for dependencies).') + + # Either copy or resize (while copying) the image. + if resize: + self._log.info(u'Resizing cover art from {0} to {1}', + util.displayable_path(album.artpath), + util.displayable_path(dest)) + if not pretend: + ArtResizer.shared.resize(maxwidth, album.artpath, dest) + else: + if pretend: + self._log.info(u'cp {0} {1}', + util.displayable_path(album.artpath), + util.displayable_path(dest)) + else: + self._log.info(u'Copying cover art to {0}', + util.displayable_path(album.artpath), + util.displayable_path(dest)) + util.copy(album.artpath, dest) + + def convert_func(self, lib, opts, args): + if not opts.dest: + opts.dest = self.config['dest'].get() + if not opts.dest: + raise ui.UserError(u'no convert destination set') + opts.dest = util.bytestring_path(opts.dest) + + if not opts.threads: + opts.threads = self.config['threads'].get(int) + + if self.config['paths']: + path_formats = ui.get_path_formats(self.config['paths']) + else: + path_formats = ui.get_path_formats() + + if not opts.format: + opts.format = self.config['format'].get(unicode).lower() + + pretend = opts.pretend if opts.pretend is not None else \ + self.config['pretend'].get(bool) + + if not pretend: + ui.commands.list_items(lib, ui.decargs(args), opts.album) + + if not (opts.yes or ui.input_yn(u"Convert? 
(Y/n)")): + return + + if opts.album: + albums = lib.albums(ui.decargs(args)) + items = (i for a in albums for i in a.items()) + if self.config['copy_album_art']: + for album in albums: + self.copy_album_art(album, opts.dest, path_formats, + pretend) + else: + items = iter(lib.items(ui.decargs(args))) + convert = [self.convert_item(opts.dest, + opts.keep_new, + path_formats, + opts.format, + pretend) + for _ in range(opts.threads)] + pipe = util.pipeline.Pipeline([items, convert]) + pipe.run_parallel() + + def convert_on_import(self, lib, item): + """Transcode a file automatically after it is imported into the + library. + """ + fmt = self.config['format'].get(unicode).lower() + if should_transcode(item, fmt): + command, ext = get_format() + + # Create a temporary file for the conversion. + tmpdir = self.config['tmpdir'].get() + fd, dest = tempfile.mkstemp('.' + ext, dir=tmpdir) + os.close(fd) + dest = util.bytestring_path(dest) + _temp_files.append(dest) # Delete the transcode later. + + # Convert. + try: + self.encode(command, item.path, dest) + except subprocess.CalledProcessError: + return -@ConvertPlugin.listen('import_task_files') -def _cleanup(task, session): - for path in task.old_paths: - if path in _temp_files: - if os.path.isfile(path): - util.remove(path) - _temp_files.remove(path) + # Change the newly-imported database entry to point to the + # converted file. + item.path = dest + item.write() + item.read() # Load new audio information data. + item.store() + + def _cleanup(self, task, session): + for path in task.old_paths: + if path in _temp_files: + if os.path.isfile(path): + util.remove(path) + _temp_files.remove(path) diff -Nru beets-1.3.8+dfsg/beetsplug/cue.py beets-1.3.19/beetsplug/cue.py --- beets-1.3.8+dfsg/beetsplug/cue.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beetsplug/cue.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 Bruno Cauet +# Split an album-file in tracks thanks a cue file + +from __future__ import division, absolute_import, print_function + +import subprocess +from os import path +from glob import glob + +from beets.util import command_output, displayable_path +from beets.plugins import BeetsPlugin +from beets.autotag import TrackInfo + + +class CuePlugin(BeetsPlugin): + def __init__(self): + super(CuePlugin, self).__init__() + # this does not seem supported by shnsplit + self.config.add({ + 'keep_before': .1, + 'keep_after': .9, + }) + + # self.register_listener('import_task_start', self.look_for_cues) + + def candidates(self, items, artist, album, va_likely): + import pdb + pdb.set_trace() + + def item_candidates(self, item, artist, album): + dir = path.dirname(item.path) + cues = glob.glob(path.join(dir, "*.cue")) + if not cues: + return + if len(cues) > 1: + self._log.info(u"Found multiple cue files doing nothing: {0}", + map(displayable_path, cues)) + + cue_file = cues[0] + self._log.info("Found {} for {}", displayable_path(cue_file), item) + + try: + # careful: will ask for input in case of conflicts + command_output(['shnsplit', '-f', cue_file, item.path]) + except (subprocess.CalledProcessError, OSError): + self._log.exception(u'shnsplit execution failed') + return + + tracks = glob(path.join(dir, "*.wav")) + self._log.info("Generated {0} tracks", len(tracks)) + for t in tracks: + title = "dunno lol" + track_id = "wtf" + index = int(path.basename(t)[len("split-track"):-len(".wav")]) + yield TrackInfo(title, track_id, index=index, artist=artist) + # generate TrackInfo instances diff 
-Nru beets-1.3.8+dfsg/beetsplug/discogs.py beets-1.3.19/beetsplug/discogs.py --- beets-1.3.8+dfsg/beetsplug/discogs.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/beetsplug/discogs.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,31 +16,112 @@ """Adds Discogs album search support to the autotagger. Requires the discogs-client library. """ +from __future__ import division, absolute_import, print_function + +import beets.ui +from beets import logging +from beets import config from beets.autotag.hooks import AlbumInfo, TrackInfo, Distance from beets.plugins import BeetsPlugin +from beets.util import confit from discogs_client import Release, Client from discogs_client.exceptions import DiscogsAPIError +from requests.exceptions import ConnectionError import beets -import logging import re import time +import json +import socket +import httplib +import os -log = logging.getLogger('beets') # Silence spurious INFO log lines generated by urllib3. urllib3_logger = logging.getLogger('requests.packages.urllib3') urllib3_logger.setLevel(logging.CRITICAL) +USER_AGENT = u'beets/{0} +http://beets.io/'.format(beets.__version__) + +# Exceptions that discogs_client should really handle but does not. +CONNECTION_ERRORS = (ConnectionError, socket.error, httplib.HTTPException, + ValueError, # JSON decoding raises a ValueError. + DiscogsAPIError) + class DiscogsPlugin(BeetsPlugin): def __init__(self): super(DiscogsPlugin, self).__init__() self.config.add({ + 'apikey': 'rAzVUQYRaoFjeBjyWuWZ', + 'apisecret': 'plxtUTqoCzwxZpqdPysCwGuBSmZNdZVy', + 'tokenfile': 'discogs_token.json', 'source_weight': 0.5, }) - self.discogs_client = Client('beets/%s +http://beets.radbox.org/' % - beets.__version__) + self.config['apikey'].redact = True + self.config['apisecret'].redact = True + self.discogs_client = None + self.register_listener('import_begin', self.setup) + + def setup(self, session=None): + """Create the `discogs_client` field. Authenticate if necessary. + """ + c_key = self.config['apikey'].get(unicode) + c_secret = self.config['apisecret'].get(unicode) + + # Get the OAuth token from a file or log in. + try: + with open(self._tokenfile()) as f: + tokendata = json.load(f) + except IOError: + # No token yet. Generate one. + token, secret = self.authenticate(c_key, c_secret) + else: + token = tokendata['token'] + secret = tokendata['secret'] + + self.discogs_client = Client(USER_AGENT, c_key, c_secret, + token, secret) + + def reset_auth(self): + """Delete toke file & redo the auth steps. + """ + os.remove(self._tokenfile()) + self.setup() + + def _tokenfile(self): + """Get the path to the JSON file for storing the OAuth token. + """ + return self.config['tokenfile'].get(confit.Filename(in_app_dir=True)) + + def authenticate(self, c_key, c_secret): + # Get the link for the OAuth page. + auth_client = Client(USER_AGENT, c_key, c_secret) + try: + _, _, url = auth_client.get_authorize_url() + except CONNECTION_ERRORS as e: + self._log.debug(u'connection error: {0}', e) + raise beets.ui.UserError(u'communication with Discogs failed') + + beets.ui.print_(u"To authenticate with Discogs, visit:") + beets.ui.print_(url) + + # Ask for the code and validate it. 
+ code = beets.ui.input_(u"Enter the code:") + try: + token, secret = auth_client.get_access_token(code) + except DiscogsAPIError: + raise beets.ui.UserError(u'Discogs authorization failed') + except CONNECTION_ERRORS as e: + self._log.debug(u'connection error: {0}', e) + raise beets.ui.UserError(u'Discogs token request failed') + + # Save the token for later use. + self._log.debug(u'Discogs token {0}, secret {1}', token, secret) + with open(self._tokenfile(), 'w') as f: + json.dump({'token': token, 'secret': secret}, f) + + return token, secret def album_distance(self, items, album_info, mapping): """Returns the album distance. @@ -53,6 +135,9 @@ """Returns a list of AlbumInfo objects for discogs search results matching an album and artist (if not various). """ + if not self.discogs_client: + return + if va_likely: query = album else: @@ -60,14 +145,24 @@ try: return self.get_albums(query) except DiscogsAPIError as e: - log.debug(u'Discogs API Error: {0} (query: {1})'.format(e, query)) + self._log.debug(u'API Error: {0} (query: {1})', e, query) + if e.status_code == 401: + self.reset_auth() + return self.candidates(items, artist, album, va_likely) + else: + return [] + except CONNECTION_ERRORS: + self._log.debug(u'Connection error in album search', exc_info=True) return [] def album_for_id(self, album_id): """Fetches an album by its Discogs ID and returns an AlbumInfo object or None if the album is not found. """ - log.debug(u'Searching Discogs for release {0}'.format(str(album_id))) + if not self.discogs_client: + return + + self._log.debug(u'Searching for release {0}', album_id) # Discogs-IDs are simple integers. We only look for those at the end # of an input string as to avoid confusion with other metadata plugins. # An optional bracket can follow the integer, as this is how discogs @@ -81,9 +176,14 @@ try: getattr(result, 'title') except DiscogsAPIError as e: - if e.message != '404 Not Found': - log.debug(u'Discogs API Error: {0} (query: {1})' - .format(e, result._uri)) + if e.status_code != 404: + self._log.debug(u'API Error: {0} (query: {1})', e, result._uri) + if e.status_code == 401: + self.reset_auth() + return self.album_for_id(album_id) + return None + except CONNECTION_ERRORS: + self._log.debug(u'Connection error in album lookup', exc_info=True) return None return self.get_album_info(result) @@ -94,11 +194,20 @@ # cause a query to return no results, even if they match the artist or # album title. Use `re.UNICODE` flag to avoid stripping non-english # word characters. - query = re.sub(r'(?u)\W+', ' ', query).encode('utf8') + # FIXME: Encode as ASCII to work around a bug: + # https://github.com/beetbox/beets/issues/1051 + # When the library is fixed, we should encode as UTF-8. + query = re.sub(r'(?u)\W+', ' ', query).encode('ascii', "replace") # Strip medium information from query, Things like "CD1" and "disk 1" # can also negate an otherwise positive result. 
- query = re.sub(r'(?i)\b(CD|disc)\s*\d+', '', query) - releases = self.discogs_client.search(query, type='release').page(1) + query = re.sub(br'(?i)\b(CD|disc)\s*\d+', '', query) + try: + releases = self.discogs_client.search(query, + type='release').page(1) + except CONNECTION_ERRORS: + self._log.debug(u"Communication error while searching for {0!r}", + query, exc_info=True) + return [] return [self.get_album_info(release) for release in releases[:5]] def get_album_info(self, result): @@ -115,6 +224,8 @@ albumtype = ', '.join( result.data['formats'][0].get('descriptions', [])) or None va = result.data['artists'][0]['name'].lower() == 'various' + if va: + artist = config['va_name'].get(unicode) year = result.data['year'] label = result.data['labels'][0]['name'] mediums = len(set(t.medium for t in tracks)) @@ -141,7 +252,7 @@ """ artist_id = None bits = [] - for artist in artists: + for i, artist in enumerate(artists): if not artist_id: artist_id = artist['id'] name = artist['name'] @@ -150,7 +261,7 @@ # Move articles to the front. name = re.sub(r'(?i)^(.*?), (a|an|the)$', r'\2 \1', name) bits.append(name) - if artist['join']: + if artist['join'] and i < len(artists) - 1: bits.append(artist['join']) artist = ' '.join(bits).replace(' ,', ',') or None return artist, artist_id @@ -225,7 +336,7 @@ if match: medium, index = match.groups() else: - log.debug(u'Invalid Discogs position: {0}'.format(position)) + self._log.debug(u'Invalid position: {0}', position) medium = index = None return medium or None, index or None diff -Nru beets-1.3.8+dfsg/beetsplug/duplicates.py beets-1.3.19/beetsplug/duplicates.py --- beets-1.3.8+dfsg/beetsplug/duplicates.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/beetsplug/duplicates.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Pedro Silva. +# Copyright 2016, Pedro Silva. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,91 +15,16 @@ """List duplicate tracks or albums. """ +from __future__ import division, absolute_import, print_function + import shlex -import logging from beets.plugins import BeetsPlugin -from beets.ui import decargs, print_obj, vararg_callback, Subcommand, UserError +from beets.ui import decargs, print_, vararg_callback, Subcommand, UserError from beets.util import command_output, displayable_path, subprocess +from beets.library import Item, Album PLUGIN = 'duplicates' -log = logging.getLogger('beets') - - -def _process_item(item, lib, copy=False, move=False, delete=False, - tag=False, format=None): - """Process Item `item` in `lib`. - """ - if copy: - item.move(basedir=copy, copy=True) - item.store() - if move: - item.move(basedir=move, copy=False) - item.store() - if delete: - item.remove(delete=True) - if tag: - try: - k, v = tag.split('=') - except: - raise UserError('%s: can\'t parse k=v tag: %s' % (PLUGIN, tag)) - setattr(k, v) - item.store() - print_obj(item, lib, fmt=format) - - -def _checksum(item, prog): - """Run external `prog` on file path associated with `item`, cache - output as flexattr on a key that is the name of the program, and - return the key, checksum tuple. 
- """ - args = [p.format(file=item.path) for p in shlex.split(prog)] - key = args[0] - checksum = getattr(item, key, False) - if not checksum: - log.debug(u'{0}: key {1} on item {2} not cached: computing checksum' - .format(PLUGIN, key, displayable_path(item.path))) - try: - checksum = command_output(args) - setattr(item, key, checksum) - item.store() - log.debug(u'{)}: computed checksum for {1} using {2}' - .format(PLUGIN, item.title, key)) - except subprocess.CalledProcessError as e: - log.debug(u'{0}: failed to checksum {1}: {2}' - .format(PLUGIN, displayable_path(item.path), e)) - else: - log.debug(u'{0}: key {1} on item {2} cached: not computing checksum' - .format(PLUGIN, key, displayable_path(item.path))) - return key, checksum - - -def _group_by(objs, keys): - """Return a dictionary with keys arbitrary concatenations of attributes and - values lists of objects (Albums or Items) with those keys. - """ - import collections - counts = collections.defaultdict(list) - for obj in objs: - values = [getattr(obj, k, None) for k in keys] - values = [v for v in values if v not in (None, '')] - if values: - key = '\001'.join(values) - counts[key].append(obj) - else: - log.debug(u'{0}: all keys {1} on item {2} are null: skipping' - .format(PLUGIN, str(keys), displayable_path(obj.path))) - - return counts - - -def _duplicates(objs, keys, full): - """Generate triples of keys, duplicate counts, and constituent objects. - """ - offset = 0 if full else 1 - for k, objs in _group_by(objs, keys).iteritems(): - if len(objs) > 1: - yield (k, len(objs) - offset, objs[offset:]) class DuplicatesPlugin(BeetsPlugin): @@ -108,100 +34,111 @@ super(DuplicatesPlugin, self).__init__() self.config.add({ - 'format': '', - 'count': False, 'album': False, + 'checksum': '', + 'copy': '', + 'count': False, + 'delete': False, + 'format': '', 'full': False, + 'keys': [], + 'merge': False, + 'move': '', 'path': False, - 'keys': ['mb_trackid', 'mb_albumid'], - 'checksum': None, - 'copy': False, - 'move': False, - 'delete': False, - 'tag': False, + 'tiebreak': {}, + 'strict': False, + 'tag': '', }) self._command = Subcommand('duplicates', help=__doc__, aliases=['dup']) - - self._command.parser.add_option('-f', '--format', dest='format', - action='store', type='string', - help='print with custom format', - metavar='FMT') - - self._command.parser.add_option('-a', '--album', dest='album', - action='store_true', - help='show duplicate albums instead of' - ' tracks') - - self._command.parser.add_option('-c', '--count', dest='count', - action='store_true', - help='show duplicate counts') - - self._command.parser.add_option('-C', '--checksum', dest='checksum', - action='store', metavar='PROG', - help='report duplicates based on' - ' arbitrary command') - - self._command.parser.add_option('-d', '--delete', dest='delete', - action='store_true', - help='delete items from library and ' - 'disk') - - self._command.parser.add_option('-F', '--full', dest='full', - action='store_true', - help='show all versions of duplicate' - ' tracks or albums') - - self._command.parser.add_option('-k', '--keys', dest='keys', - action='callback', metavar='KEY1 KEY2', - callback=vararg_callback, - help='report duplicates based on keys') - - self._command.parser.add_option('-m', '--move', dest='move', - action='store', metavar='DEST', - help='move items to dest') - - self._command.parser.add_option('-o', '--copy', dest='copy', - action='store', metavar='DEST', - help='copy items to dest') - - self._command.parser.add_option('-p', '--path', dest='path', - 
action='store_true', - help='print paths for matched items or' - ' albums') - - self._command.parser.add_option('-t', '--tag', dest='tag', - action='store', - help='tag matched items with \'k=v\'' - ' attribute') + self._command.parser.add_option( + u'-c', u'--count', dest='count', + action='store_true', + help=u'show duplicate counts', + ) + self._command.parser.add_option( + u'-C', u'--checksum', dest='checksum', + action='store', metavar='PROG', + help=u'report duplicates based on arbitrary command', + ) + self._command.parser.add_option( + u'-d', u'--delete', dest='delete', + action='store_true', + help=u'delete items from library and disk', + ) + self._command.parser.add_option( + u'-F', u'--full', dest='full', + action='store_true', + help=u'show all versions of duplicate tracks or albums', + ) + self._command.parser.add_option( + u'-s', u'--strict', dest='strict', + action='store_true', + help=u'report duplicates only if all attributes are set', + ) + self._command.parser.add_option( + u'-k', u'--keys', dest='keys', + action='callback', metavar='KEY1 KEY2', + callback=vararg_callback, + help=u'report duplicates based on keys', + ) + self._command.parser.add_option( + u'-M', u'--merge', dest='merge', + action='store_true', + help=u'merge duplicate items', + ) + self._command.parser.add_option( + u'-m', u'--move', dest='move', + action='store', metavar='DEST', + help=u'move items to dest', + ) + self._command.parser.add_option( + u'-o', u'--copy', dest='copy', + action='store', metavar='DEST', + help=u'copy items to dest', + ) + self._command.parser.add_option( + u'-t', u'--tag', dest='tag', + action='store', + help=u'tag matched items with \'k=v\' attribute', + ) + self._command.parser.add_all_common_options() def commands(self): def _dup(lib, opts, args): self.config.set_args(opts) - fmt = self.config['format'].get() album = self.config['album'].get(bool) - full = self.config['full'].get(bool) - keys = self.config['keys'].get() - checksum = self.config['checksum'].get() - copy = self.config['copy'].get() - move = self.config['move'].get() + checksum = self.config['checksum'].get(str) + copy = self.config['copy'].get(str) + count = self.config['count'].get(bool) delete = self.config['delete'].get(bool) - tag = self.config['tag'].get() + fmt = self.config['format'].get(str) + full = self.config['full'].get(bool) + keys = self.config['keys'].get(list) + merge = self.config['merge'].get(bool) + move = self.config['move'].get(str) + path = self.config['path'].get(bool) + tiebreak = self.config['tiebreak'].get(dict) + strict = self.config['strict'].get(bool) + tag = self.config['tag'].get(str) if album: - keys = ['mb_albumid'] + if not keys: + keys = ['mb_albumid'] items = lib.albums(decargs(args)) else: + if not keys: + keys = ['mb_trackid', 'mb_albumid'] items = lib.items(decargs(args)) - if self.config['path']: + if path: fmt = '$path' # Default format string for count mode. - if self.config['count'] and not fmt: + if count and not fmt: if album: fmt = '$albumartist - $album' else: @@ -210,20 +147,191 @@ if checksum: for i in items: - k, _ = _checksum(i, checksum) + k, _ = self._checksum(i, checksum) keys = [k] - for obj_id, obj_count, objs in _duplicates(items, - keys=keys, - full=full): + for obj_id, obj_count, objs in self._duplicates(items, + keys=keys, + full=full, + strict=strict, + tiebreak=tiebreak, + merge=merge): if obj_id: # Skip empty IDs. 
for o in objs: - _process_item(o, lib, - copy=copy, - move=move, - delete=delete, - tag=tag, - format=fmt.format(obj_count)) + self._process_item(o, + copy=copy, + move=move, + delete=delete, + tag=tag, + fmt=fmt.format(obj_count)) self._command.func = _dup return [self._command] + + def _process_item(self, item, copy=False, move=False, delete=False, + tag=False, fmt=''): + """Process Item `item`. + """ + print_(format(item, fmt)) + if copy: + item.move(basedir=copy, copy=True) + item.store() + if move: + item.move(basedir=move, copy=False) + item.store() + if delete: + item.remove(delete=True) + if tag: + try: + k, v = tag.split('=') + except: + raise UserError( + u"{}: can't parse k=v tag: {}".format(PLUGIN, tag) + ) + setattr(item, k, v) + item.store() + + def _checksum(self, item, prog): + """Run external `prog` on file path associated with `item`, cache + output as flexattr on a key that is the name of the program, and + return the key, checksum tuple. + """ + args = [p.format(file=item.path) for p in shlex.split(prog)] + key = args[0] + checksum = getattr(item, key, False) + if not checksum: + self._log.debug(u'key {0} on item {1} not cached:' + u'computing checksum', + key, displayable_path(item.path)) + try: + checksum = command_output(args) + setattr(item, key, checksum) + item.store() + self._log.debug(u'computed checksum for {0} using {1}', + item.title, key) + except subprocess.CalledProcessError as e: + self._log.debug(u'failed to checksum {0}: {1}', + displayable_path(item.path), e) + else: + self._log.debug(u'key {0} on item {1} cached:' + u'not computing checksum', + key, displayable_path(item.path)) + return key, checksum + + def _group_by(self, objs, keys, strict): + """Return a dictionary with keys arbitrary concatenations of attributes + and values lists of objects (Albums or Items) with those keys. + + If strict, all attributes must be defined for a duplicate match. + """ + import collections + counts = collections.defaultdict(list) + for obj in objs: + values = [getattr(obj, k, None) for k in keys] + values = [v for v in values if v not in (None, '')] + if strict and len(values) < len(keys): + self._log.debug(u'some keys {0} on item {1} are null or empty:' + u' skipping', + keys, displayable_path(obj.path)) + elif (not strict and not len(values)): + self._log.debug(u'all keys {0} on item {1} are null or empty:' + u' skipping', + keys, displayable_path(obj.path)) + else: + key = tuple(values) + counts[key].append(obj) + + return counts + + def _order(self, objs, tiebreak=None): + """Return the objects (Items or Albums) sorted by descending + order of priority. + + If provided, the `tiebreak` dict indicates the field to use to + prioritize the objects. Otherwise, Items are placed in order of + "completeness" (objects with more non-null fields come first) + and Albums are ordered by their track count. + """ + if tiebreak: + kind = 'items' if all(isinstance(o, Item) + for o in objs) else 'albums' + key = lambda x: tuple(getattr(x, k) for k in tiebreak[kind]) + else: + kind = Item if all(isinstance(o, Item) for o in objs) else Album + if kind is Item: + def truthy(v): + # Avoid a Unicode warning by avoiding comparison + # between a bytes object and the empty Unicode + # string ''. 
+ return v is not None and \ + (v != '' if isinstance(v, unicode) else True) + fields = kind.all_keys() + key = lambda x: sum(1 for f in fields if truthy(getattr(x, f))) + else: + key = lambda x: len(x.items()) + + return sorted(objs, key=key, reverse=True) + + def _merge_items(self, objs): + """Merge Item objs by copying missing fields from items in the tail to + the head item. + + Return same number of items, with the head item modified. + """ + fields = Item.all_keys() + for f in fields: + for o in objs[1:]: + if getattr(objs[0], f, None) in (None, ''): + value = getattr(o, f, None) + if value: + self._log.debug(u'key {0} on item {1} is null ' + u'or empty: setting from item {2}', + f, displayable_path(objs[0].path), + displayable_path(o.path)) + setattr(objs[0], f, value) + objs[0].store() + break + return objs + + def _merge_albums(self, objs): + """Merge Album objs by copying missing items from albums in the tail + to the head album. + + Return same number of albums, with the head album modified.""" + ids = [i.mb_trackid for i in objs[0].items()] + for o in objs[1:]: + for i in o.items(): + if i.mb_trackid not in ids: + missing = Item.from_path(i.path) + missing.album_id = objs[0].id + missing.add(i._db) + self._log.debug(u'item {0} missing from album {1}:' + u' merging from {2} into {3}', + missing, + objs[0], + displayable_path(o.path), + displayable_path(missing.destination())) + missing.move(copy=True) + return objs + + def _merge(self, objs): + """Merge duplicate items. See ``_merge_items`` and ``_merge_albums`` + for the relevant strategies. + """ + kind = Item if all(isinstance(o, Item) for o in objs) else Album + if kind is Item: + objs = self._merge_items(objs) + else: + objs = self._merge_albums(objs) + return objs + + def _duplicates(self, objs, keys, full, strict, tiebreak, merge): + """Generate triples of keys, duplicate counts, and constituent objects. + """ + offset = 0 if full else 1 + for k, objs in self._group_by(objs, keys, strict).iteritems(): + if len(objs) > 1: + objs = self._order(objs, tiebreak) + if merge: + objs = self._merge(objs) + yield (k, len(objs) - offset, objs[offset:]) diff -Nru beets-1.3.8+dfsg/beetsplug/echonest.py beets-1.3.19/beetsplug/echonest.py --- beets-1.3.8+dfsg/beetsplug/echonest.py 2014-09-14 18:35:06.000000000 +0000 +++ beets-1.3.19/beetsplug/echonest.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,545 +0,0 @@ -# This file is part of beets. -# Copyright 2013, Adrian Sampson. -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. - -"""Fetch a variety of acoustic metrics from The Echo Nest. 
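A short sketch of the reporting behaviour of `_duplicates` above: unless `--full` is given, the highest-priority object of each group (as ranked by `_order`) is kept out of the reported duplicates. The `groups` dict below is a stand-in for `_group_by` output.

    groups = {('x',): ['keeper', 'dupe1', 'dupe2'], ('y',): ['only-one']}

    def duplicates(groups, full):
        offset = 0 if full else 1
        for key, objs in groups.items():
            if len(objs) > 1:                      # singletons are not duplicates
                yield key, len(objs) - offset, objs[offset:]

    assert list(duplicates(groups, False)) == [(('x',), 2, ['dupe1', 'dupe2'])]
    assert list(duplicates(groups, True)) == [(('x',), 3, ['keeper', 'dupe1', 'dupe2'])]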
-""" -import time -import logging -import socket -import os -import tempfile -from string import Template -import subprocess - -from beets import util, config, plugins, ui -from beets.dbcore import types -import pyechonest -import pyechonest.song -import pyechonest.track - -log = logging.getLogger('beets') - -# If a request at the EchoNest fails, we want to retry the request RETRIES -# times and wait between retries for RETRY_INTERVAL seconds. -RETRIES = 10 -RETRY_INTERVAL = 10 - -DEVNULL = open(os.devnull, 'wb') -ALLOWED_FORMATS = ('MP3', 'OGG', 'AAC') -UPLOAD_MAX_SIZE = 50 * 1024 * 1024 - -# Maps attribute names from echonest to their field names in beets. -# The attributes are retrieved from a songs `audio_summary`. See: -# http://echonest.github.io/pyechonest/song.html#pyechonest.song.profile -ATTRIBUTES = { - 'energy': 'energy', - 'liveness': 'liveness', - 'speechiness': 'speechiness', - 'acousticness': 'acousticness', - 'danceability': 'danceability', - 'valence': 'valence', - 'tempo': 'bpm', -} - -# Types for the flexible fields added by `ATTRIBUTES` -FIELD_TYPES = { - 'energy': types.FLOAT, - 'liveness': types.FLOAT, - 'speechiness': types.FLOAT, - 'acousticness': types.FLOAT, - 'danceability': types.FLOAT, - 'valence': types.FLOAT, -} - -MUSICAL_SCALE = ['C', 'C#', 'D', 'D#', 'E' 'F', - 'F#', 'G', 'G#', 'A', 'A#', 'B'] - - -# We also use echonest_id (song_id) and echonest_fingerprint to speed up -# lookups. -ID_KEY = 'echonest_id' -FINGERPRINT_KEY = 'echonest_fingerprint' - - -def _splitstrip(string, delim=u','): - """Split string (at commas by default) and strip whitespace from the - pieces. - """ - return [s.strip() for s in string.split(delim)] - - -def diff(item1, item2): - """Score two Item objects according to the Echo Nest numerical - fields. - """ - result = 0.0 - for attr in ATTRIBUTES.values(): - if attr == 'bpm': - # BPM (tempo) is handled specially to normalize. - continue - - try: - result += abs( - float(item1.get(attr, None)) - - float(item2.get(attr, None)) - ) - except TypeError: - result += 1.0 - - try: - bpm1 = float(item1.get('bpm', None)) - bpm2 = float(item2.get('bpm', None)) - result += abs(bpm1 - bpm2) / max(bpm1, bpm2, 1) - except TypeError: - result += 1.0 - - return result - - -def similar(lib, src_item, threshold=0.15, fmt='${difference}: ${path}'): - for item in lib.items(): - if item.path != src_item.path: - d = diff(item, src_item) - if d < threshold: - s = fmt.replace('${difference}', '{:2.2f}'.format(d)) - ui.print_obj(item, lib, s) - - -class EchonestMetadataPlugin(plugins.BeetsPlugin): - - item_types = FIELD_TYPES - - def __init__(self): - super(EchonestMetadataPlugin, self).__init__() - self.config.add({ - 'auto': True, - 'apikey': u'NY2KTZHQ0QDSHBAP6', - 'codegen': None, - 'upload': True, - 'convert': True, - 'truncate': True, - }) - self.config.add(ATTRIBUTES) - - pyechonest.config.ECHO_NEST_API_KEY = \ - config['echonest']['apikey'].get(unicode) - - if config['echonest']['codegen']: - pyechonest.config.CODEGEN_BINARY_OVERRIDE = \ - config['echonest']['codegen'].get(unicode) - - if self.config['auto']: - self.import_stages = [self.imported] - - def _echofun(self, func, **kwargs): - """Wrapper for requests to the EchoNest API. Will retry up to - RETRIES times and wait between retries for RETRY_INTERVAL - seconds. 
- """ - for i in range(RETRIES): - try: - result = func(**kwargs) - except pyechonest.util.EchoNestAPIError as e: - if e.code == 3: - # reached access limit per minute - log.debug(u'echonest: rate-limited on try {0}; ' - u'waiting {1} seconds' - .format(i + 1, RETRY_INTERVAL)) - time.sleep(RETRY_INTERVAL) - elif e.code == 5: - # specified identifier does not exist - # no use in trying again. - log.debug(u'echonest: {0}'.format(e)) - return None - else: - log.error(u'echonest: {0}'.format(e.args[0][0])) - return None - except (pyechonest.util.EchoNestIOError, socket.error) as e: - log.warn(u'echonest: IO error: {0}'.format(e)) - time.sleep(RETRY_INTERVAL) - except Exception as e: - # there was an error analyzing the track, status: error - log.debug(u'echonest: {0}'.format(e)) - return None - else: - break - else: - # If we exited the loop without breaking, then we used up all - # our allotted retries. - raise ui.UserError(u'echonest request failed repeatedly') - return None - return result - - def _pick_song(self, songs, item): - """Helper method to pick the best matching song from a list of songs - returned by the EchoNest. Compares artist, title and duration. If - the artist and title match and the duration difference is <= 1.0 - seconds, it's considered a match. - """ - if not songs: - log.debug(u'echonest: no songs found') - return - - pick = None - min_dist = item.length - for song in songs: - if song.artist_name.lower() == item.artist.lower() \ - and song.title.lower() == item.title.lower(): - dist = abs(item.length - song.audio_summary['duration']) - if dist < min_dist: - min_dist = dist - pick = song - if min_dist > 2.5: - return None - return pick - - def _flatten_song(self, song): - """Given an Echo Nest song object, return a flat dict containing - attributes we care about. If song is None, return None. - """ - if not song: - return - values = dict(song.audio_summary) - values['id'] = song.id - return values - - # "Profile" (ID-based) lookup. - - def profile(self, item): - """Do a lookup on the EchoNest by MusicBrainz ID. - """ - # Use an existing Echo Nest ID. - if ID_KEY in item: - enid = item[ID_KEY] - - # Look up the Echo Nest ID based on the MBID. - else: - if not item.mb_trackid: - log.debug(u'echonest: no ID available') - return - mbid = 'musicbrainz:track:{0}'.format(item.mb_trackid) - track = self._echofun(pyechonest.track.track_from_id, - identifier=mbid) - if not track: - log.debug(u'echonest: lookup by MBID failed') - return - enid = track.song_id - - # Use the Echo Nest ID to look up the song. - songs = self._echofun(pyechonest.song.profile, ids=enid, - buckets=['id:musicbrainz', 'audio_summary']) - return self._flatten_song(self._pick_song(songs, item)) - - # "Search" (metadata-based) lookup. - - def search(self, item): - """Search the item at the EchoNest by artist and title. - """ - songs = self._echofun(pyechonest.song.search, title=item.title, - results=100, artist=item.artist, - buckets=['id:musicbrainz', 'tracks', - 'audio_summary']) - return self._flatten_song(self._pick_song(songs, item)) - - # "Identify" (fingerprinting) lookup. - - def fingerprint(self, item): - """Get the fingerprint for this item from the EchoNest. If we - already have a fingerprint, return it and don't calculate it - again. 
- """ - if FINGERPRINT_KEY in item: - return item[FINGERPRINT_KEY] - - try: - res = self._echofun(pyechonest.util.codegen, - filename=item.path.decode('utf-8')) - except Exception as e: - # Frustratingly, the pyechonest library raises a plain Exception - # when the command is not found. - log.debug(u'echonest: codegen failed: {0}'.format(e)) - return - - if not res or 'code' not in res[0] or not res[0]['code']: - log.debug(u'echonest: no fingerprint returned') - return - code = res[0]['code'] - - log.debug(u'echonest: calculated fingerprint') - item[FINGERPRINT_KEY] = code - return code - - def identify(self, item): - """Try to identify the song at the EchoNest. - """ - code = self.fingerprint(item) - if not code: - return - - songs = self._echofun(pyechonest.song.identify, code=code) - if not songs: - log.debug(u'echonest: no songs found for fingerprint') - return - - return self._flatten_song(max(songs, key=lambda s: s.score)) - - # "Analyze" (upload the audio itself) method. - - def convert(self, item): - """Converts an item in an unsupported media format to ogg. Config - pending. - This is stolen from Jakob Schnitzers convert plugin. - """ - fd, dest = tempfile.mkstemp(u'.ogg') - os.close(fd) - source = item.path - - log.info(u'echonest: encoding {0} to {1}'.format( - util.displayable_path(source), - util.displayable_path(dest), - )) - - # Build up the FFmpeg command line. - # FIXME: use avconv? - command = u'ffmpeg -i $source -y -acodec libvorbis -vn -aq 2 $dest' - opts = [] - for arg in command.split(): - arg = arg.encode('utf-8') - opts.append(Template(arg).substitute(source=source, dest=dest)) - - # Run the command. - try: - util.command_output(opts) - except (OSError, subprocess.CalledProcessError) as exc: - log.debug(u'echonest: encode failed: {0}'.format(exc)) - util.remove(dest) - return - - log.info(u'echonest: finished encoding {0}'.format( - util.displayable_path(source)) - ) - return dest - - def truncate(self, item): - """Truncates an item to a size less than UPLOAD_MAX_SIZE.""" - fd, dest = tempfile.mkstemp(u'.ogg') - os.close(fd) - source = item.path - - log.info(u'echonest: truncating {0} to {1}'.format( - util.displayable_path(source), - util.displayable_path(dest), - )) - - command = u'ffmpeg -t 300 -i $source -y -acodec copy $dest' - opts = [] - for arg in command.split(): - arg = arg.encode('utf-8') - opts.append(Template(arg).substitute(source=source, dest=dest)) - - # Run the command. - try: - util.command_output(opts) - except (OSError, subprocess.CalledProcessError) as exc: - log.debug(u'echonest: truncate failed: {0}'.format(exc)) - util.remove(dest) - return - - log.info(u'echonest: truncate encoding {0}'.format( - util.displayable_path(source)) - ) - return dest - - def analyze(self, item): - """Upload the item to the EchoNest for analysis. May require to - convert the item to a supported media format. - """ - # Get the file to upload (either by using the file directly or by - # transcoding it first). - source = item.path - if item.format not in ALLOWED_FORMATS: - if config['echonest']['convert']: - source = self.convert(item) - if not source: - log.debug(u'echonest: failed to convert file') - return - else: - return - - if os.stat(item.path).st_size > UPLOAD_MAX_SIZE: - if config['echonest']['truncate']: - source = self.truncate(item) - if not source: - log.debug(u'echonest: failed to truncate file') - return - else: - return - - # Upload the audio file. 
- log.info(u'echonest: uploading file, please be patient') - track = self._echofun(pyechonest.track.track_from_filename, - filename=source) - if not track: - log.debug(u'echonest: failed to upload file') - return - - # Sometimes we have a track but no song. I guess this happens for - # new / unverified songs. We need to "extract" the audio_summary - # from the track object manually. I don't know why the - # pyechonest API handles tracks (merge audio_summary to __dict__) - # and songs (keep audio_summary in an extra attribute) - # differently. - # Maybe a patch for pyechonest could help? - - # First get the (limited) metadata from the track in case - # there's no associated song. - from_track = {} - for key in ATTRIBUTES: - try: - from_track[key] = getattr(track, key) - except AttributeError: - pass - from_track['duration'] = track.duration - - # Try to look up a song for the full metadata. - try: - song_id = track.song_id - except AttributeError: - return from_track - songs = self._echofun(pyechonest.song.profile, - ids=[song_id], track_ids=[track.id], - buckets=['audio_summary']) - if songs: - pick = self._pick_song(songs, item) - if pick: - return self._flatten_song(pick) - return from_track # Fall back to track metadata. - - # Shared top-level logic. - - def fetch_song(self, item): - """Try all methods to get a matching song object from the - EchoNest. If no method succeeds, return None. - """ - # There are four different ways to get a song. Each method is a - # callable that takes the Item as an argument. - methods = [self.profile, self.search] - if config['echonest']['codegen']: - methods.append(self.identify) - if config['echonest']['upload']: - methods.append(self.analyze) - - # Try each method in turn. - for method in methods: - song = method(item) - if song: - log.debug( - u'echonest: got song through {0}: {1} - {2} [{3}]'.format( - method.__name__, - item.artist, - item.title, - song['duration'], - ) - ) - return song - - def apply_metadata(self, item, values, write=False): - """Copy the metadata from the dictionary of song information to - the item. - """ - # Update each field. - for k, v in values.iteritems(): - if k in ATTRIBUTES: - field = ATTRIBUTES[k] - log.debug(u'echonest: metadata: {0} = {1}'.format(field, v)) - if field == 'bpm': - item[field] = int(v) - else: - item[field] = v - if 'key' in values and 'mode' in values: - key = MUSICAL_SCALE[values['key'] - 1] - if values['mode'] == 0: # Minor key - key += 'm' - item['initial_key'] = key - if 'id' in values: - enid = values['id'] - log.debug(u'echonest: metadata: {0} = {1}'.format(ID_KEY, enid)) - item[ID_KEY] = enid - - # Write and save. - if write: - item.try_write() - item.store() - - # Automatic (on-import) metadata fetching. - - def imported(self, session, task): - """Import pipeline stage. - """ - for item in task.imported_items(): - song = self.fetch_song(item) - if song: - self.apply_metadata(item, song) - - # Explicit command invocation. - - def requires_update(self, item): - """Check if this item requires an update from the EchoNest (its - data is missing). 
- """ - for field in ATTRIBUTES.values(): - if not item.get(field): - return True - log.info(u'echonest: no update required') - return False - - def commands(self): - fetch_cmd = ui.Subcommand('echonest', - help='Fetch metadata from the EchoNest') - fetch_cmd.parser.add_option( - '-f', '--force', dest='force', action='store_true', default=False, - help='(re-)download information from the EchoNest' - ) - - def fetch_func(lib, opts, args): - self.config.set_args(opts) - write = config['import']['write'].get(bool) - for item in lib.items(ui.decargs(args)): - log.info(u'echonest: {0} - {1}'.format(item.artist, - item.title)) - if self.config['force'] or self.requires_update(item): - song = self.fetch_song(item) - if song: - self.apply_metadata(item, song, write) - - fetch_cmd.func = fetch_func - - sim_cmd = ui.Subcommand('echosim', help='show related files') - sim_cmd.parser.add_option( - '-t', '--threshold', dest='threshold', action='store', - type='float', default=0.15, help='Set difference threshold' - ) - sim_cmd.parser.add_option( - '-f', '--format', action='store', default='${difference}: ${path}', - help='print with custom format' - ) - - def sim_func(lib, opts, args): - self.config.set_args(opts) - for item in lib.items(ui.decargs(args)): - similar(lib, item, opts.threshold, opts.format) - - sim_cmd.func = sim_func - - return [fetch_cmd, sim_cmd] diff -Nru beets-1.3.8+dfsg/beetsplug/echonest_tempo.py beets-1.3.19/beetsplug/echonest_tempo.py --- beets-1.3.8+dfsg/beetsplug/echonest_tempo.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/beetsplug/echonest_tempo.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,156 +0,0 @@ -# This file is part of beets. -# Copyright 2013, David Brenner -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. - -"""Gets tempo (bpm) for imported music from the EchoNest API. Requires -the pyechonest library (https://github.com/echonest/pyechonest). -""" -import time -import logging -from beets.plugins import BeetsPlugin -from beets import ui -from beets import config -import pyechonest.config -import pyechonest.song -import socket - - -# Global logger. -log = logging.getLogger('beets') - -RETRY_INTERVAL = 10 # Seconds. -RETRIES = 10 - - -def fetch_item_tempo(lib, loglevel, item, write): - """Fetch and store tempo for a single item. If ``write``, then the - tempo will also be written to the file itself in the bpm field. The - ``loglevel`` parameter controls the visibility of the function's - status log messages. - """ - # Skip if the item already has the tempo field. - if item.bpm: - log.log(loglevel, u'bpm already present: {0} - {1}' - .format(item.artist, item.title)) - return - - # Fetch tempo. 
- tempo = get_tempo(item.artist, item.title, item.length) - if not tempo: - log.log(loglevel, u'tempo not found: {0} - {1}' - .format(item.artist, item.title)) - return - - log.log(loglevel, u'fetched tempo: {0} - {1}' - .format(item.artist, item.title)) - item.bpm = int(tempo) - if write: - item.try_write() - item.store() - - -def get_tempo(artist, title, duration): - """Get the tempo for a song.""" - # We must have sufficient metadata for the lookup. Otherwise the API - # will just complain. - artist = artist.replace(u'\n', u' ').strip().lower() - title = title.replace(u'\n', u' ').strip().lower() - if not artist or not title: - return None - - for i in range(RETRIES): - try: - # Unfortunately, all we can do is search by artist and title. - # EchoNest supports foreign ids from MusicBrainz, but currently - # only for artists, not individual tracks/recordings. - results = pyechonest.song.search( - artist=artist, title=title, results=100, - buckets=['audio_summary'] - ) - except pyechonest.util.EchoNestAPIError as e: - if e.code == 3: - # Wait and try again. - time.sleep(RETRY_INTERVAL) - else: - log.warn(u'echonest_tempo: {0}'.format(e.args[0][0])) - return None - except (pyechonest.util.EchoNestIOError, socket.error) as e: - log.debug(u'echonest_tempo: IO error: {0}'.format(e)) - time.sleep(RETRY_INTERVAL) - else: - break - else: - # If we exited the loop without breaking, then we used up all - # our allotted retries. - log.debug(u'echonest_tempo: exceeded retries') - return None - - # The Echo Nest API can return songs that are not perfect matches. - # So we look through the results for songs that have the right - # artist and title. The API also doesn't have MusicBrainz track IDs; - # otherwise we could use those for a more robust match. - min_distance = duration - pick = None - for result in results: - if result.artist_name.lower() == artist and \ - result.title.lower() == title: - distance = abs(duration - result.audio_summary['duration']) - log.debug( - u'echonest_tempo: candidate {0:2.2f} ' - u'(distance: {1:2.2f}) = {2}'.format( - result.audio_summary['duration'], - distance, - result.audio_summary['tempo'], - ) - ) - if distance < min_distance: - min_distance = distance - pick = result.audio_summary['tempo'] - return pick - - -class EchoNestTempoPlugin(BeetsPlugin): - def __init__(self): - super(EchoNestTempoPlugin, self).__init__() - self.import_stages = [self.imported] - self.config.add({ - 'apikey': u'NY2KTZHQ0QDSHBAP6', - 'auto': True, - }) - - pyechonest.config.ECHO_NEST_API_KEY = \ - self.config['apikey'].get(unicode) - - def commands(self): - cmd = ui.Subcommand('tempo', help='fetch song tempo (bpm)') - cmd.parser.add_option('-p', '--print', dest='printbpm', - action='store_true', default=False, - help='print tempo (bpm) to console') - - def func(lib, opts, args): - # The "write to files" option corresponds to the - # import_write config value. - write = config['import']['write'].get(bool) - - for item in lib.items(ui.decargs(args)): - fetch_item_tempo(lib, logging.INFO, item, write) - if opts.printbpm and item.bpm: - ui.print_('{0} BPM'.format(item.bpm)) - cmd.func = func - return [cmd] - - # Auto-fetch tempo on import. 
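A sketch of the duration-distance matching inside `get_tempo()` above (the same idea as `_pick_song` in echonest.py): among results whose artist and title match exactly, the candidate whose duration is closest to the library item wins. The result dicts are stand-ins for pyechonest song objects.

    def pick_tempo(results, artist, title, duration):
        min_distance, pick = duration, None
        for r in results:
            if r['artist'].lower() == artist and r['title'].lower() == title:
                distance = abs(duration - r['duration'])
                if distance < min_distance:
                    min_distance, pick = distance, r['tempo']
        return pick

    results = [
        {'artist': 'Foo', 'title': 'Bar', 'duration': 200.0, 'tempo': 120},
        {'artist': 'Foo', 'title': 'Bar', 'duration': 181.0, 'tempo': 95},
    ]
    assert pick_tempo(results, 'foo', 'bar', 180.0) == 95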
- def imported(self, config, task): - if self.config['auto']: - for item in task.imported_items(): - fetch_item_tempo(config.lib, logging.DEBUG, item, False) diff -Nru beets-1.3.8+dfsg/beetsplug/edit.py beets-1.3.19/beetsplug/edit.py --- beets-1.3.8+dfsg/beetsplug/edit.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beetsplug/edit.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,392 @@ +# This file is part of beets. +# Copyright 2016 +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Open metadata information in a text editor to let the user edit it. +""" +from __future__ import division, absolute_import, print_function + +from beets import plugins +from beets import util +from beets import ui +from beets.dbcore import types +from beets.importer import action +from beets.ui.commands import _do_query, PromptChoice +from copy import deepcopy +import subprocess +import yaml +from tempfile import NamedTemporaryFile +import os + + +# These "safe" types can avoid the format/parse cycle that most fields go +# through: they are safe to edit with native YAML types. +SAFE_TYPES = (types.Float, types.Integer, types.Boolean) + + +class ParseError(Exception): + """The modified file is unreadable. The user should be offered a chance to + fix the error. + """ + + +def edit(filename, log): + """Open `filename` in a text editor. + """ + cmd = util.shlex_split(util.editor_command()) + cmd.append(filename) + log.debug(u'invoking editor command: {!r}', cmd) + try: + subprocess.call(cmd) + except OSError as exc: + raise ui.UserError(u'could not run editor command {!r}: {}'.format( + cmd[0], exc + )) + + +def dump(arg): + """Dump a sequence of dictionaries as YAML for editing. + """ + return yaml.safe_dump_all( + arg, + allow_unicode=True, + default_flow_style=False, + ) + + +def load(s): + """Read a sequence of YAML documents back to a list of dictionaries + with string keys. + + Can raise a `ParseError`. + """ + try: + out = [] + for d in yaml.load_all(s): + if not isinstance(d, dict): + raise ParseError( + u'each entry must be a dictionary; found {}'.format( + type(d).__name__ + ) + ) + + # Convert all keys to strings. They started out as strings, + # but the user may have inadvertently messed this up. + out.append({unicode(k): v for k, v in d.items()}) + + except yaml.YAMLError as e: + raise ParseError(u'invalid YAML: {}'.format(e)) + return out + + +def _safe_value(obj, key, value): + """Check whether the `value` is safe to represent in YAML and trust as + returned from parsed YAML. + + This ensures that values do not change their type when the user edits their + YAML representation. + """ + typ = obj._type(key) + return isinstance(typ, SAFE_TYPES) and isinstance(value, typ.model_type) + + +def flatten(obj, fields): + """Represent `obj`, a `dbcore.Model` object, as a dictionary for + serialization. Only include the given `fields` if provided; + otherwise, include everything. 
+ + The resulting dictionary's keys are strings and the values are + safely YAML-serializable types. + """ + # Format each value. + d = {} + for key in obj.keys(): + value = obj[key] + if _safe_value(obj, key, value): + # A safe value that is faithfully representable in YAML. + d[key] = value + else: + # A value that should be edited as a string. + d[key] = obj.formatted()[key] + + # Possibly filter field names. + if fields: + return {k: v for k, v in d.items() if k in fields} + else: + return d + + +def apply_(obj, data): + """Set the fields of a `dbcore.Model` object according to a + dictionary. + + This is the opposite of `flatten`. The `data` dictionary should have + strings as values. + """ + for key, value in data.items(): + if _safe_value(obj, key, value): + # A safe value *stayed* represented as a safe type. Assign it + # directly. + obj[key] = value + else: + # Either the field was stringified originally or the user changed + # it from a safe type to an unsafe one. Parse it as a string. + obj.set_parse(key, unicode(value)) + + +class EditPlugin(plugins.BeetsPlugin): + + def __init__(self): + super(EditPlugin, self).__init__() + + self.config.add({ + # The default fields to edit. + 'albumfields': 'album albumartist', + 'itemfields': 'track title artist album', + + # Silently ignore any changes to these fields. + 'ignore_fields': 'id path', + }) + + self.register_listener('before_choose_candidate', + self.before_choose_candidate_listener) + + def commands(self): + edit_command = ui.Subcommand( + 'edit', + help=u'interactively edit metadata' + ) + edit_command.parser.add_option( + u'-f', u'--field', + metavar='FIELD', + action='append', + help=u'edit this field also', + ) + edit_command.parser.add_option( + u'--all', + action='store_true', dest='all', + help=u'edit all fields', + ) + edit_command.parser.add_album_option() + edit_command.func = self._edit_command + return [edit_command] + + def _edit_command(self, lib, opts, args): + """The CLI command function for the `beet edit` command. + """ + # Get the objects to edit. + query = ui.decargs(args) + items, albums = _do_query(lib, query, opts.album, False) + objs = albums if opts.album else items + if not objs: + ui.print_(u'Nothing to edit.') + return + + # Get the fields to edit. + if opts.all: + fields = None + else: + fields = self._get_fields(opts.album, opts.field) + self.edit(opts.album, objs, fields) + + def _get_fields(self, album, extra): + """Get the set of fields to edit. + """ + # Start with the configured base fields. + if album: + fields = self.config['albumfields'].as_str_seq() + else: + fields = self.config['itemfields'].as_str_seq() + + # Add the requested extra fields. + if extra: + fields += extra + + # Ensure we always have the `id` field for identification. + fields.append('id') + + return set(fields) + + def edit(self, album, objs, fields): + """The core editor function. + + - `album`: A flag indicating whether we're editing Items or Albums. + - `objs`: The `Item`s or `Album`s to edit. + - `fields`: The set of field names to edit (or None to edit + everything). + """ + # Present the YAML to the user and let her change it. + success = self.edit_objects(objs, fields) + + # Save the new data. + if success: + self.save_changes(objs) + + def edit_objects(self, objs, fields): + """Dump a set of Model objects to a file as text, ask the user + to edit it, and apply any changes to the objects. + + Return a boolean indicating whether the edit succeeded. + """ + # Get the content to edit as raw data structures. 
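A minimal round-trip sketch of `dump()`/`load()` above: the objects are serialized as a multi-document YAML stream, handed to the editor, and parsed back into plain dicts. (`safe_load_all` is used here for brevity; the plugin's `load()` adds its own validation.)

    import yaml

    old_data = [{'id': 1, 'title': u'Foo'}, {'id': 2, 'title': u'Bar'}]
    text = yaml.safe_dump_all(old_data, allow_unicode=True,
                              default_flow_style=False)
    # ... the user edits `text` in $EDITOR ...
    new_data = list(yaml.safe_load_all(text))
    assert new_data == old_data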
+ old_data = [flatten(o, fields) for o in objs] + + # Set up a temporary file with the initial data for editing. + new = NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) + old_str = dump(old_data) + new.write(old_str) + new.close() + + # Loop until we have parseable data and the user confirms. + try: + while True: + # Ask the user to edit the data. + edit(new.name, self._log) + + # Read the data back after editing and check whether anything + # changed. + with open(new.name) as f: + new_str = f.read() + if new_str == old_str: + ui.print_(u"No changes; aborting.") + return False + + # Parse the updated data. + try: + new_data = load(new_str) + except ParseError as e: + ui.print_(u"Could not read data: {}".format(e)) + if ui.input_yn(u"Edit again to fix? (Y/n)", True): + continue + else: + return False + + # Show the changes. + # If the objects are not on the DB yet, we need a copy of their + # original state for show_model_changes. + objs_old = [deepcopy(obj) if not obj._db else None + for obj in objs] + self.apply_data(objs, old_data, new_data) + changed = False + for obj, obj_old in zip(objs, objs_old): + changed |= ui.show_model_changes(obj, obj_old) + if not changed: + ui.print_(u'No changes to apply.') + return False + + # Confirm the changes. + choice = ui.input_options( + (u'continue Editing', u'apply', u'cancel') + ) + if choice == u'a': # Apply. + return True + elif choice == u'c': # Cancel. + return False + elif choice == u'e': # Keep editing. + # Reset the temporary changes to the objects. + for obj in objs: + obj.read() + continue + + # Remove the temporary file before returning. + finally: + os.remove(new.name) + + def apply_data(self, objs, old_data, new_data): + """Take potentially-updated data and apply it to a set of Model + objects. + + The objects are not written back to the database, so the changes + are temporary. + """ + if len(old_data) != len(new_data): + self._log.warn(u'number of objects changed from {} to {}', + len(old_data), len(new_data)) + + obj_by_id = {o.id: o for o in objs} + ignore_fields = self.config['ignore_fields'].as_str_seq() + for old_dict, new_dict in zip(old_data, new_data): + # Prohibit any changes to forbidden fields to avoid + # clobbering `id` and such by mistake. + forbidden = False + for key in ignore_fields: + if old_dict.get(key) != new_dict.get(key): + self._log.warn(u'ignoring object whose {} changed', key) + forbidden = True + break + if forbidden: + continue + + id_ = int(old_dict['id']) + apply_(obj_by_id[id_], new_dict) + + def save_changes(self, objs): + """Save a list of updated Model objects to the database. + """ + # Save to the database and possibly write tags. + for ob in objs: + if ob._dirty: + self._log.debug(u'saving changes to {}', ob) + ob.try_sync(ui.should_write(), ui.should_move()) + + # Methods for interactive importer execution. + + def before_choose_candidate_listener(self, session, task): + """Append an "Edit" choice and an "edit Candidates" choice (if + there are candidates) to the interactive importer prompt. + """ + choices = [PromptChoice('d', 'eDit', self.importer_edit)] + if task.candidates: + choices.append(PromptChoice('c', 'edit Candidates', + self.importer_edit_candidate)) + + return choices + + def importer_edit(self, session, task): + """Callback for invoking the functionality during an interactive + import session on the *original* item tags. + """ + # Assign temporary ids to the Items. 
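A sketch of the `ignore_fields` guard in `apply_data()` above: an edited YAML document is discarded wholesale if one of the protected fields changed, which keeps `id` and `path` from being clobbered by accident.

    ignore_fields = ['id', 'path']

    def is_forbidden(old_dict, new_dict):
        return any(old_dict.get(k) != new_dict.get(k) for k in ignore_fields)

    assert is_forbidden({'id': 1, 'title': 'a'}, {'id': 2, 'title': 'a'})
    assert not is_forbidden({'id': 1, 'title': 'a'}, {'id': 1, 'title': 'b'})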
+ for i, obj in enumerate(task.items): + obj.id = i + 1 + + # Present the YAML to the user and let her change it. + fields = self._get_fields(album=False, extra=[]) + success = self.edit_objects(task.items, fields) + + # Remove temporary ids. + for obj in task.items: + obj.id = None + + # Save the new data. + if success: + # Return action.RETAG, which makes the importer write the tags + # to the files if needed without re-applying metadata. + return action.RETAG + else: + # Edit cancelled / no edits made. Revert changes. + for obj in task.items: + obj.read() + + def importer_edit_candidate(self, session, task): + """Callback for invoking the functionality during an interactive + import session on a *candidate*. The candidate's metadata is + applied to the original items. + """ + # Prompt the user for a candidate. + sel = ui.input_options([], numrange=(1, len(task.candidates))) + # Force applying the candidate on the items. + task.match = task.candidates[sel - 1] + task.apply_metadata() + + return self.importer_edit(session, task) diff -Nru beets-1.3.8+dfsg/beetsplug/embedart.py beets-1.3.19/beetsplug/embedart.py --- beets-1.3.8+dfsg/beetsplug/embedart.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/beetsplug/embedart.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -13,19 +14,17 @@ # included in all copies or substantial portions of the Software. """Allows beets to embed album art into file metadata.""" +from __future__ import division, absolute_import, print_function + import os.path -import logging -import imghdr from beets.plugins import BeetsPlugin -from beets import mediafile from beets import ui from beets.ui import decargs -from beets.util import syspath, normpath, displayable_path +from beets.util import syspath, normpath, displayable_path, bytestring_path from beets.util.artresizer import ArtResizer from beets import config - -log = logging.getLogger('beets') +from beets import art class EmbedCoverArtPlugin(BeetsPlugin): @@ -36,159 +35,120 @@ self.config.add({ 'maxwidth': 0, 'auto': True, + 'compare_threshold': 0, + 'ifempty': False, + 'remove_art_file': False }) - if self.config['maxwidth'].get(int) and \ - not ArtResizer.shared.local: + + if self.config['maxwidth'].get(int) and not ArtResizer.shared.local: self.config['maxwidth'] = 0 - log.warn(u"embedart: ImageMagick or PIL not found; " - u"'maxwidth' option ignored") + self._log.warning(u"ImageMagick or PIL not found; " + u"'maxwidth' option ignored") + if self.config['compare_threshold'].get(int) and not \ + ArtResizer.shared.can_compare: + self.config['compare_threshold'] = 0 + self._log.warning(u"ImageMagick 6.8.7 or higher not installed; " + u"'compare_threshold' option ignored") + + self.register_listener('art_set', self.process_album) def commands(self): # Embed command. 
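A sketch of the capability checks in `EmbedCoverArtPlugin.__init__` above: options whose backend is unavailable are reset to a neutral value up front rather than failing later. The boolean flags stand in for `ArtResizer.shared.local` and `ArtResizer.shared.can_compare`.

    def effective_options(maxwidth, compare_threshold,
                          have_resizer, have_compare):
        if maxwidth and not have_resizer:
            maxwidth = 0                  # ImageMagick/PIL missing
        if compare_threshold and not have_compare:
            compare_threshold = 0         # ImageMagick >= 6.8.7 missing
        return maxwidth, compare_threshold

    assert effective_options(500, 30, False, False) == (0, 0)
    assert effective_options(500, 30, True, True) == (500, 30)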
embed_cmd = ui.Subcommand( - 'embedart', help='embed image files into file metadata' + 'embedart', help=u'embed image files into file metadata' ) embed_cmd.parser.add_option( - '-f', '--file', metavar='PATH', help='the image file to embed' + u'-f', u'--file', metavar='PATH', help=u'the image file to embed' ) - maxwidth = config['embedart']['maxwidth'].get(int) + maxwidth = self.config['maxwidth'].get(int) + compare_threshold = self.config['compare_threshold'].get(int) + ifempty = self.config['ifempty'].get(bool) def embed_func(lib, opts, args): if opts.file: imagepath = normpath(opts.file) + if not os.path.isfile(syspath(imagepath)): + raise ui.UserError(u'image file {0} not found'.format( + displayable_path(imagepath) + )) for item in lib.items(decargs(args)): - embed_item(item, imagepath, maxwidth) + art.embed_item(self._log, item, imagepath, maxwidth, None, + compare_threshold, ifempty) else: for album in lib.albums(decargs(args)): - embed_album(album, maxwidth) + art.embed_album(self._log, album, maxwidth, False, + compare_threshold, ifempty) + self.remove_artfile(album) embed_cmd.func = embed_func # Extract command. - extract_cmd = ui.Subcommand('extractart', - help='extract an image from file metadata') - extract_cmd.parser.add_option('-o', dest='outpath', - help='image output file') + extract_cmd = ui.Subcommand( + 'extractart', + help=u'extract an image from file metadata', + ) + extract_cmd.parser.add_option( + u'-o', dest='outpath', + help=u'image output file', + ) + extract_cmd.parser.add_option( + u'-n', dest='filename', + help=u'image filename to create for all matched albums', + ) + extract_cmd.parser.add_option( + '-a', dest='associate', action='store_true', + help='associate the extracted images with the album', + ) def extract_func(lib, opts, args): - outpath = normpath(opts.outpath or 'cover') - extract(lib, outpath, decargs(args)) + if opts.outpath: + art.extract_first(self._log, normpath(opts.outpath), + lib.items(decargs(args))) + else: + filename = bytestring_path(opts.filename or + config['art_filename'].get()) + if os.path.dirname(filename) != b'': + self._log.error( + u"Only specify a name rather than a path for -n") + return + for album in lib.albums(decargs(args)): + artpath = normpath(os.path.join(album.path, filename)) + artpath = art.extract_first(self._log, artpath, + album.items()) + if artpath and opts.associate: + album.set_art(artpath) + album.store() extract_cmd.func = extract_func # Clear command. - clear_cmd = ui.Subcommand('clearart', - help='remove images from file metadata') + clear_cmd = ui.Subcommand( + 'clearart', + help=u'remove images from file metadata', + ) def clear_func(lib, opts, args): - clear(lib, decargs(args)) + art.clear(self._log, lib, decargs(args)) clear_cmd.func = clear_func return [embed_cmd, extract_cmd, clear_cmd] - -@EmbedCoverArtPlugin.listen('album_imported') -def album_imported(lib, album): - """Automatically embed art into imported albums. - """ - if album.artpath and config['embedart']['auto']: - embed_album(album, config['embedart']['maxwidth'].get(int)) - - -def embed_item(item, imagepath, maxwidth=None, itempath=None): - """Embed an image into the item's media file. 
- """ - try: - item['images'] = [_mediafile_image(imagepath, maxwidth)] - item.try_write(itempath) - except IOError as exc: - log.error(u'embedart: could not read image file: {0}'.format(exc)) - finally: - # We don't want to store the image in the database - del item['images'] - - -def embed_album(album, maxwidth=None): - """Embed album art into all of the album's items. - """ - imagepath = album.artpath - if not imagepath: - log.info(u'No album art present: {0} - {1}'. - format(album.albumartist, album.album)) - return - if not os.path.isfile(imagepath): - log.error(u'Album art not found at {0}' - .format(imagepath)) - return - - log.info(u'Embedding album art into {0.albumartist} - {0.album}.' - .format(album)) - - for item in album.items(): - embed_item(item, imagepath, maxwidth) - - -def _mediafile_image(image_path, maxwidth=None): - """Return a `mediafile.Image` object for the path. - - If maxwidth is set the image is resized if necessary. - """ - if maxwidth: - image_path = ArtResizer.shared.resize(maxwidth, syspath(image_path)) - - with open(syspath(image_path), 'rb') as f: - data = f.read() - return mediafile.Image(data, type=mediafile.ImageType.front) - - -# 'extractart' command. - -def extract(lib, outpath, query): - item = lib.items(query).get() - if not item: - log.error(u'No item matches query.') - return - - # Extract the art. - try: - mf = mediafile.MediaFile(syspath(item.path)) - except mediafile.UnreadableFileError as exc: - log.error(u'Could not extract art from {0}: {1}'.format( - displayable_path(item.path), exc - )) - return - - art = mf.art - if not art: - log.error(u'No album art present in {0} - {1}.' - .format(item.artist, item.title)) - return - - # Add an extension to the filename. - ext = imghdr.what(None, h=art) - if not ext: - log.error(u'Unknown image type.') - return - outpath += '.' + ext - - log.info(u'Extracting album art from: {0.artist} - {0.title}\n' - u'To: {1}'.format(item, displayable_path(outpath))) - with open(syspath(outpath), 'wb') as f: - f.write(art) - - -# 'clearart' command. - -def clear(lib, query): - log.info(u'Clearing album art from items:') - for item in lib.items(query): - log.info(u'{0} - {1}'.format(item.artist, item.title)) - try: - mf = mediafile.MediaFile(syspath(item.path), - config['id3v23'].get(bool)) - except mediafile.UnreadableFileError as exc: - log.error(u'Could not clear art from {0}: {1}'.format( - displayable_path(item.path), exc - )) - continue - mf.art = None - mf.save() + def process_album(self, album): + """Automatically embed art after art has been set + """ + if self.config['auto'] and ui.should_write(): + max_width = self.config['maxwidth'].get(int) + art.embed_album(self._log, album, max_width, True, + self.config['compare_threshold'].get(int), + self.config['ifempty'].get(bool)) + self.remove_artfile(album) + + def remove_artfile(self, album): + """Possibly delete the album art file for an album (if the + appropriate configuration option is enabled. 
+ """ + if self.config['remove_art_file'] and album.artpath: + if os.path.isfile(album.artpath): + self._log.debug(u'Removing album art file for {0}', album) + os.remove(album.artpath) + album.artpath = None + album.store() diff -Nru beets-1.3.8+dfsg/beetsplug/embyupdate.py beets-1.3.19/beetsplug/embyupdate.py --- beets-1.3.8+dfsg/beetsplug/embyupdate.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beetsplug/embyupdate.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,135 @@ +# -*- coding: utf-8 -*- + +"""Updates the Emby Library whenever the beets library is changed. + + emby: + host: localhost + port: 8096 + username: user + password: password +""" +from __future__ import division, absolute_import, print_function + +from beets import config +from beets.plugins import BeetsPlugin +from urllib import urlencode +from urlparse import urljoin, parse_qs, urlsplit, urlunsplit +import hashlib +import requests + + +def api_url(host, port, endpoint): + """Returns a joined url. + """ + joined = urljoin('http://{0}:{1}'.format(host, port), endpoint) + scheme, netloc, path, query_string, fragment = urlsplit(joined) + query_params = parse_qs(query_string) + + query_params['format'] = ['json'] + new_query_string = urlencode(query_params, doseq=True) + + return urlunsplit((scheme, netloc, path, new_query_string, fragment)) + + +def password_data(username, password): + """Returns a dict with username and its encoded password. + """ + return { + 'username': username, + 'password': hashlib.sha1(password.encode('utf-8')).hexdigest(), + 'passwordMd5': hashlib.md5(password.encode('utf-8')).hexdigest() + } + + +def create_headers(user_id, token=None): + """Return header dict that is needed to talk to the Emby API. + """ + headers = { + 'Authorization': 'MediaBrowser', + 'UserId': user_id, + 'Client': 'other', + 'Device': 'empy', + 'DeviceId': 'beets', + 'Version': '0.0.0' + } + + if token: + headers['X-MediaBrowser-Token'] = token + + return headers + + +def get_token(host, port, headers, auth_data): + """Return token for a user. + """ + url = api_url(host, port, '/Users/AuthenticateByName') + r = requests.post(url, headers=headers, data=auth_data) + + return r.json().get('AccessToken') + + +def get_user(host, port, username): + """Return user dict from server or None if there is no user. + """ + url = api_url(host, port, '/Users/Public') + r = requests.get(url) + user = [i for i in r.json() if i['Name'] == username] + + return user + + +class EmbyUpdate(BeetsPlugin): + def __init__(self): + super(EmbyUpdate, self).__init__() + + # Adding defaults. + config['emby'].add({ + u'host': u'localhost', + u'port': 8096 + }) + + self.register_listener('database_change', self.listen_for_db_change) + + def listen_for_db_change(self, lib, model): + """Listens for beets db change and register the update for the end. + """ + self.register_listener('cli_exit', self.update) + + def update(self, lib): + """When the client exists try to send refresh request to Emby. + """ + self._log.info(u'Updating Emby library...') + + host = config['emby']['host'].get() + port = config['emby']['port'].get() + username = config['emby']['username'].get() + password = config['emby']['password'].get() + + # Get user information from the Emby API. + user = get_user(host, port, username) + if not user: + self._log.warning(u'User {0} could not be found.'.format(username)) + return + + # Create Authentication data and headers. 
+ auth_data = password_data(username, password) + headers = create_headers(user[0]['Id']) + + # Get authentication token. + token = get_token(host, port, headers, auth_data) + if not token: + self._log.warning( + u'Could not get token for user {0}', username + ) + return + + # Recreate headers with a token. + headers = create_headers(user[0]['Id'], token=token) + + # Trigger the Update. + url = api_url(host, port, '/Library/Refresh') + r = requests.post(url, headers=headers) + if r.status_code != 204: + self._log.warning(u'Update could not be triggered') + else: + self._log.info(u'Update triggered.') diff -Nru beets-1.3.8+dfsg/beetsplug/export.py beets-1.3.19/beetsplug/export.py --- beets-1.3.8+dfsg/beetsplug/export.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beetsplug/export.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,151 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Exports data from beets +""" + +from __future__ import division, absolute_import, print_function + +import sys +import json +import codecs + +from datetime import datetime, date +from beets.plugins import BeetsPlugin +from beets import ui +from beets import mediafile +from beetsplug.info import make_key_filter, library_data, tag_data + + +class ExportEncoder(json.JSONEncoder): + """Deals with dates because JSON doesn't have a standard""" + def default(self, o): + if isinstance(o, datetime) or isinstance(o, date): + return o.isoformat() + return json.JSONEncoder.default(self, o) + + +class ExportPlugin(BeetsPlugin): + + def __init__(self): + super(ExportPlugin, self).__init__() + + self.config.add({ + 'default_format': 'json', + 'json': { + # json module formatting options + 'formatting': { + 'ensure_ascii': False, + 'indent': 4, + 'separators': (',', ': '), + 'sort_keys': True + } + }, + # TODO: Use something like the edit plugin + # 'item_fields': [] + }) + + def commands(self): + # TODO: Add option to use albums + + cmd = ui.Subcommand('export', help=u'export data from beets') + cmd.func = self.run + cmd.parser.add_option( + u'-l', u'--library', action='store_true', + help=u'show library fields instead of tags', + ) + cmd.parser.add_option( + u'--append', action='store_true', default=False, + help=u'if should append data to the file', + ) + cmd.parser.add_option( + u'-i', u'--include-keys', default=[], + action='append', dest='included_keys', + help=u'comma separated list of keys to show', + ) + cmd.parser.add_option( + u'-o', u'--output', + help=u'path for the output file. 
If not given, will print the data' + ) + return [cmd] + + def run(self, lib, opts, args): + + file_path = opts.output + file_format = self.config['default_format'].get(str) + file_mode = 'a' if opts.append else 'w' + format_options = self.config[file_format]['formatting'].get(dict) + + export_format = ExportFormat.factory( + file_format, **{ + 'file_path': file_path, + 'file_mode': file_mode + } + ) + + items = [] + data_collector = library_data if opts.library else tag_data + + included_keys = [] + for keys in opts.included_keys: + included_keys.extend(keys.split(',')) + key_filter = make_key_filter(included_keys) + + for data_emitter in data_collector(lib, ui.decargs(args)): + try: + data, item = data_emitter() + except (mediafile.UnreadableFileError, IOError) as ex: + self._log.error(u'cannot read file: {0}', ex) + continue + + data = key_filter(data) + items += [data] + + export_format.export(items, **format_options) + + +class ExportFormat(object): + """The output format type""" + + @classmethod + def factory(cls, type, **kwargs): + if type == "json": + if kwargs['file_path']: + return JsonFileFormat(**kwargs) + else: + return JsonPrintFormat() + raise NotImplementedError() + + def export(self, data, **kwargs): + raise NotImplementedError() + + +class JsonPrintFormat(ExportFormat): + """Outputs to the console""" + + def export(self, data, **kwargs): + json.dump(data, sys.stdout, cls=ExportEncoder, **kwargs) + + +class JsonFileFormat(ExportFormat): + """Saves in a json file""" + + def __init__(self, file_path, file_mode=u'w', encoding=u'utf-8'): + self.path = file_path + self.mode = file_mode + self.encoding = encoding + + def export(self, data, **kwargs): + with codecs.open(self.path, self.mode, self.encoding) as f: + json.dump(data, f, cls=ExportEncoder, **kwargs) diff -Nru beets-1.3.8+dfsg/beetsplug/fetchart.py beets-1.3.19/beetsplug/fetchart.py --- beets-1.3.8+dfsg/beetsplug/fetchart.py 2014-09-14 00:19:04.000000000 +0000 +++ beets-1.3.19/beetsplug/fetchart.py 2016-06-26 00:42:09.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,305 +15,769 @@ """Fetches album art. """ +from __future__ import division, absolute_import, print_function + from contextlib import closing -import logging import os import re from tempfile import NamedTemporaryFile import requests -from beets.plugins import BeetsPlugin -from beets.util.artresizer import ArtResizer +from beets import plugins from beets import importer from beets import ui from beets import util from beets import config +from beets.mediafile import _image_mime_type +from beets.util.artresizer import ArtResizer +from beets.util import confit +from beets.util import syspath, bytestring_path -IMAGE_EXTENSIONS = ['png', 'jpg', 'jpeg'] -CONTENT_TYPES = ('image/jpeg',) -DOWNLOAD_EXTENSION = '.jpg' - -log = logging.getLogger('beets') - -requests_session = requests.Session() -requests_session.headers = {'User-Agent': 'beets'} - - -def _fetch_image(url): - """Downloads an image from a URL and checks whether it seems to - actually be an image. If so, returns a path to the downloaded image. - Otherwise, returns None. 
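A sketch of the JSON output the export formats above produce with the plugin's default formatting options; `ISOEncoder` re-implements the date handling of `ExportEncoder` so the snippet stays self-contained.

    import json
    from datetime import datetime

    class ISOEncoder(json.JSONEncoder):
        def default(self, o):
            if isinstance(o, datetime):
                return o.isoformat()      # dates become ISO-8601 strings
            return json.JSONEncoder.default(self, o)

    items = [{'title': u'Foo', 'added': datetime(2016, 6, 20, 1, 53, 12)}]
    print(json.dumps(items, cls=ISOEncoder, ensure_ascii=False, indent=4,
                     separators=(',', ': '), sort_keys=True))
    # [
    #     {
    #         "added": "2016-06-20T01:53:12",
    #         "title": "Foo"
    #     }
    # ]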
+try: + import itunes + HAVE_ITUNES = True +except ImportError: + HAVE_ITUNES = False + +CONTENT_TYPES = { + 'image/jpeg': [b'jpg', b'jpeg'], + 'image/png': [b'png'] +} +IMAGE_EXTENSIONS = [ext for exts in CONTENT_TYPES.values() for ext in exts] + + +class Candidate(object): + """Holds information about a matching artwork, deals with validation of + dimension restrictions and resizing. """ - log.debug(u'fetchart: downloading art: {0}'.format(url)) - try: - with closing(requests_session.get(url, stream=True)) as resp: - if 'Content-Type' not in resp.headers \ - or resp.headers['Content-Type'] not in CONTENT_TYPES: - log.debug(u'fetchart: not an image') - return + CANDIDATE_BAD = 0 + CANDIDATE_EXACT = 1 + CANDIDATE_DOWNSCALE = 2 + + MATCH_EXACT = 0 + MATCH_FALLBACK = 1 + + def __init__(self, log, path=None, url=None, source=u'', + match=None, size=None): + self._log = log + self.path = path + self.url = url + self.source = source + self.check = None + self.match = match + self.size = size + + def _validate(self, extra): + """Determine whether the candidate artwork is valid based on + its dimensions (width and ratio). + + Return `CANDIDATE_BAD` if the file is unusable. + Return `CANDIDATE_EXACT` if the file is usable as-is. + Return `CANDIDATE_DOWNSCALE` if the file must be resized. + """ + if not self.path: + return self.CANDIDATE_BAD + + if not (extra['enforce_ratio'] or + extra['minwidth'] or + extra['maxwidth']): + return self.CANDIDATE_EXACT + + # get_size returns None if no local imaging backend is available + if not self.size: + self.size = ArtResizer.shared.get_size(self.path) + self._log.debug(u'image size: {}', self.size) + + if not self.size: + self._log.warning(u'Could not get size of image (please see ' + u'documentation for dependencies). ' + u'The configuration options `minwidth` and ' + u'`enforce_ratio` may be violated.') + return self.CANDIDATE_EXACT + + short_edge = min(self.size) + long_edge = max(self.size) + + # Check minimum size. + if extra['minwidth'] and self.size[0] < extra['minwidth']: + self._log.debug(u'image too small ({} < {})', + self.size[0], extra['minwidth']) + return self.CANDIDATE_BAD + + # Check aspect ratio. + edge_diff = long_edge - short_edge + if extra['enforce_ratio']: + if extra['margin_px']: + if edge_diff > extra['margin_px']: + self._log.debug(u'image is not close enough to being ' + u'square, ({} - {} > {})', + long_edge, short_edge, extra['margin_px']) + return self.CANDIDATE_BAD + elif extra['margin_percent']: + margin_px = extra['margin_percent'] * long_edge + if edge_diff > margin_px: + self._log.debug(u'image is not close enough to being ' + u'square, ({} - {} > {})', + long_edge, short_edge, margin_px) + return self.CANDIDATE_BAD + elif edge_diff: + # also reached for margin_px == 0 and margin_percent == 0.0 + self._log.debug(u'image is not square ({} != {})', + self.size[0], self.size[1]) + return self.CANDIDATE_BAD + + # Check maximum size. 
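A sketch of the aspect-ratio rule in `Candidate._validate` above: with `enforce_ratio` enabled, the difference between the long and short edge must stay within an absolute (`margin_px`) or relative (`margin_percent`) margin, and must be zero when no margin is configured.

    def ratio_ok(size, margin_px=0, margin_percent=0.0):
        short_edge, long_edge = min(size), max(size)
        edge_diff = long_edge - short_edge
        if margin_px:
            return edge_diff <= margin_px
        if margin_percent:
            return edge_diff <= margin_percent * long_edge
        return edge_diff == 0            # no margin: exactly square only

    assert ratio_ok((500, 500))
    assert not ratio_ok((500, 490))
    assert ratio_ok((500, 490), margin_px=10)
    assert ratio_ok((500, 490), margin_percent=0.02)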
+ if extra['maxwidth'] and self.size[0] > extra['maxwidth']: + self._log.debug(u'image needs resizing ({} > {})', + self.size[0], extra['maxwidth']) + return self.CANDIDATE_DOWNSCALE + + return self.CANDIDATE_EXACT + + def validate(self, extra): + self.check = self._validate(extra) + return self.check + + def resize(self, extra): + if extra['maxwidth'] and self.check == self.CANDIDATE_DOWNSCALE: + self.path = ArtResizer.shared.resize(extra['maxwidth'], self.path) + + +def _logged_get(log, *args, **kwargs): + """Like `requests.get`, but logs the effective URL to the specified + `log` at the `DEBUG` level. - # Generate a temporary file with the correct extension. - with NamedTemporaryFile(suffix=DOWNLOAD_EXTENSION, delete=False) \ - as fh: - for chunk in resp.iter_content(): - fh.write(chunk) - log.debug(u'fetchart: downloaded art to: {0}'.format( - util.displayable_path(fh.name) - )) - return fh.name - except (IOError, requests.RequestException): - log.debug(u'fetchart: error fetching art') + Use the optional `message` parameter to specify what to log before + the URL. By default, the string is "getting URL". - -# ART SOURCES ################################################################ - -# Cover Art Archive. - -CAA_URL = 'http://coverartarchive.org/release/{mbid}/front-500.jpg' -CAA_GROUP_URL = 'http://coverartarchive.org/release-group/{mbid}/front-500.jpg' - - -def caa_art(release_id): - """Return the Cover Art Archive URL given a MusicBrainz release ID. + Also sets the User-Agent header to indicate beets. """ - return CAA_URL.format(mbid=release_id) - + # Use some arguments with the `send` call but most with the + # `Request` construction. This is a cheap, magic-filled way to + # emulate `requests.get` or, more pertinently, + # `requests.Session.request`. + req_kwargs = kwargs + send_kwargs = {} + for arg in ('stream', 'verify', 'proxies', 'cert', 'timeout'): + if arg in kwargs: + send_kwargs[arg] = req_kwargs.pop(arg) + + # Our special logging message parameter. + if 'message' in kwargs: + message = kwargs.pop('message') + else: + message = 'getting URL' -def caa_group_art(release_group_id): - """Return the Cover Art Archive release group URL given a MusicBrainz - release group ID. - """ - return CAA_GROUP_URL.format(mbid=release_group_id) + req = requests.Request('GET', *args, **req_kwargs) + with requests.Session() as s: + s.headers = {'User-Agent': 'beets'} + prepped = s.prepare_request(req) + log.debug('{}: {}', message, prepped.url) + return s.send(prepped, **send_kwargs) -# Art from Amazon. +class RequestMixin(object): + """Adds a Requests wrapper to the class that uses the logger, which + must be named `self._log`. + """ -AMAZON_URL = 'http://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg' -AMAZON_INDICES = (1, 2) + def request(self, *args, **kwargs): + """Like `requests.get`, but uses the logger `self._log`. + See also `_logged_get`. + """ + return _logged_get(self._log, *args, **kwargs) -def art_for_asin(asin): - """Generate URLs for an Amazon ID (ASIN) string.""" - for index in AMAZON_INDICES: - yield AMAZON_URL % (asin, index) +# ART SOURCES ################################################################ -# AlbumArt.org scraper. 
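
# A sketch (not part of the patch) of the prepared-request pattern that
# _logged_get above relies on: build a Request, let a Session prepare it so the
# effective URL (query parameters applied) is known before sending, then pass
# only the transport-level options to send(). The URL here is hypothetical.
import requests

req = requests.Request('GET', 'http://example.com/art', params={'q': 'demo'})
with requests.Session() as s:
    s.headers = {'User-Agent': 'beets'}
    prepped = s.prepare_request(req)
    print(prepped.url)                       # effective URL, useful for logging
    resp = s.send(prepped, stream=True, timeout=10)
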
+class ArtSource(RequestMixin): + def __init__(self, log, config): + self._log = log + self._config = config + + def get(self, album, extra): + raise NotImplementedError() + + def _candidate(self, **kwargs): + return Candidate(source=self, log=self._log, **kwargs) + + def fetch_image(self, candidate, extra): + raise NotImplementedError() + + +class LocalArtSource(ArtSource): + IS_LOCAL = True + LOC_STR = u'local' + + def fetch_image(self, candidate, extra): + pass + + +class RemoteArtSource(ArtSource): + IS_LOCAL = False + LOC_STR = u'remote' + + def fetch_image(self, candidate, extra): + """Downloads an image from a URL and checks whether it seems to + actually be an image. If so, returns a path to the downloaded image. + Otherwise, returns None. + """ + if extra['maxwidth']: + candidate.url = ArtResizer.shared.proxy_url(extra['maxwidth'], + candidate.url) + try: + with closing(self.request(candidate.url, stream=True, + message=u'downloading image')) as resp: + ct = resp.headers.get('Content-Type', None) + + # Download the image to a temporary file. As some servers + # (notably fanart.tv) have proven to return wrong Content-Types + # when images were uploaded with a bad file extension, do not + # rely on it. Instead validate the type using the file magic + # and only then determine the extension. + data = resp.iter_content(chunk_size=1024) + header = b'' + for chunk in data: + header += chunk + if len(header) >= 32: + # The imghdr module will only read 32 bytes, and our + # own additions in mediafile even less. + break + else: + # server didn't return enough data, i.e. corrupt image + return + + real_ct = _image_mime_type(header) + if real_ct is None: + # detection by file magic failed, fall back to the + # server-supplied Content-Type + # Is our type detection failsafe enough to drop this? + real_ct = ct + + if real_ct not in CONTENT_TYPES: + self._log.debug(u'not a supported image: {}', + real_ct or u'unknown content type') + return + + ext = b'.' + CONTENT_TYPES[real_ct][0] + + if real_ct != ct: + self._log.warn(u'Server specified {}, but returned a ' + u'{} image. Correcting the extension ' + u'to {}', + ct, real_ct, ext) + + with NamedTemporaryFile(suffix=ext, delete=False) as fh: + # write the first already loaded part of the image + fh.write(header) + # download the remaining part of the image + for chunk in data: + fh.write(chunk) + self._log.debug(u'downloaded art to: {0}', + util.displayable_path(fh.name)) + candidate.path = util.bytestring_path(fh.name) + return -AAO_URL = 'http://www.albumart.org/index_detail.php' -AAO_PAT = r'href\s*=\s*"([^>"]*)"[^>]*title\s*=\s*"View larger image"' + except (IOError, requests.RequestException, TypeError) as exc: + # Handling TypeError works around a urllib3 bug: + # https://github.com/shazow/urllib3/issues/556 + self._log.debug(u'error fetching art: {}', exc) + return + + +class CoverArtArchive(RemoteArtSource): + NAME = u"Cover Art Archive" + + URL = 'http://coverartarchive.org/release/{mbid}/front' + GROUP_URL = 'http://coverartarchive.org/release-group/{mbid}/front' + + def get(self, album, extra): + """Return the Cover Art Archive and Cover Art Archive release group URLs + using album MusicBrainz release ID and release group ID. 
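
# A sketch (not part of the patch) of the "trust the bytes, not the header"
# idea used by RemoteArtSource.fetch_image: buffer the first bytes of the
# download, detect the type from the file magic, and fall back to the server's
# Content-Type only when detection fails. The stdlib imghdr module is used here
# as a stand-in for beets' internal _image_mime_type helper.
import imghdr

def sniff_image_type(first_bytes, server_content_type=None):
    kind = imghdr.what(None, h=first_bytes)   # e.g. 'jpeg' or 'png'
    if kind == 'jpeg':
        return 'image/jpeg'
    if kind == 'png':
        return 'image/png'
    return server_content_type                # fall back to the header

# PNG files start with the fixed 8-byte signature, so 32 bytes are plenty.
assert sniff_image_type(b'\x89PNG\r\n\x1a\n' + b'\x00' * 24) == 'image/png'
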
+ """ + if album.mb_albumid: + yield self._candidate(url=self.URL.format(mbid=album.mb_albumid), + match=Candidate.MATCH_EXACT) + if album.mb_releasegroupid: + yield self._candidate( + url=self.GROUP_URL.format(mbid=album.mb_releasegroupid), + match=Candidate.MATCH_FALLBACK) + + +class Amazon(RemoteArtSource): + NAME = u"Amazon" + URL = 'http://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg' + INDICES = (1, 2) + + def get(self, album, extra): + """Generate URLs using Amazon ID (ASIN) string. + """ + if album.asin: + for index in self.INDICES: + yield self._candidate(url=self.URL % (album.asin, index), + match=Candidate.MATCH_EXACT) + + +class AlbumArtOrg(RemoteArtSource): + NAME = u"AlbumArt.org scraper" + URL = 'http://www.albumart.org/index_detail.php' + PAT = r'href\s*=\s*"([^>"]*)"[^>]*title\s*=\s*"View larger image"' + + def get(self, album, extra): + """Return art URL from AlbumArt.org using album ASIN. + """ + if not album.asin: + return + # Get the page from albumart.org. + try: + resp = self.request(self.URL, params={'asin': album.asin}) + self._log.debug(u'scraped art URL: {0}', resp.url) + except requests.RequestException: + self._log.debug(u'error scraping art page') + return + + # Search the page for the image URL. + m = re.search(self.PAT, resp.text) + if m: + image_url = m.group(1) + yield self._candidate(url=image_url, match=Candidate.MATCH_EXACT) + else: + self._log.debug(u'no image found on page') -def aao_art(asin): - """Return art URL from AlbumArt.org given an ASIN.""" - # Get the page from albumart.org. - try: - resp = requests_session.get(AAO_URL, params={'asin': asin}) - log.debug(u'fetchart: scraped art URL: {0}'.format(resp.url)) - except requests.RequestException: - log.debug(u'fetchart: error scraping art page') - return +class GoogleImages(RemoteArtSource): + NAME = u"Google Images" + URL = u'https://www.googleapis.com/customsearch/v1' + + def __init__(self, *args, **kwargs): + super(GoogleImages, self).__init__(*args, **kwargs) + self.key = self._config['google_key'].get(), + self.cx = self._config['google_engine'].get(), + + def get(self, album, extra): + """Return art URL from google custom search engine + given an album title and interpreter. + """ + if not (album.albumartist and album.album): + return + search_string = (album.albumartist + ',' + album.album).encode('utf-8') + response = self.request(self.URL, params={ + 'key': self.key, + 'cx': self.cx, + 'q': search_string, + 'searchType': 'image' + }) - # Search the page for the image URL. - m = re.search(AAO_PAT, resp.text) - if m: - image_url = m.group(1) - return image_url - else: - log.debug(u'fetchart: no image found on page') + # Get results using JSON. 
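
# A sketch (not part of the patch) of the request GoogleImages.get builds: the
# Custom Search JSON API is queried with an API key, an engine id (cx), the
# "artist,album" string and searchType=image, and each result's 'link' becomes
# a candidate URL. The key is a placeholder; the query string is hypothetical.
import requests

params = {
    'key': 'YOUR_API_KEY',                       # placeholder
    'cx': '001442825323518660753:hrh5ch1gjzm',   # engine id from the config default
    'q': 'Some Artist,Some Album',               # hypothetical search string
    'searchType': 'image',
}
resp = requests.get('https://www.googleapis.com/customsearch/v1', params=params)
for item in resp.json().get('items', []):
    print(item['link'])
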
+        try:
+            data = response.json()
+        except ValueError:
+            self._log.debug(u'google: error loading response: {}'
+                            .format(response.text))
+            return
+
+        if 'error' in data:
+            reason = data['error']['errors'][0]['reason']
+            self._log.debug(u'google fetchart error: {0}', reason)
+            return
+
+        if 'items' in data.keys():
+            for item in data['items']:
+                yield self._candidate(url=item['link'],
+                                      match=Candidate.MATCH_EXACT)
+
+
+class FanartTV(RemoteArtSource):
+    """Art from fanart.tv requested using their API"""
+    NAME = u"fanart.tv"
+
+    API_URL = 'http://webservice.fanart.tv/v3/'
+    API_ALBUMS = API_URL + 'music/albums/'
+    PROJECT_KEY = '61a7d0ab4e67162b7a0c7c35915cd48e'
+
+    def __init__(self, *args, **kwargs):
+        super(FanartTV, self).__init__(*args, **kwargs)
+        self.client_key = self._config['fanarttv_key'].get()
+
+    def get(self, album, extra):
+        if not album.mb_releasegroupid:
+            return
+
+        response = self.request(
+            self.API_ALBUMS + album.mb_releasegroupid,
+            headers={'api-key': self.PROJECT_KEY,
+                     'client-key': self.client_key})
+
+        try:
+            data = response.json()
+        except ValueError:
+            self._log.debug(u'fanart.tv: error loading response: {}',
+                            response.text)
+            return
+
+        if u'status' in data and data[u'status'] == u'error':
+            if u'not found' in data[u'error message'].lower():
+                self._log.debug(u'fanart.tv: no image found')
+            elif u'api key' in data[u'error message'].lower():
+                self._log.warning(u'fanart.tv: Invalid API key given, please '
+                                  u'enter a valid one in your config file.')
+            else:
+                self._log.debug(u'fanart.tv: error on request: {}',
+                                data[u'error message'])
+            return
+
+        matches = []
+        # can there be more than one releasegroupid per response?
+        for mbid, art in data.get(u'albums', dict()).items():
+            # there might be more art referenced, e.g. cdart, and an albumcover
+            # might not be present, even if the request was successful
+            if album.mb_releasegroupid == mbid and u'albumcover' in art:
+                matches.extend(art[u'albumcover'])
+            # can this actually occur?
+            else:
+                self._log.debug(u'fanart.tv: unexpected mb_releasegroupid in '
+                                u'response!')
+        matches.sort(key=lambda x: x[u'likes'], reverse=True)
+        for item in matches:
+            # fanart.tv has a strict size requirement for album art to be
+            # uploaded
+            yield self._candidate(url=item[u'url'],
+                                  match=Candidate.MATCH_EXACT,
+                                  size=(1000, 1000))
+
+
+class ITunesStore(RemoteArtSource):
+    NAME = u"iTunes Store"
+
+    def get(self, album, extra):
+        """Return art URL from iTunes Store given an album title.
+        """
+        if not (album.albumartist and album.album):
+            return
+        search_string = (album.albumartist + ' ' + album.album).encode('utf-8')
+        try:
+            # Isolate bugs in the iTunes library while searching.
+            try:
+                results = itunes.search_album(search_string)
+            except Exception as exc:
+                self._log.debug(u'iTunes search failed: {0}', exc)
+                return
 
-# Google Images scraper.
 
+            # Get the first match.
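
# A sketch (not part of the patch) of how FanartTV.get above ranks artwork:
# the 'albumcover' entries for the matching release group are sorted by their
# 'likes' count, most liked first. The payload below is hypothetical but shaped
# the way the parsing code above expects.
data = {
    'albums': {
        'some-release-group-id': {
            'albumcover': [
                {'url': 'http://example.com/a.jpg', 'likes': 2},
                {'url': 'http://example.com/b.jpg', 'likes': 7},
            ],
        },
    },
}

matches = []
for mbid, art in data.get('albums', {}).items():
    if mbid == 'some-release-group-id' and 'albumcover' in art:
        matches.extend(art['albumcover'])
matches.sort(key=lambda x: x['likes'], reverse=True)
print([m['url'] for m in matches])   # b.jpg first: it has the most likes
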
+            if results:
+                itunes_album = results[0]
+            else:
+                self._log.debug(u'iTunes search for {0!r} got no results',
+                                search_string)
+                return
 
-GOOGLE_URL = 'https://ajax.googleapis.com/ajax/services/search/images'
 
+            if itunes_album.get_artwork()['100']:
+                small_url = itunes_album.get_artwork()['100']
+                big_url = small_url.replace('100x100', '1200x1200')
+                yield self._candidate(url=big_url, match=Candidate.MATCH_EXACT)
+            else:
+                self._log.debug(u'album has no artwork in iTunes Store')
+        except IndexError:
+            self._log.debug(u'album not found in iTunes Store')
+
+
+class Wikipedia(RemoteArtSource):
+    NAME = u"Wikipedia (queried through DBpedia)"
+    DBPEDIA_URL = 'http://dbpedia.org/sparql'
+    WIKIPEDIA_URL = 'http://en.wikipedia.org/w/api.php'
+    SPARQL_QUERY = u'''PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
+                 PREFIX dbpprop: <http://dbpedia.org/property/>
+                 PREFIX owl: <http://dbpedia.org/ontology/>
+                 PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
+                 PREFIX foaf: <http://xmlns.com/foaf/0.1/>
+
+                 SELECT DISTINCT ?pageId ?coverFilename WHERE {{
+                   ?subject owl:wikiPageID ?pageId .
+                   ?subject dbpprop:name ?name .
+                   ?subject rdfs:label ?label .
+                   {{ ?subject dbpprop:artist ?artist }}
+                     UNION
+                   {{ ?subject owl:artist ?artist }}
+                   {{ ?artist foaf:name "{artist}"@en }}
+                     UNION
+                   {{ ?artist dbpprop:name "{artist}"@en }}
+                   ?subject rdf:type <http://dbpedia.org/ontology/Album> .
+                   ?subject dbpprop:cover ?coverFilename .
+                   FILTER ( regex(?name, "{album}", "i") )
+                  }}
+                 Limit 1'''
+
+    def get(self, album, extra):
+        if not (album.albumartist and album.album):
+            return
+
+        # Find the name of the cover art filename on DBpedia
+        cover_filename, page_id = None, None
+        dbpedia_response = self.request(
+            self.DBPEDIA_URL,
+            params={
+                'format': 'application/sparql-results+json',
+                'timeout': 2500,
+                'query': self.SPARQL_QUERY.format(
+                    artist=album.albumartist.title(), album=album.album)
+            },
+            headers={'content-type': 'application/json'},
+        )
+        try:
+            data = dbpedia_response.json()
+            results = data['results']['bindings']
+            if results:
+                cover_filename = 'File:' + results[0]['coverFilename']['value']
+                page_id = results[0]['pageId']['value']
+            else:
+                self._log.debug(u'wikipedia: album not found on dbpedia')
+        except (ValueError, KeyError, IndexError):
+            self._log.debug(u'wikipedia: error scraping dbpedia response: {}',
+                            dbpedia_response.text)
+
+        # Ensure we have a filename before attempting to query wikipedia
+        if not (cover_filename and page_id):
+            return
+
+        # DBPedia sometimes provides an incomplete cover_filename, indicated
+        # by the filename having a space before the extension, e.g., 'foo .bar'
+        # An additional Wikipedia call can help to find the real filename.
+        # This may be removed once the DBPedia issue is resolved, see:
+        # https://github.com/dbpedia/extraction-framework/issues/396
+        if ' .' in cover_filename and \
+                '.' not in cover_filename.split(' .')[-1]:
+            self._log.debug(
+                u'wikipedia: dbpedia provided incomplete cover_filename'
+            )
+            lpart, rpart = cover_filename.rsplit(' .', 1)
+
+            # Query all the images in the page
+            wikipedia_response = self.request(
+                self.WIKIPEDIA_URL,
+                params={
+                    'format': 'json',
+                    'action': 'query',
+                    'continue': '',
+                    'prop': 'images',
+                    'pageids': page_id,
+                },
+                headers={'content-type': 'application/json'},
+            )
+
+            # Try to see if one of the images on the pages matches our
+            # incomplete cover_filename
+            try:
+                data = wikipedia_response.json()
+                results = data['query']['pages'][page_id]['images']
+                for result in results:
+                    if re.match(re.escape(lpart) + r'.*?\.' +
+ re.escape(rpart), + result['title']): + cover_filename = result['title'] + break + except (ValueError, KeyError): + self._log.debug( + u'wikipedia: failed to retrieve a cover_filename' + ) + return + # Find the absolute url of the cover art on Wikipedia + wikipedia_response = self.request( + self.WIKIPEDIA_URL, + params={ + 'format': 'json', + 'action': 'query', + 'continue': '', + 'prop': 'imageinfo', + 'iiprop': 'url', + 'titles': cover_filename.encode('utf-8'), + }, + headers={'content-type': 'application/json'}, + ) + + try: + data = wikipedia_response.json() + results = data['query']['pages'] + for _, result in results.iteritems(): + image_url = result['imageinfo'][0]['url'] + yield self._candidate(url=image_url, + match=Candidate.MATCH_EXACT) + except (ValueError, KeyError, IndexError): + self._log.debug(u'wikipedia: error scraping imageinfo') + return + + +class FileSystem(LocalArtSource): + NAME = u"Filesystem" + + @staticmethod + def filename_priority(filename, cover_names): + """Sort order for image names. + + Return indexes of cover names found in the image filename. This + means that images with lower-numbered and more keywords will have + higher priority. + """ + return [idx for (idx, x) in enumerate(cover_names) if x in filename] + + def get(self, album, extra): + """Look for album art files in the specified directories. + """ + paths = extra['paths'] + if not paths: + return + cover_names = list(map(util.bytestring_path, extra['cover_names'])) + cover_names_str = b'|'.join(cover_names) + cover_pat = br''.join([br"(\b|_)(", cover_names_str, br")(\b|_)"]) + cautious = extra['cautious'] -def google_art(album): - """Return art URL from google.org given an album title and - interpreter. - """ - search_string = (album.albumartist + ',' + album.album).encode('utf-8') - response = requests_session.get(GOOGLE_URL, params={ - 'v': '1.0', - 'q': search_string, - 'start': '0', - }) - - # Get results using JSON. - try: - results = response.json() - data = results['responseData'] - dataInfo = data['results'] - for myUrl in dataInfo: - return myUrl['unescapedUrl'] - except: - log.debug(u'fetchart: error scraping art page') - return - - -# Art from the filesystem. - -def filename_priority(filename, cover_names): - """Sort order for image names. - - Return indexes of cover names found in the image filename. This - means that images with lower-numbered and more keywords will have higher - priority. - """ - return [idx for (idx, x) in enumerate(cover_names) if x in filename] - + for path in paths: + if not os.path.isdir(syspath(path)): + continue -def art_in_path(path, cover_names, cautious): - """Look for album art files in a specified directory.""" - if not os.path.isdir(path): - return - - # Find all files that look like images in the directory. - images = [] - for fn in os.listdir(path): - for ext in IMAGE_EXTENSIONS: - if fn.lower().endswith('.' + ext): - images.append(fn) - - # Look for "preferred" filenames. - images = sorted(images, key=lambda x: filename_priority(x, cover_names)) - cover_pat = r"(\b|_)({0})(\b|_)".format('|'.join(cover_names)) - for fn in images: - if re.search(cover_pat, os.path.splitext(fn)[0], re.I): - log.debug(u'fetchart: using well-named art file {0}'.format( - util.displayable_path(fn) - )) - return os.path.join(path, fn) - - # Fall back to any image in the folder. 
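
# A sketch (not part of the patch) of FileSystem.filename_priority above: each
# image name is keyed by the indexes of the configured cover names it contains,
# so names matching earlier-listed cover keywords sort first. The filenames
# below are hypothetical.
cover_names = [b'cover', b'front', b'art']

def filename_priority(filename, cover_names):
    return [idx for (idx, x) in enumerate(cover_names) if x in filename]

images = [b'front.jpg', b'cover.jpg', b'cover-front.jpg']
images.sort(key=lambda fn: filename_priority(fn, cover_names))
print(images)   # 'cover.jpg' first: 'cover' is listed before 'front'
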
- if images and not cautious: - log.debug(u'fetchart: using fallback art file {0}'.format( - util.displayable_path(images[0]) - )) - return os.path.join(path, images[0]) + # Find all files that look like images in the directory. + images = [] + for fn in os.listdir(syspath(path)): + fn = bytestring_path(fn) + for ext in IMAGE_EXTENSIONS: + if fn.lower().endswith(b'.' + ext) and \ + os.path.isfile(syspath(os.path.join(path, fn))): + images.append(fn) + + # Look for "preferred" filenames. + images = sorted(images, + key=lambda x: + self.filename_priority(x, cover_names)) + remaining = [] + for fn in images: + if re.search(cover_pat, os.path.splitext(fn)[0], re.I): + self._log.debug(u'using well-named art file {0}', + util.displayable_path(fn)) + yield self._candidate(path=os.path.join(path, fn), + match=Candidate.MATCH_EXACT) + else: + remaining.append(fn) + + # Fall back to any image in the folder. + if remaining and not cautious: + self._log.debug(u'using fallback art file {0}', + util.displayable_path(remaining[0])) + yield self._candidate(path=os.path.join(path, remaining[0]), + match=Candidate.MATCH_FALLBACK) # Try each source in turn. - -def _source_urls(album): - """Generate possible source URLs for an album's art. The URLs are - not guaranteed to work so they each need to be attempted in turn. - This allows the main `art_for_album` function to abort iteration - through this sequence early to avoid the cost of scraping when not - necessary. - """ - # Cover Art Archive. - if album.mb_albumid: - yield caa_art(album.mb_albumid) - if album.mb_releasegroupid: - yield caa_group_art(album.mb_releasegroupid) - - # Amazon and AlbumArt.org. - if album.asin: - for url in art_for_asin(album.asin): - yield url - url = aao_art(album.asin) - if url: - yield url - - if config['fetchart']['google_search']: - url = google_art(album) - if url: - yield url - - -def art_for_album(album, paths, maxwidth=None, local_only=False): - """Given an Album object, returns a path to downloaded art for the - album (or None if no art is found). If `maxwidth`, then images are - resized to this maximum pixel size. If `local_only`, then only local - image files from the filesystem are returned; no network requests - are made. - """ - out = None - - # Local art. - cover_names = config['fetchart']['cover_names'].as_str_seq() - cover_names = map(util.bytestring_path, cover_names) - cautious = config['fetchart']['cautious'].get(bool) - if paths: - for path in paths: - out = art_in_path(path, cover_names, cautious) - if out: - break - - # Web art sources. - remote_priority = config['fetchart']['remote_priority'].get(bool) - if not local_only and (remote_priority or not out): - for url in _source_urls(album): - if maxwidth: - url = ArtResizer.shared.proxy_url(maxwidth, url) - candidate = _fetch_image(url) - if candidate: - out = candidate - break - - if maxwidth and out: - out = ArtResizer.shared.resize(maxwidth, out) - return out - +SOURCES_ALL = [u'filesystem', + u'coverart', u'itunes', u'amazon', u'albumart', + u'wikipedia', u'google', u'fanarttv'] + +ART_SOURCES = { + u'filesystem': FileSystem, + u'coverart': CoverArtArchive, + u'itunes': ITunesStore, + u'albumart': AlbumArtOrg, + u'amazon': Amazon, + u'wikipedia': Wikipedia, + u'google': GoogleImages, + u'fanarttv': FanartTV, +} +SOURCE_NAMES = {v: k for k, v in ART_SOURCES.items()} # PLUGIN LOGIC ############################################################### -def batch_fetch_art(lib, albums, force, maxwidth=None): - """Fetch album art for each of the albums. 
This implements the manual - fetchart CLI command. - """ - for album in albums: - if album.artpath and not force: - message = 'has album art' - else: - # In ordinary invocations, look for images on the - # filesystem. When forcing, however, always go to the Web - # sources. - local_paths = None if force else [album.path] - - path = art_for_album(album, local_paths, maxwidth) - if path: - album.set_art(path, False) - album.store() - message = ui.colorize('green', 'found album art') - else: - message = ui.colorize('red', 'no art found') - - log.info(u'{0} - {1}: {2}'.format(album.albumartist, album.album, - message)) - +class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin): + PAT_PX = r"(0|[1-9][0-9]*)px" + PAT_PERCENT = r"(100(\.00?)?|[1-9]?[0-9](\.[0-9]{1,2})?)%" -class FetchArtPlugin(BeetsPlugin): def __init__(self): super(FetchArtPlugin, self).__init__() + # Holds candidates corresponding to downloaded images between + # fetching them and placing them in the filesystem. + self.art_candidates = {} + self.config.add({ 'auto': True, + 'minwidth': 0, 'maxwidth': 0, - 'remote_priority': False, + 'enforce_ratio': False, 'cautious': False, - 'google_search': False, 'cover_names': ['cover', 'front', 'art', 'album', 'folder'], + 'sources': ['filesystem', + 'coverart', 'itunes', 'amazon', 'albumart'], + 'google_key': None, + 'google_engine': u'001442825323518660753:hrh5ch1gjzm', + 'fanarttv_key': None, + 'store_source': False, }) + self.config['google_key'].redact = True + self.config['fanarttv_key'].redact = True - # Holds paths to downloaded images between fetching them and - # placing them in the filesystem. - self.art_paths = {} - + self.minwidth = self.config['minwidth'].get(int) self.maxwidth = self.config['maxwidth'].get(int) + + # allow both pixel and percentage-based margin specifications + self.enforce_ratio = self.config['enforce_ratio'].get( + confit.OneOf([bool, + confit.String(pattern=self.PAT_PX), + confit.String(pattern=self.PAT_PERCENT)])) + self.margin_px = None + self.margin_percent = None + if type(self.enforce_ratio) is unicode: + if self.enforce_ratio[-1] == u'%': + self.margin_percent = float(self.enforce_ratio[:-1]) / 100 + elif self.enforce_ratio[-2:] == u'px': + self.margin_px = int(self.enforce_ratio[:-2]) + else: + # shouldn't happen + raise confit.ConfigValueError() + self.enforce_ratio = True + + cover_names = self.config['cover_names'].as_str_seq() + self.cover_names = list(map(util.bytestring_path, cover_names)) + self.cautious = self.config['cautious'].get(bool) + self.store_source = self.config['store_source'].get(bool) + + self.src_removed = (config['import']['delete'].get(bool) or + config['import']['move'].get(bool)) + if self.config['auto']: # Enable two import hooks when fetching is enabled. 
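
# A sketch (not part of the patch) of the enforce_ratio parsing performed in
# FetchArtPlugin.__init__ above: a plain boolean keeps the old exact-square
# behaviour, while strings such as '10px' or '0.5%' become a pixel or
# fractional margin. Validation against PAT_PX/PAT_PERCENT is omitted here and
# the sample values are hypothetical.
def parse_enforce_ratio(value):
    margin_px = margin_percent = None
    if isinstance(value, str):
        if value.endswith('%'):
            margin_percent = float(value[:-1]) / 100
        elif value.endswith('px'):
            margin_px = int(value[:-2])
        value = True
    return bool(value), margin_px, margin_percent

print(parse_enforce_ratio('0.5%'))   # (True, None, 0.005)
print(parse_enforce_ratio('10px'))   # (True, 10, None)
print(parse_enforce_ratio(False))    # (False, None, None)
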
self.import_stages = [self.fetch_art] self.register_listener('import_task_files', self.assign_art) + available_sources = list(SOURCES_ALL) + if not HAVE_ITUNES and u'itunes' in available_sources: + available_sources.remove(u'itunes') + if not self.config['google_key'].get() and \ + u'google' in available_sources: + available_sources.remove(u'google') + sources_name = plugins.sanitize_choices( + self.config['sources'].as_str_seq(), available_sources) + if 'remote_priority' in self.config: + self._log.warning( + u'The `fetch_art.remote_priority` configuration option has ' + u'been deprecated, see the documentation.') + if self.config['remote_priority'].get(bool): + try: + sources_name.remove(u'filesystem') + sources_name.append(u'filesystem') + except ValueError: + pass + self.sources = [ART_SOURCES[s](self._log, self.config) + for s in sources_name] + # Asynchronous; after music is added to the library. def fetch_art(self, session, task): """Find art for the album being imported.""" if task.is_album: # Only fetch art for full albums. + if task.album.artpath and os.path.isfile(task.album.artpath): + # Album already has art (probably a re-import); skip it. + return if task.choice_flag == importer.action.ASIS: # For as-is imports, don't search Web sources for art. local = True @@ -323,34 +788,110 @@ # For any other choices (e.g., TRACKS), do nothing. return - path = art_for_album(task.album, task.paths, self.maxwidth, local) + candidate = self.art_for_album(task.album, task.paths, local) - if path: - self.art_paths[task] = path + if candidate: + self.art_candidates[task] = candidate + + def _set_art(self, album, candidate, delete=False): + album.set_art(candidate.path, delete) + if self.store_source: + # store the source of the chosen artwork in a flexible field + self._log.debug( + u"Storing art_source for {0.albumartist} - {0.album}", + album) + album.art_source = SOURCE_NAMES[type(candidate.source)] + album.store() # Synchronous; after music files are put in place. def assign_art(self, session, task): """Place the discovered art in the filesystem.""" - if task in self.art_paths: - path = self.art_paths.pop(task) + if task in self.art_candidates: + candidate = self.art_candidates.pop(task) - album = task.album - src_removed = (config['import']['delete'].get(bool) or - config['import']['move'].get(bool)) - album.set_art(path, not src_removed) - album.store() - if src_removed: - task.prune(path) + self._set_art(task.album, candidate, not self.src_removed) + + if self.src_removed: + task.prune(candidate.path) # Manual album art fetching. def commands(self): cmd = ui.Subcommand('fetchart', help='download album art') - cmd.parser.add_option('-f', '--force', dest='force', - action='store_true', default=False, - help='re-download art when already present') + cmd.parser.add_option( + u'-f', u'--force', dest='force', + action='store_true', default=False, + help=u're-download art when already present' + ) def func(lib, opts, args): - batch_fetch_art(lib, lib.albums(ui.decargs(args)), opts.force, - self.maxwidth) + self.batch_fetch_art(lib, lib.albums(ui.decargs(args)), opts.force) cmd.func = func return [cmd] + + # Utilities converted from functions to methods on logging overhaul + + def art_for_album(self, album, paths, local_only=False): + """Given an Album object, returns a path to downloaded art for the + album (or None if no art is found). If `maxwidth`, then images are + resized to this maximum pixel size. 
If `local_only`, then only local + image files from the filesystem are returned; no network requests + are made. + """ + out = None + + # all the information any of the sources might need + extra = {'paths': paths, + 'cover_names': self.cover_names, + 'cautious': self.cautious, + 'enforce_ratio': self.enforce_ratio, + 'margin_px': self.margin_px, + 'margin_percent': self.margin_percent, + 'minwidth': self.minwidth, + 'maxwidth': self.maxwidth} + + for source in self.sources: + if source.IS_LOCAL or not local_only: + self._log.debug( + u'trying source {0} for album {1.albumartist} - {1.album}', + SOURCE_NAMES[type(source)], + album, + ) + # URLs might be invalid at this point, or the image may not + # fulfill the requirements + for candidate in source.get(album, extra): + source.fetch_image(candidate, extra) + if candidate.validate(extra): + out = candidate + self._log.debug( + u'using {0.LOC_STR} image {1}'.format( + source, util.displayable_path(out.path))) + break + if out: + break + + if out: + out.resize(extra) + + return out + + def batch_fetch_art(self, lib, albums, force): + """Fetch album art for each of the albums. This implements the manual + fetchart CLI command. + """ + for album in albums: + if album.artpath and not force and os.path.isfile(album.artpath): + message = ui.colorize('text_highlight_minor', u'has album art') + else: + # In ordinary invocations, look for images on the + # filesystem. When forcing, however, always go to the Web + # sources. + local_paths = None if force else [album.path] + + candidate = self.art_for_album(album, local_paths) + if candidate: + self._set_art(album, candidate) + message = ui.colorize('text_success', u'found album art') + else: + message = ui.colorize('text_error', u'no art found') + + self._log.info(u'{0}: {1}', album, message) diff -Nru beets-1.3.8+dfsg/beetsplug/filefilter.py beets-1.3.19/beetsplug/filefilter.py --- beets-1.3.8+dfsg/beetsplug/filefilter.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beetsplug/filefilter.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Malte Ried. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Filter imported files using a regular expression. 
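
# A sketch (not part of the patch) of the path-based filtering the
# FileFilterPlugin below performs: the configured pattern is compiled against
# byte paths and only matching files are kept for import. The pattern and the
# candidate paths here are hypothetical.
import re

path_regex = re.compile(br'^/music/incoming/.*\.flac$')

candidates = [b'/music/incoming/album/01 track.flac',
              b'/music/incoming/album/cover.jpg']
kept = [p for p in candidates if path_regex.match(p) is not None]
print(kept)   # only the .flac file survives the filter
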
+""" + +from __future__ import division, absolute_import, print_function + +import re +from beets import config +from beets.util import bytestring_path +from beets.plugins import BeetsPlugin +from beets.importer import SingletonImportTask + + +class FileFilterPlugin(BeetsPlugin): + def __init__(self): + super(FileFilterPlugin, self).__init__() + self.register_listener('import_task_created', + self.import_task_created_event) + self.config.add({ + 'path': '.*' + }) + + self.path_album_regex = \ + self.path_singleton_regex = \ + re.compile(bytestring_path(self.config['path'].get())) + + if 'album_path' in self.config: + self.path_album_regex = re.compile( + bytestring_path(self.config['album_path'].get())) + + if 'singleton_path' in self.config: + self.path_singleton_regex = re.compile( + bytestring_path(self.config['singleton_path'].get())) + + def import_task_created_event(self, session, task): + if task.items and len(task.items) > 0: + items_to_import = [] + for item in task.items: + if self.file_filter(item['path']): + items_to_import.append(item) + if len(items_to_import) > 0: + task.items = items_to_import + else: + # Returning an empty list of tasks from the handler + # drops the task from the rest of the importer pipeline. + return [] + + elif isinstance(task, SingletonImportTask): + if not self.file_filter(task.item['path']): + return [] + + # If not filtered, return the original task unchanged. + return [task] + + def file_filter(self, full_path): + """Checks if the configured regular expressions allow the import + of the file given in full_path. + """ + import_config = dict(config['import']) + full_path = bytestring_path(full_path) + if 'singletons' not in import_config or not import_config[ + 'singletons']: + # Album + return self.path_album_regex.match(full_path) is not None + else: + # Singleton + return self.path_singleton_regex.match(full_path) is not None diff -Nru beets-1.3.8+dfsg/beetsplug/freedesktop.py beets-1.3.19/beetsplug/freedesktop.py --- beets-1.3.8+dfsg/beetsplug/freedesktop.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beetsplug/freedesktop.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Matt Lichtenberg. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Creates freedesktop.org-compliant .directory files on an album level. +""" + +from __future__ import division, absolute_import, print_function + +from beets.plugins import BeetsPlugin +from beets import ui + + +class FreedesktopPlugin(BeetsPlugin): + def commands(self): + deprecated = ui.Subcommand( + "freedesktop", + help=u"Print a message to redirect to thumbnails --dolphin") + deprecated.func = self.deprecation_message + return [deprecated] + + def deprecation_message(self, lib, opts, args): + ui.print_(u"This plugin is deprecated. Its functionality is " + u"superseded by the 'thumbnails' plugin") + ui.print_(u"'thumbnails --dolphin' replaces freedesktop. 
See doc & "
+                  u"changelog for more information")
diff -Nru beets-1.3.8+dfsg/beetsplug/fromfilename.py beets-1.3.19/beetsplug/fromfilename.py
--- beets-1.3.8+dfsg/beetsplug/fromfilename.py 2014-04-14 00:39:49.000000000 +0000
+++ beets-1.3.19/beetsplug/fromfilename.py 2016-06-20 01:53:12.000000000 +0000
@@ -1,5 +1,6 @@
+# -*- coding: utf-8 -*-
 # This file is part of beets.
-# Copyright 2013, Jan-Erik Dahlin
+# Copyright 2016, Jan-Erik Dahlin
 #
 # Permission is hereby granted, free of charge, to any person obtaining
 # a copy of this software and associated documentation files (the
@@ -15,6 +16,8 @@
 """If the title is empty, try to extract track and title from the
 filename.
 """
+from __future__ import division, absolute_import, print_function
+
 from beets import plugins
 from beets.util import displayable_path
 import os
@@ -24,29 +27,29 @@
 # Filename field extraction patterns.
 PATTERNS = [
     # "01 - Track 01" and "01": do nothing
-    ur'^(\d+)\s*-\s*track\s*\d$',
-    ur'^\d+$',
+    r'^(\d+)\s*-\s*track\s*\d$',
+    r'^\d+$',
 
     # Useful patterns.
-    ur'^(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$',
-    ur'^(?P<track>\d+)\s*-(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$',
-    ur'^(?P<track>\d+)\s(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$',
-    ur'^(?P<artist>.+)-(?P<title>.+)$',
-    ur'^(?P<track>\d+)\.\s*(?P<artist>.+)-(?P<title>.+)$',
-    ur'^(?P<track>\d+)\s*-\s*(?P<artist>.+)-(?P<title>.+)$',
-    ur'^(?P<track>\d+)\s*-(?P<artist>.+)-(?P<title>.+)$',
-    ur'^(?P<track>\d+)\s(?P<artist>.+)-(?P<title>.+)$',
-    ur'^(?P<title>.+)$',
-    ur'^(?P<track>\d+)\.\s*(?P<title>.+)$',
-    ur'^(?P<track>\d+)\s*-\s*(?P<title>.+)$',
-    ur'^(?P<track>\d+)\s(?P<title>.+)$',
-    ur'^(?P<title>.+) by (?P<artist>.+)$',
+    r'^(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$',
+    r'^(?P<track>\d+)\s*-(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$',
+    r'^(?P<track>\d+)\s(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$',
+    r'^(?P<artist>.+)-(?P<title>.+)$',
+    r'^(?P<track>\d+)\.\s*(?P<artist>.+)-(?P<title>.+)$',
+    r'^(?P<track>\d+)\s*-\s*(?P<artist>.+)-(?P<title>.+)$',
+    r'^(?P<track>\d+)\s*-(?P<artist>.+)-(?P<title>.+)$',
+    r'^(?P<track>\d+)\s(?P<artist>.+)-(?P<title>.+)$',
+    r'^(?P<title>.+)$',
+    r'^(?P<track>\d+)\.\s*(?P<title>.+)$',
+    r'^(?P<track>\d+)\s*-\s*(?P<title>.+)$',
+    r'^(?P<track>\d+)\s(?P<title>.+)$',
+    r'^(?P<title>.+) by (?P<artist>.+)$',
 ]
 
 # Titles considered "empty" and in need of replacement.
 BAD_TITLE_PATTERNS = [
-    ur'^$',
-    ur'\d+?\s?-?\s*track\s*\d+',
+    r'^$',
+    r'\d+?\s?-?\s*track\s*\d+',
 ]
 
 
@@ -137,10 +140,11 @@
 # Plugin structure and hook into import process.
 
 class FromFilenamePlugin(plugins.BeetsPlugin):
-    pass
+    def __init__(self):
+        super(FromFilenamePlugin, self).__init__()
+        self.register_listener('import_task_start', filename_task)
 
 
-@FromFilenamePlugin.listen('import_task_start')
 def filename_task(task, session):
     """Examine each item in the task to see if we can extract a title
     from the filename. Try to match all filenames to a number of
diff -Nru beets-1.3.8+dfsg/beetsplug/ftintitle.py beets-1.3.19/beetsplug/ftintitle.py
--- beets-1.3.8+dfsg/beetsplug/ftintitle.py 2014-04-27 03:49:51.000000000 +0000
+++ beets-1.3.19/beetsplug/ftintitle.py 2016-06-20 01:53:12.000000000 +0000
@@ -1,5 +1,6 @@
+# -*- coding: utf-8 -*-
 # This file is part of beets.
-# Copyright 2013, Verrus, <github.com/Verrus/beets-plugin-featInTitle> +# Copyright 2016, Verrus, <github.com/Verrus/beets-plugin-featInTitle> # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,11 +15,13 @@ """Moves "featured" artists to the title from the artist field. """ -from beets.plugins import BeetsPlugin +from __future__ import division, absolute_import, print_function + +import re + +from beets import plugins from beets import ui from beets.util import displayable_path -from beets import config -import re def split_on_feat(artist): @@ -27,121 +30,140 @@ artist, which is always a string, and the featuring artist, which may be a string or None if none is present. """ - parts = re.split( - r'[fF]t\.|[fF]eaturing|[fF]eat\.|\b[wW]ith\b|&|vs\.|and', - artist, - 1, # Only split on the first "feat". - ) - parts = [s.strip() for s in parts] + # split on the first "feat". + regex = re.compile(plugins.feat_tokens(), re.IGNORECASE) + parts = [s.strip() for s in regex.split(artist, 1)] if len(parts) == 1: return parts[0], None else: - return parts + return tuple(parts) def contains_feat(title): """Determine whether the title contains a "featured" marker. """ - return bool(re.search( - r'[fF]t\.|[fF]eaturing|[fF]eat\.|\b[wW]ith\b|&', - title, - )) + return bool(re.search(plugins.feat_tokens(), title, flags=re.IGNORECASE)) -def update_metadata(item, feat_part, drop_feat): - """Choose how to add new artists to the title and set the new - metadata. Also, print out messages about any changes that are made. - If `drop_feat` is set, then do not add the artist to the title; just - remove it from the artist field. - """ - # In all cases, update the artist fields. - ui.print_(u'artist: {0} -> {1}'.format(item.artist, item.albumartist)) - item.artist = item.albumartist - if item.artist_sort: - # Just strip the featured artist from the sort name. - item.artist_sort, _ = split_on_feat(item.artist_sort) - - # Only update the title if it does not already contain a featured - # artist and if we do not drop featuring information. - if not drop_feat and not contains_feat(item.title): - new_title = u"{0} feat. {1}".format(item.title, feat_part) - ui.print_(u'title: {0} -> {1}'.format(item.title, new_title)) - item.title = new_title - - -def ft_in_title(item, drop_feat): - """Look for featured artists in the item's artist fields and move - them to the title. +def find_feat_part(artist, albumartist): + """Attempt to find featured artists in the item's artist fields and + return the results. Returns None if no featured artist found. """ - artist = item.artist.strip() - albumartist = item.albumartist.strip() + feat_part = None - # Check whether there is a featured artist on this track and the - # artist field does not exactly match the album artist field. In - # that case, we attempt to move the featured artist to the title. - _, featured = split_on_feat(artist) - if featured and albumartist != artist and albumartist: - ui.print_(displayable_path(item.path)) - feat_part = None - - # Look for the album artist in the artist field. If it's not - # present, give up. - albumartist_split = artist.split(albumartist) - if len(albumartist_split) <= 1: - ui.print_('album artist not present in artist') - - # If the last element of the split (the right-hand side of the - # album artist) is nonempty, then it probably contains the - # featured artist. 
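
# A sketch (not part of the patch) of what split_on_feat above does, with a
# simplified pattern standing in for plugins.feat_tokens(): split once on the
# first featuring marker and return (main artist, featured artist or None).
# The artist strings are hypothetical.
import re

FEAT_PAT = r'(?:\bfeat(?:uring|\.)?|\bft\.?|\bwith\b)'   # simplified stand-in

def split_on_feat(artist):
    parts = [s.strip() for s in re.split(FEAT_PAT, artist, 1, re.IGNORECASE)]
    if len(parts) == 1:
        return parts[0], None
    return tuple(parts)

print(split_on_feat('Alice feat. Bob'))   # ('Alice', 'Bob')
print(split_on_feat('Alice'))             # ('Alice', None)
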
- elif albumartist_split[-1] != '': - # Extract the featured artist from the right-hand side. - _, feat_part = split_on_feat(albumartist_split[-1]) - - # Otherwise, if there's nothing on the right-hand side, look for a - # featuring artist on the left-hand side. - else: - lhs, rhs = split_on_feat(albumartist_split[0]) - if rhs: - feat_part = lhs - - # If we have a featuring artist, move it to the title. - if feat_part: - update_metadata(item, feat_part, drop_feat) - else: - ui.print_(u'no featuring artists found') + # Look for the album artist in the artist field. If it's not + # present, give up. + albumartist_split = artist.split(albumartist, 1) + if len(albumartist_split) <= 1: + return feat_part + + # If the last element of the split (the right-hand side of the + # album artist) is nonempty, then it probably contains the + # featured artist. + elif albumartist_split[-1] != '': + # Extract the featured artist from the right-hand side. + _, feat_part = split_on_feat(albumartist_split[-1]) - ui.print_() + # Otherwise, if there's nothing on the right-hand side, look for a + # featuring artist on the left-hand side. + else: + lhs, rhs = split_on_feat(albumartist_split[0]) + if lhs: + feat_part = lhs + + return feat_part -class FtInTitlePlugin(BeetsPlugin): +class FtInTitlePlugin(plugins.BeetsPlugin): def __init__(self): super(FtInTitlePlugin, self).__init__() self.config.add({ - 'drop': False + 'auto': True, + 'drop': False, + 'format': u'feat. {0}', }) self._command = ui.Subcommand( 'ftintitle', - help='move featured artists to the title field') + help=u'move featured artists to the title field') self._command.parser.add_option( - '-d', '--drop', dest='drop', + u'-d', u'--drop', dest='drop', action='store_true', default=False, - help='drop featuring from artists and ignore title update') + help=u'drop featuring from artists and ignore title update') + + if self.config['auto']: + self.import_stages = [self.imported] def commands(self): def func(lib, opts, args): self.config.set_args(opts) drop_feat = self.config['drop'].get(bool) - write = config['import']['write'].get(bool) + write = ui.should_write() for item in lib.items(ui.decargs(args)): - ft_in_title(item, drop_feat) + self.ft_in_title(item, drop_feat) item.store() if write: item.try_write() self._command.func = func return [self._command] + + def imported(self, session, task): + """Import hook for moving featuring artist automatically. + """ + drop_feat = self.config['drop'].get(bool) + + for item in task.imported_items(): + self.ft_in_title(item, drop_feat) + item.store() + + def update_metadata(self, item, feat_part, drop_feat): + """Choose how to add new artists to the title and set the new + metadata. Also, print out messages about any changes that are made. + If `drop_feat` is set, then do not add the artist to the title; just + remove it from the artist field. + """ + # In all cases, update the artist fields. + self._log.info(u'artist: {0} -> {1}', item.artist, item.albumartist) + item.artist = item.albumartist + if item.artist_sort: + # Just strip the featured artist from the sort name. + item.artist_sort, _ = split_on_feat(item.artist_sort) + + # Only update the title if it does not already contain a featured + # artist and if we do not drop featuring information. 
+ if not drop_feat and not contains_feat(item.title): + feat_format = self.config['format'].get(unicode) + new_format = feat_format.format(feat_part) + new_title = u"{0} {1}".format(item.title, new_format) + self._log.info(u'title: {0} -> {1}', item.title, new_title) + item.title = new_title + + def ft_in_title(self, item, drop_feat): + """Look for featured artists in the item's artist fields and move + them to the title. + """ + artist = item.artist.strip() + albumartist = item.albumartist.strip() + + # Check whether there is a featured artist on this track and the + # artist field does not exactly match the album artist field. In + # that case, we attempt to move the featured artist to the title. + _, featured = split_on_feat(artist) + if featured and albumartist != artist and albumartist: + self._log.info('{}', displayable_path(item.path)) + + feat_part = None + + # Attempt to find the featured artist. + feat_part = find_feat_part(artist, albumartist) + + # If we have a featuring artist, move it to the title. + if feat_part: + self.update_metadata(item, feat_part, drop_feat) + else: + self._log.info(u'no featuring artists found') diff -Nru beets-1.3.8+dfsg/beetsplug/fuzzy.py beets-1.3.19/beetsplug/fuzzy.py --- beets-1.3.8+dfsg/beetsplug/fuzzy.py 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/beetsplug/fuzzy.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Philippe Mongeau. +# Copyright 2016, Philippe Mongeau. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,21 +16,23 @@ """Provides a fuzzy matching query. """ +from __future__ import division, absolute_import, print_function + from beets.plugins import BeetsPlugin from beets.dbcore.query import StringFieldQuery -import beets +from beets import config import difflib class FuzzyQuery(StringFieldQuery): @classmethod - def string_match(self, pattern, val): + def string_match(cls, pattern, val): # smartcase if pattern.islower(): val = val.lower() - queryMatcher = difflib.SequenceMatcher(None, pattern, val) - threshold = beets.config['fuzzy']['threshold'].as_number() - return queryMatcher.quick_ratio() >= threshold + query_matcher = difflib.SequenceMatcher(None, pattern, val) + threshold = config['fuzzy']['threshold'].as_number() + return query_matcher.quick_ratio() >= threshold class FuzzyPlugin(BeetsPlugin): @@ -41,5 +44,5 @@ }) def queries(self): - prefix = beets.config['fuzzy']['prefix'].get(basestring) + prefix = self.config['prefix'].get(basestring) return {prefix: FuzzyQuery} diff -Nru beets-1.3.8+dfsg/beetsplug/hook.py beets-1.3.19/beetsplug/hook.py --- beets-1.3.8+dfsg/beetsplug/hook.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beetsplug/hook.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,108 @@ +# This file is part of beets. +# Copyright 2015, Adrian Sampson. 
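
# A sketch (not part of the patch) of the comparison FuzzyQuery.string_match
# above performs: a difflib similarity ratio checked against a configurable
# threshold, with the value lower-cased when the pattern is all lower case
# ("smartcase"). The threshold and strings here are illustrative.
import difflib

def fuzzy_match(pattern, value, threshold=0.7):
    if pattern.islower():
        value = value.lower()
    return difflib.SequenceMatcher(None, pattern, value).quick_ratio() >= threshold

print(fuzzy_match('dark side of the mon', 'Dark Side of the Moon'))   # True
print(fuzzy_match('completely different', 'Dark Side of the Moon'))   # False
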
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Allows custom commands to be run when an event is emitted by beets""" +from __future__ import division, absolute_import, print_function + +import string +import subprocess + +from beets.plugins import BeetsPlugin +from beets.ui import _arg_encoding +from beets.util import shlex_split + + +class CodingFormatter(string.Formatter): + """A custom string formatter that decodes the format string and it's + fields. + """ + + def __init__(self, coding): + """Creates a new coding formatter with the provided coding.""" + self._coding = coding + + def format(self, format_string, *args, **kwargs): + """Formats the provided string using the provided arguments and keyword + arguments. + + This method decodes the format string using the formatter's coding. + + See str.format and string.Formatter.format. + """ + try: + format_string = format_string.decode(self._coding) + except UnicodeEncodeError: + pass + + return super(CodingFormatter, self).format(format_string, *args, + **kwargs) + + def convert_field(self, value, conversion): + """Converts the provided value given a conversion type. + + This method decodes the converted value using the formatter's coding. + + See string.Formatter.convert_field. + """ + converted = super(CodingFormatter, self).convert_field(value, + conversion) + try: + converted = converted.decode(self._coding) + except UnicodeEncodeError: + pass + + return converted + + +class HookPlugin(BeetsPlugin): + """Allows custom commands to be run when an event is emitted by beets""" + def __init__(self): + super(HookPlugin, self).__init__() + + self.config.add({ + 'hooks': [] + }) + + hooks = self.config['hooks'].get(list) + + for hook_index in range(len(hooks)): + hook = self.config['hooks'][hook_index] + + hook_event = hook['event'].get(unicode) + hook_command = hook['command'].get(unicode) + + self.create_and_register_hook(hook_event, hook_command) + + def create_and_register_hook(self, event, command): + def hook_function(**kwargs): + if command is None or len(command) == 0: + self._log.error('invalid command "{0}"', command) + return + + formatter = CodingFormatter(_arg_encoding()) + command_pieces = shlex_split(command) + + for i, piece in enumerate(command_pieces): + command_pieces[i] = formatter.format(piece, event=event, + **kwargs) + + self._log.debug(u'running command "{0}" for event {1}', + u' '.join(command_pieces), event) + + try: + subprocess.Popen(command_pieces).wait() + except OSError as exc: + self._log.error(u'hook for {0} failed: {1}', event, exc) + + self.register_listener(event, hook_function) diff -Nru beets-1.3.8+dfsg/beetsplug/ihate.py beets-1.3.19/beetsplug/ihate.py --- beets-1.3.8+dfsg/beetsplug/ihate.py 2014-09-16 01:19:12.000000000 +0000 +++ beets-1.3.19/beetsplug/ihate.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Blemjhoo Tezoulbr <baobab@heresiarch.info>. 
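
# A sketch (not part of the patch) of how HookPlugin above turns a configured
# command template into an argument list: the string is shell-split, then each
# piece is formatted with the event name and the event's keyword arguments.
# The command template and event payload are hypothetical, and stdlib shlex
# stands in for beets.util.shlex_split.
import shlex
import string

command = 'notify-send "beets: {event}" "imported {album.album}"'   # hypothetical

class Album(object):
    album = 'Some Album'

formatter = string.Formatter()
pieces = [formatter.format(p, event='album_imported', album=Album())
          for p in shlex.split(command)]
print(pieces)   # ready to hand to subprocess.Popen(pieces)
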
+# Copyright 2016, Blemjhoo Tezoulbr <baobab@heresiarch.info>. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,9 +13,10 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. +from __future__ import division, absolute_import, print_function + """Warns you about things you hate (or even blocks import).""" -import logging from beets.plugins import BeetsPlugin from beets.importer import action from beets.library import parse_query_string @@ -37,8 +39,6 @@ class IHatePlugin(BeetsPlugin): - _log = logging.getLogger('beets') - def __init__(self): super(IHatePlugin, self).__init__() self.register_listener('import_task_choice', @@ -69,16 +69,14 @@ if task.choice_flag == action.APPLY: if skip_queries or warn_queries: - self._log.debug(u'[ihate] processing your hate') + self._log.debug(u'processing your hate') if self.do_i_hate_this(task, skip_queries): task.choice_flag = action.SKIP - self._log.info(u'[ihate] skipped: {0}' - .format(summary(task))) + self._log.info(u'skipped: {0}', summary(task)) return if self.do_i_hate_this(task, warn_queries): - self._log.info(u'[ihate] you maybe hate this: {0}' - .format(summary(task))) + self._log.info(u'you may hate this: {0}', summary(task)) else: - self._log.debug(u'[ihate] nothing to do') + self._log.debug(u'nothing to do') else: - self._log.debug(u'[ihate] user made a decision, nothing to do') + self._log.debug(u'user made a decision, nothing to do') diff -Nru beets-1.3.8+dfsg/beetsplug/importadded.py beets-1.3.19/beetsplug/importadded.py --- beets-1.3.8+dfsg/beetsplug/importadded.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/beetsplug/importadded.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,96 +1,134 @@ -"""Populate an items `added` and `mtime` field by using the file modification -time (mtime) of the item's source file before import. -""" +# -*- coding: utf-8 -*- + +"""Populate an item's `added` and `mtime` fields by using the file +modification time (mtime) of the item's source file before import. -from __future__ import unicode_literals, absolute_import, print_function +Reimported albums and items are skipped. +""" +from __future__ import division, absolute_import, print_function -import logging import os -from beets import config from beets import util +from beets import importer from beets.plugins import BeetsPlugin -log = logging.getLogger('beets') - class ImportAddedPlugin(BeetsPlugin): def __init__(self): super(ImportAddedPlugin, self).__init__() self.config.add({ 'preserve_mtimes': False, + 'preserve_write_mtimes': False, }) - -@ImportAddedPlugin.listen('import_task_start') -def check_config(task, session): - config['importadded']['preserve_mtimes'].get(bool) - - -def write_file_mtime(path, mtime): - """Write the given mtime to the destination path. - """ - stat = os.stat(util.syspath(path)) - os.utime(util.syspath(path), - (stat.st_atime, mtime)) - -# key: item path in the library -# value: the file mtime of the file the item was imported from -item_mtime = dict() - - -def write_item_mtime(item, mtime): - """Write the given mtime to an item's `mtime` field and to the mtime of the - item's file. 
- """ - if mtime is None: - log.warn(u"No mtime to be preserved for item {0}" - .format(util.displayable_path(item.path))) - return - - # The file's mtime on disk must be in sync with the item's mtime - write_file_mtime(util.syspath(item.path), mtime) - item.mtime = mtime - - -@ImportAddedPlugin.listen('before_item_moved') -@ImportAddedPlugin.listen('item_copied') -def record_import_mtime(item, source, destination): - """Record the file mtime of an item's path before import. - """ - if (source == destination): - # Re-import of an existing library item? - return - - mtime = os.stat(util.syspath(source)).st_mtime - item_mtime[destination] = mtime - log.debug(u"Recorded mtime {0} for item '{1}' imported from '{2}'".format( - mtime, util.displayable_path(destination), - util.displayable_path(source))) - - -@ImportAddedPlugin.listen('album_imported') -def update_album_times(lib, album): - album_mtimes = [] - for item in album.items(): - mtime = item_mtime[item.path] - if mtime is not None: - album_mtimes.append(mtime) - if config['importadded']['preserve_mtimes'].get(bool): - write_item_mtime(item, mtime) - item.store() - del item_mtime[item.path] - - album.added = min(album_mtimes) - album.store() - - -@ImportAddedPlugin.listen('item_imported') -def update_item_times(lib, item): - mtime = item_mtime[item.path] - if mtime is not None: - item.added = mtime - if config['importadded']['preserve_mtimes'].get(bool): - write_item_mtime(item, mtime) - item.store() - del item_mtime[item.path] + # item.id for new items that were reimported + self.reimported_item_ids = None + # album.path for old albums that were replaced by a reimported album + self.replaced_album_paths = None + # item path in the library to the mtime of the source file + self.item_mtime = dict() + + register = self.register_listener + register('import_task_start', self.check_config) + register('import_task_start', self.record_if_inplace) + register('import_task_files', self.record_reimported) + register('before_item_moved', self.record_import_mtime) + register('item_copied', self.record_import_mtime) + register('item_linked', self.record_import_mtime) + register('album_imported', self.update_album_times) + register('item_imported', self.update_item_times) + register('after_write', self.update_after_write_time) + + def check_config(self, task, session): + self.config['preserve_mtimes'].get(bool) + + def reimported_item(self, item): + return item.id in self.reimported_item_ids + + def reimported_album(self, album): + return album.path in self.replaced_album_paths + + def record_if_inplace(self, task, session): + if not (session.config['copy'] or session.config['move'] or + session.config['link']): + self._log.debug(u"In place import detected, recording mtimes from " + u"source paths") + items = [task.item] \ + if isinstance(task, importer.SingletonImportTask) \ + else task.items + for item in items: + self.record_import_mtime(item, item.path, item.path) + + def record_reimported(self, task, session): + self.reimported_item_ids = set(item.id for item, replaced_items + in task.replaced_items.iteritems() + if replaced_items) + self.replaced_album_paths = set(task.replaced_albums.keys()) + + def write_file_mtime(self, path, mtime): + """Write the given mtime to the destination path. + """ + stat = os.stat(util.syspath(path)) + os.utime(util.syspath(path), (stat.st_atime, mtime)) + + def write_item_mtime(self, item, mtime): + """Write the given mtime to an item's `mtime` field and to the mtime + of the item's file. 
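
# A sketch (not part of the patch) of the mtime-preserving write used by
# write_file_mtime above: keep the file's current access time and set only the
# modification time. A throwaway temp file and a hypothetical source mtime are
# used here.
import os
import tempfile

fd, path = tempfile.mkstemp()
os.close(fd)

source_mtime = 946684800.0                   # hypothetical mtime to preserve
st = os.stat(path)
os.utime(path, (st.st_atime, source_mtime))  # (atime, mtime)
print(os.stat(path).st_mtime)                # ~946684800.0
os.remove(path)
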
+ """ + # The file's mtime on disk must be in sync with the item's mtime + self.write_file_mtime(util.syspath(item.path), mtime) + item.mtime = mtime + + def record_import_mtime(self, item, source, destination): + """Record the file mtime of an item's path before its import. + """ + mtime = os.stat(util.syspath(source)).st_mtime + self.item_mtime[destination] = mtime + self._log.debug(u"Recorded mtime {0} for item '{1}' imported from " + u"'{2}'", mtime, util.displayable_path(destination), + util.displayable_path(source)) + + def update_album_times(self, lib, album): + if self.reimported_album(album): + self._log.debug(u"Album '{0}' is reimported, skipping import of " + u"added dates for the album and its items.", + util.displayable_path(album.path)) + return + + album_mtimes = [] + for item in album.items(): + mtime = self.item_mtime.pop(item.path, None) + if mtime: + album_mtimes.append(mtime) + if self.config['preserve_mtimes'].get(bool): + self.write_item_mtime(item, mtime) + item.store() + album.added = min(album_mtimes) + self._log.debug(u"Import of album '{0}', selected album.added={1} " + u"from item file mtimes.", album.album, album.added) + album.store() + + def update_item_times(self, lib, item): + if self.reimported_item(item): + self._log.debug(u"Item '{0}' is reimported, skipping import of " + u"added date.", util.displayable_path(item.path)) + return + mtime = self.item_mtime.pop(item.path, None) + if mtime: + item.added = mtime + if self.config['preserve_mtimes'].get(bool): + self.write_item_mtime(item, mtime) + self._log.debug(u"Import of item '{0}', selected item.added={1}", + util.displayable_path(item.path), item.added) + item.store() + + def update_after_write_time(self, item): + """Update the mtime of the item's file with the item.added value + after each write of the item if `preserve_write_mtimes` is enabled. + """ + if item.added: + if self.config['preserve_write_mtimes'].get(bool): + self.write_item_mtime(item, item.added) + self._log.debug(u"Write of item '{0}', selected item.added={1}", + util.displayable_path(item.path), item.added) diff -Nru beets-1.3.8+dfsg/beetsplug/importfeeds.py beets-1.3.19/beetsplug/importfeeds.py --- beets-1.3.8+dfsg/beetsplug/importfeeds.py 2014-09-14 20:03:59.000000000 +0000 +++ beets-1.3.19/beetsplug/importfeeds.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Fabrice Laporte. +# Copyright 2016, Fabrice Laporte. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,6 +13,8 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. +from __future__ import division, absolute_import, print_function + """Write paths of imported files in various formats to ease later import in a music player. Also allow printing the new file locations to stdout in case one wants to manually add music to a player by its path. 
@@ -19,40 +22,12 @@ import datetime import os import re -import logging from beets.plugins import BeetsPlugin -from beets.util import normpath, syspath, bytestring_path +from beets.util import mkdirall, normpath, syspath, bytestring_path from beets import config M3U_DEFAULT_NAME = 'imported.m3u' -log = logging.getLogger('beets') - - -class ImportFeedsPlugin(BeetsPlugin): - def __init__(self): - super(ImportFeedsPlugin, self).__init__() - - self.config.add({ - 'formats': [], - 'm3u_name': u'imported.m3u', - 'dir': None, - 'relative_to': None, - 'absolute_path': False, - }) - - feeds_dir = self.config['dir'].get() - if feeds_dir: - feeds_dir = os.path.expanduser(bytestring_path(feeds_dir)) - self.config['dir'] = feeds_dir - if not os.path.exists(syspath(feeds_dir)): - os.makedirs(syspath(feeds_dir)) - - relative_to = self.config['relative_to'].get() - if relative_to: - self.config['relative_to'] = normpath(relative_to) - else: - self.config['relative_to'] = feeds_dir def _get_feeds_dir(lib): @@ -85,67 +60,90 @@ def _write_m3u(m3u_path, items_paths): """Append relative paths to items into m3u file. """ - with open(syspath(m3u_path), 'a') as f: + mkdirall(m3u_path) + with open(syspath(m3u_path), 'ab') as f: for path in items_paths: - f.write(path + '\n') + f.write(path + b'\n') -def _record_items(lib, basename, items): - """Records relative paths to the given items for each feed format - """ - feedsdir = bytestring_path(config['importfeeds']['dir'].as_filename()) - formats = config['importfeeds']['formats'].as_str_seq() - relative_to = config['importfeeds']['relative_to'].get() \ - or config['importfeeds']['dir'].as_filename() - relative_to = bytestring_path(relative_to) - - paths = [] - for item in items: - if config['importfeeds']['absolute_path']: - paths.append(item.path) +class ImportFeedsPlugin(BeetsPlugin): + def __init__(self): + super(ImportFeedsPlugin, self).__init__() + + self.config.add({ + 'formats': [], + 'm3u_name': u'imported.m3u', + 'dir': None, + 'relative_to': None, + 'absolute_path': False, + }) + + feeds_dir = self.config['dir'].get() + if feeds_dir: + feeds_dir = os.path.expanduser(bytestring_path(feeds_dir)) + self.config['dir'] = feeds_dir + if not os.path.exists(syspath(feeds_dir)): + os.makedirs(syspath(feeds_dir)) + + relative_to = self.config['relative_to'].get() + if relative_to: + self.config['relative_to'] = normpath(relative_to) else: - try: - relpath = os.path.relpath(item.path, relative_to) - except ValueError: - # On Windows, it is sometimes not possible to construct a - # relative path (if the files are on different disks). 
- relpath = item.path - paths.append(relpath) - - if 'm3u' in formats: - basename = bytestring_path( - config['importfeeds']['m3u_name'].get(unicode) - ) - m3u_path = os.path.join(feedsdir, basename) - _write_m3u(m3u_path, paths) - - if 'm3u_multi' in formats: - m3u_path = _build_m3u_filename(basename) - _write_m3u(m3u_path, paths) - - if 'link' in formats: - for path in paths: - dest = os.path.join(feedsdir, os.path.basename(path)) - if not os.path.exists(syspath(dest)): - os.symlink(syspath(path), syspath(dest)) - - if 'echo' in formats: - log.info("Location of imported music:") - for path in paths: - log.info(" " + path) - - -@ImportFeedsPlugin.listen('library_opened') -def library_opened(lib): - if config['importfeeds']['dir'].get() is None: - config['importfeeds']['dir'] = _get_feeds_dir(lib) - - -@ImportFeedsPlugin.listen('album_imported') -def album_imported(lib, album): - _record_items(lib, album.album, album.items()) - - -@ImportFeedsPlugin.listen('item_imported') -def item_imported(lib, item): - _record_items(lib, item.title, [item]) + self.config['relative_to'] = feeds_dir + + self.register_listener('library_opened', self.library_opened) + self.register_listener('album_imported', self.album_imported) + self.register_listener('item_imported', self.item_imported) + + def _record_items(self, lib, basename, items): + """Records relative paths to the given items for each feed format + """ + feedsdir = bytestring_path(self.config['dir'].as_filename()) + formats = self.config['formats'].as_str_seq() + relative_to = self.config['relative_to'].get() \ + or self.config['dir'].as_filename() + relative_to = bytestring_path(relative_to) + + paths = [] + for item in items: + if self.config['absolute_path']: + paths.append(item.path) + else: + try: + relpath = os.path.relpath(item.path, relative_to) + except ValueError: + # On Windows, it is sometimes not possible to construct a + # relative path (if the files are on different disks). + relpath = item.path + paths.append(relpath) + + if 'm3u' in formats: + m3u_basename = bytestring_path( + self.config['m3u_name'].get(unicode)) + m3u_path = os.path.join(feedsdir, m3u_basename) + _write_m3u(m3u_path, paths) + + if 'm3u_multi' in formats: + m3u_path = _build_m3u_filename(basename) + _write_m3u(m3u_path, paths) + + if 'link' in formats: + for path in paths: + dest = os.path.join(feedsdir, os.path.basename(path)) + if not os.path.exists(syspath(dest)): + os.symlink(syspath(path), syspath(dest)) + + if 'echo' in formats: + self._log.info(u"Location of imported music:") + for path in paths: + self._log.info(u" {0}", path) + + def library_opened(self, lib): + if self.config['dir'].get() is None: + self.config['dir'] = _get_feeds_dir(lib) + + def album_imported(self, lib, album): + self._record_items(lib, album.album, album.items()) + + def item_imported(self, lib, item): + self._record_items(lib, item.title, [item]) diff -Nru beets-1.3.8+dfsg/beetsplug/info.py beets-1.3.19/beetsplug/info.py --- beets-1.3.8+dfsg/beetsplug/info.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/beetsplug/info.py 2016-06-20 17:08:57.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,58 +16,18 @@ """Shows file metadata. 
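One detail worth calling out in `_record_items` above is the `relative_to` handling: paths are written relative to the feeds directory when possible and only fall back to absolute paths when `os.path.relpath` cannot bridge two Windows drives. A standalone illustration (paths are made up):

    import os

    def feed_path(item_path, relative_to):
        # Prefer a path relative to the feeds directory; on Windows, files
        # on a different drive raise ValueError, in which case the absolute
        # path is kept instead.
        try:
            return os.path.relpath(item_path, relative_to)
        except ValueError:
            return item_path

    print(feed_path('/music/Artist/Album/01 Track.mp3', '/music/feeds'))
    # -> ../Artist/Album/01 Track.mp3 (on POSIX; only Windows can hit the fallback)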
""" +from __future__ import division, absolute_import, print_function + import os -import logging +import re from beets.plugins import BeetsPlugin from beets import ui from beets import mediafile +from beets.library import Item from beets.util import displayable_path, normpath, syspath -log = logging.getLogger('beets') - - -def run(lib, opts, args): - """Print tag info or library data for each file referenced by args. - - Main entry point for the `beet info ARGS...` command. - - If an argument is a path pointing to an existing file, then the tags - of that file are printed. All other arguments are considered - queries, and for each item matching all those queries the tags from - the file are printed. - - If `opts.summarize` is true, the function merges all tags into one - dictionary and only prints that. If two files have different values - for the same tag, the value is set to '[various]' - """ - if opts.library: - data_collector = library_data - else: - data_collector = tag_data - - first = True - summary = {} - for data_emitter in data_collector(lib, ui.decargs(args)): - try: - data = data_emitter() - except mediafile.UnreadableFileError as ex: - log.error(u'cannot read file: {0}'.format(ex.message)) - continue - - if opts.summarize: - update_summary(summary, data) - else: - if not first: - ui.print_() - print_data(data) - first = False - - if opts.summarize: - print_data(summary) - - def tag_data(lib, args): query = [] for arg in args: @@ -90,8 +51,10 @@ for field in fields: tags[field] = getattr(mf, field) tags['art'] = mf.art is not None - tags['path'] = displayable_path(path) - return tags + # create a temporary Item to take advantage of __format__ + item = Item.from_path(syspath(path)) + + return tags, item return emitter @@ -103,8 +66,9 @@ def library_data_emitter(item): def emitter(): data = dict(item.formatted()) - data['path'] = displayable_path(item.path) - return data + data.pop('path', None) # path is fetched from item + + return data, item return emitter @@ -117,8 +81,20 @@ return summary -def print_data(data): - path = data.pop('path') +def print_data(data, item=None, fmt=None): + """Print, with optional formatting, the fields of a single element. + + If no format string `fmt` is passed, the entries on `data` are printed one + in each line, with the format 'field: value'. If `fmt` is not `None`, the + `item` is printed according to `fmt`, using the `Item.__format__` + machinery. + """ + if fmt: + # use fmt specified by the user + ui.print_(format(item, fmt)) + return + + path = displayable_path(item.path) if item else None formatted = {} for key, value in data.iteritems(): if isinstance(value, list): @@ -126,6 +102,9 @@ if value is not None: formatted[key] = value + if len(formatted) == 0: + return + maxwidth = max(len(key) for key in formatted) lineformat = u'{{0:>{0}}}: {{1}}'.format(maxwidth) @@ -139,13 +118,124 @@ ui.print_(lineformat.format(field, value)) +def print_data_keys(data, item=None): + """Print only the keys (field names) for an item. 
+ """ + path = displayable_path(item.path) if item else None + formatted = [] + for key, value in data.iteritems(): + formatted.append(key) + + if len(formatted) == 0: + return + + line_format = u'{0}{{0}}'.format(u' ' * 4) + if path: + ui.print_(displayable_path(path)) + + for field in sorted(formatted): + ui.print_(line_format.format(field)) + + class InfoPlugin(BeetsPlugin): def commands(self): - cmd = ui.Subcommand('info', help='show file metadata') - cmd.func = run - cmd.parser.add_option('-l', '--library', action='store_true', - help='show library fields instead of tags') - cmd.parser.add_option('-s', '--summarize', action='store_true', - help='summarize the tags of all files') + cmd = ui.Subcommand('info', help=u'show file metadata') + cmd.func = self.run + cmd.parser.add_option( + u'-l', u'--library', action='store_true', + help=u'show library fields instead of tags', + ) + cmd.parser.add_option( + u'-s', u'--summarize', action='store_true', + help=u'summarize the tags of all files', + ) + cmd.parser.add_option( + u'-i', u'--include-keys', default=[], + action='append', dest='included_keys', + help=u'comma separated list of keys to show', + ) + cmd.parser.add_option( + u'-k', u'--keys-only', action='store_true', + help=u'show only the keys', + ) + cmd.parser.add_format_option(target='item') return [cmd] + + def run(self, lib, opts, args): + """Print tag info or library data for each file referenced by args. + + Main entry point for the `beet info ARGS...` command. + + If an argument is a path pointing to an existing file, then the tags + of that file are printed. All other arguments are considered + queries, and for each item matching all those queries the tags from + the file are printed. + + If `opts.summarize` is true, the function merges all tags into one + dictionary and only prints that. If two files have different values + for the same tag, the value is set to '[various]' + """ + if opts.library: + data_collector = library_data + else: + data_collector = tag_data + + included_keys = [] + for keys in opts.included_keys: + included_keys.extend(keys.split(',')) + key_filter = make_key_filter(included_keys) + + first = True + summary = {} + for data_emitter in data_collector(lib, ui.decargs(args)): + try: + data, item = data_emitter() + except (mediafile.UnreadableFileError, IOError) as ex: + self._log.error(u'cannot read file: {0}', ex) + continue + + data = key_filter(data) + if opts.summarize: + update_summary(summary, data) + else: + if not first: + ui.print_() + if opts.keys_only: + print_data_keys(data, item) + else: + print_data(data, item, opts.format) + first = False + + if opts.summarize: + print_data(summary) + + +def make_key_filter(include): + """Return a function that filters a dictionary. + + The returned filter takes a dictionary and returns another + dictionary that only includes the key-value pairs where the key + glob-matches one of the keys in `include`. 
+ """ + if not include: + return identity + + matchers = [] + for key in include: + key = re.escape(key) + key = key.replace(r'\*', '.*') + matchers.append(re.compile(key + '$')) + + def filter_(data): + filtered = dict() + for key, value in data.items(): + if any([m.match(key) for m in matchers]): + filtered[key] = value + return filtered + + return filter_ + + +def identity(val): + return val diff -Nru beets-1.3.8+dfsg/beetsplug/__init__.py beets-1.3.19/beetsplug/__init__.py --- beets-1.3.8+dfsg/beetsplug/__init__.py 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/beetsplug/__init__.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,6 +15,8 @@ """A namespace package for beets plugins.""" +from __future__ import division, absolute_import, print_function + # Make this a namespace package. from pkgutil import extend_path __path__ = extend_path(__path__, __name__) diff -Nru beets-1.3.8+dfsg/beetsplug/inline.py beets-1.3.19/beetsplug/inline.py --- beets-1.3.8+dfsg/beetsplug/inline.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/beetsplug/inline.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,15 +15,14 @@ """Allows inline path template customization code in the config file. """ -import logging +from __future__ import division, absolute_import, print_function + import traceback import itertools from beets.plugins import BeetsPlugin from beets import config -log = logging.getLogger('beets') - FUNC_NAME = u'__INLINE_FUNC__' @@ -50,55 +50,6 @@ return env[FUNC_NAME] -def compile_inline(python_code, album): - """Given a Python expression or function body, compile it as a path - field function. The returned function takes a single argument, an - Item, and returns a Unicode string. If the expression cannot be - compiled, then an error is logged and this function returns None. - """ - # First, try compiling as a single function. - try: - code = compile(u'({0})'.format(python_code), 'inline', 'eval') - except SyntaxError: - # Fall back to a function body. - try: - func = _compile_func(python_code) - except SyntaxError: - log.error(u'syntax error in inline field definition:\n{0}', - traceback.format_exc()) - return - else: - is_expr = False - else: - is_expr = True - - def _dict_for(obj): - out = dict(obj) - if album: - out['items'] = list(obj.items()) - return out - - if is_expr: - # For expressions, just evaluate and return the result. - def _expr_func(obj): - values = _dict_for(obj) - try: - return eval(code, values) - except Exception as exc: - raise InlineError(python_code, exc) - return _expr_func - else: - # For function bodies, invoke the function with values as global - # variables. - def _func_func(obj): - func.__globals__.update(_dict_for(obj)) - try: - return func() - except Exception as exc: - raise InlineError(python_code, exc) - return _func_func - - class InlinePlugin(BeetsPlugin): def __init__(self): super(InlinePlugin, self).__init__() @@ -112,14 +63,62 @@ # Item fields. 
for key, view in itertools.chain(config['item_fields'].items(), config['pathfields'].items()): - log.debug(u'inline: adding item field {0}'.format(key)) - func = compile_inline(view.get(unicode), False) + self._log.debug(u'adding item field {0}', key) + func = self.compile_inline(view.get(unicode), False) if func is not None: self.template_fields[key] = func # Album fields. for key, view in config['album_fields'].items(): - log.debug(u'inline: adding album field {0}'.format(key)) - func = compile_inline(view.get(unicode), True) + self._log.debug(u'adding album field {0}', key) + func = self.compile_inline(view.get(unicode), True) if func is not None: self.album_template_fields[key] = func + + def compile_inline(self, python_code, album): + """Given a Python expression or function body, compile it as a path + field function. The returned function takes a single argument, an + Item, and returns a Unicode string. If the expression cannot be + compiled, then an error is logged and this function returns None. + """ + # First, try compiling as a single function. + try: + code = compile(u'({0})'.format(python_code), 'inline', 'eval') + except SyntaxError: + # Fall back to a function body. + try: + func = _compile_func(python_code) + except SyntaxError: + self._log.error(u'syntax error in inline field definition:\n' + u'{0}', traceback.format_exc()) + return + else: + is_expr = False + else: + is_expr = True + + def _dict_for(obj): + out = dict(obj) + if album: + out['items'] = list(obj.items()) + return out + + if is_expr: + # For expressions, just evaluate and return the result. + def _expr_func(obj): + values = _dict_for(obj) + try: + return eval(code, values) + except Exception as exc: + raise InlineError(python_code, exc) + return _expr_func + else: + # For function bodies, invoke the function with values as global + # variables. + def _func_func(obj): + func.__globals__.update(_dict_for(obj)) + try: + return func() + except Exception as exc: + raise InlineError(python_code, exc) + return _func_func diff -Nru beets-1.3.8+dfsg/beetsplug/ipfs.py beets-1.3.19/beetsplug/ipfs.py --- beets-1.3.8+dfsg/beetsplug/ipfs.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beetsplug/ipfs.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,286 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Adds support for ipfs. 
Requires go-ipfs and a running ipfs daemon +""" + +from __future__ import division, absolute_import, print_function + +from beets import ui, util, library, config +from beets.plugins import BeetsPlugin + +import subprocess +import shutil +import os +import tempfile + + +class IPFSPlugin(BeetsPlugin): + + def __init__(self): + super(IPFSPlugin, self).__init__() + self.config.add({ + 'auto': True, + }) + + if self.config['auto']: + self.import_stages = [self.auto_add] + + def commands(self): + cmd = ui.Subcommand('ipfs', + help='interact with ipfs') + cmd.parser.add_option('-a', '--add', dest='add', + action='store_true', + help='Add to ipfs') + cmd.parser.add_option('-g', '--get', dest='get', + action='store_true', + help='Get from ipfs') + cmd.parser.add_option('-p', '--publish', dest='publish', + action='store_true', + help='Publish local library to ipfs') + cmd.parser.add_option('-i', '--import', dest='_import', + action='store_true', + help='Import remote library from ipfs') + cmd.parser.add_option('-l', '--list', dest='_list', + action='store_true', + help='Query imported libraries') + cmd.parser.add_option('-m', '--play', dest='play', + action='store_true', + help='Play music from remote libraries') + + def func(lib, opts, args): + if opts.add: + for album in lib.albums(ui.decargs(args)): + if len(album.items()) == 0: + self._log.info('{0} does not contain items, aborting', + album) + + self.ipfs_add(album) + album.store() + + if opts.get: + self.ipfs_get(lib, ui.decargs(args)) + + if opts.publish: + self.ipfs_publish(lib) + + if opts._import: + self.ipfs_import(lib, ui.decargs(args)) + + if opts._list: + self.ipfs_list(lib, ui.decargs(args)) + + if opts.play: + self.ipfs_play(lib, opts, ui.decargs(args)) + + cmd.func = func + return [cmd] + + def auto_add(self, session, task): + if task.is_album: + if self.ipfs_add(task.album): + task.album.store() + + def ipfs_play(self, lib, opts, args): + from beetsplug.play import PlayPlugin + + jlib = self.get_remote_lib(lib) + player = PlayPlugin() + config['play']['relative_to'] = None + player.album = True + player.play_music(jlib, player, args) + + def ipfs_add(self, album): + try: + album_dir = album.item_dir() + except AttributeError: + return False + try: + if album.ipfs: + self._log.debug('{0} already added', album_dir) + # Already added to ipfs + return False + except AttributeError: + pass + + self._log.info('Adding {0} to ipfs', album_dir) + + cmd = "ipfs add -q -r".split() + cmd.append(album_dir) + try: + output = util.command_output(cmd).split() + except (OSError, subprocess.CalledProcessError) as exc: + self._log.error(u'Failed to add {0}, error: {1}', album_dir, exc) + return False + length = len(output) + + for linenr, line in enumerate(output): + line = line.strip() + if linenr == length - 1: + # last printed line is the album hash + self._log.info("album: {0}", line) + album.ipfs = line + else: + try: + item = album.items()[linenr] + self._log.info("item: {0}", line) + item.ipfs = line + item.store() + except IndexError: + # if there's non music files in the to-add folder they'll + # get ignored here + pass + + return True + + def ipfs_get(self, lib, query): + query = query[0] + # Check if query is a hash + if query.startswith("Qm") and len(query) == 46: + self.ipfs_get_from_hash(lib, query) + else: + albums = self.query(lib, query) + for album in albums: + self.ipfs_get_from_hash(lib, album.ipfs) + + def ipfs_get_from_hash(self, lib, _hash): + try: + cmd = "ipfs get".split() + cmd.append(_hash) + util.command_output(cmd) + 
except (OSError, subprocess.CalledProcessError) as err: + self._log.error('Failed to get {0} from ipfs.\n{1}', + _hash, err.output) + return False + + self._log.info('Getting {0} from ipfs', _hash) + imp = ui.commands.TerminalImportSession(lib, loghandler=None, + query=None, paths=[_hash]) + imp.run() + shutil.rmtree(_hash) + + def ipfs_publish(self, lib): + with tempfile.NamedTemporaryFile() as tmp: + self.ipfs_added_albums(lib, tmp.name) + try: + cmd = "ipfs add -q ".split() + cmd.append(tmp.name) + output = util.command_output(cmd) + except (OSError, subprocess.CalledProcessError) as err: + msg = "Failed to publish library. Error: {0}".format(err) + self._log.error(msg) + return False + self._log.info("hash of library: {0}", output) + + def ipfs_import(self, lib, args): + _hash = args[0] + if len(args) > 1: + lib_name = args[1] + else: + lib_name = _hash + lib_root = os.path.dirname(lib.path) + remote_libs = lib_root + "/remotes" + if not os.path.exists(remote_libs): + try: + os.makedirs(remote_libs) + except OSError as e: + msg = "Could not create {0}. Error: {1}".format(remote_libs, e) + self._log.error(msg) + return False + path = remote_libs + "/" + lib_name + ".db" + if not os.path.exists(path): + cmd = "ipfs get {0} -o".format(_hash).split() + cmd.append(path) + try: + util.command_output(cmd) + except (OSError, subprocess.CalledProcessError): + self._log.error("Could not import {0}".format(_hash)) + return False + + # add all albums from remotes into a combined library + jpath = remote_libs + "/joined.db" + jlib = library.Library(jpath) + nlib = library.Library(path) + for album in nlib.albums(): + if not self.already_added(album, jlib): + new_album = [] + for item in album.items(): + item.id = None + new_album.append(item) + added_album = jlib.add_album(new_album) + added_album.ipfs = album.ipfs + added_album.store() + + def already_added(self, check, jlib): + for jalbum in jlib.albums(): + if jalbum.mb_albumid == check.mb_albumid: + return True + return False + + def ipfs_list(self, lib, args): + fmt = config['format_album'].get() + try: + albums = self.query(lib, args) + except IOError: + ui.print_("No imported libraries yet.") + return + + for album in albums: + ui.print_(format(album, fmt), " : ", album.ipfs) + + def query(self, lib, args): + rlib = self.get_remote_lib(lib) + albums = rlib.albums(args) + return albums + + def get_remote_lib(self, lib): + lib_root = os.path.dirname(lib.path) + remote_libs = lib_root + "/remotes" + path = remote_libs + "/joined.db" + if not os.path.isfile(path): + raise IOError + return library.Library(path) + + def ipfs_added_albums(self, rlib, tmpname): + """ Returns a new library with only albums/items added to ipfs + """ + tmplib = library.Library(tmpname) + for album in rlib.albums(): + try: + if album.ipfs: + self.create_new_album(album, tmplib) + except AttributeError: + pass + return tmplib + + def create_new_album(self, album, tmplib): + items = [] + for item in album.items(): + try: + if not item.ipfs: + break + except AttributeError: + pass + # Clear current path from item + item.path = '/ipfs/{0}/{1}'.format(album.ipfs, + os.path.basename(item.path)) + + item.id = None + items.append(item) + if len(items) < 1: + return False + self._log.info("Adding '{0}' to temporary library", album) + new_album = tmplib.add_album(items) + new_album.ipfs = album.ipfs + new_album.store() diff -Nru beets-1.3.8+dfsg/beetsplug/keyfinder.py beets-1.3.19/beetsplug/keyfinder.py --- beets-1.3.8+dfsg/beetsplug/keyfinder.py 2014-09-12 23:00:33.000000000 
+0000 +++ beets-1.3.19/beetsplug/keyfinder.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Thomas Scholtes. +# Copyright 2016, Thomas Scholtes. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,7 +16,8 @@ """Uses the `KeyFinder` program to add the `initial_key` field. """ -import logging +from __future__ import division, absolute_import, print_function + import subprocess from beets import ui @@ -23,9 +25,6 @@ from beets.plugins import BeetsPlugin -log = logging.getLogger('beets') - - class KeyFinderPlugin(BeetsPlugin): def __init__(self): @@ -35,23 +34,23 @@ u'auto': True, u'overwrite': False, }) - self.config['auto'].get(bool) - self.import_stages = [self.imported] + + if self.config['auto'].get(bool): + self.import_stages = [self.imported] def commands(self): cmd = ui.Subcommand('keyfinder', - help='detect and add initial key from audio') + help=u'detect and add initial key from audio') cmd.func = self.command return [cmd] def command(self, lib, opts, args): - self.find_key(lib.items(ui.decargs(args))) + self.find_key(lib.items(ui.decargs(args)), write=ui.should_write()) def imported(self, session, task): - if self.config['auto'].get(bool): - self.find_key(task.items) + self.find_key(task.items) - def find_key(self, items): + def find_key(self, items, write=False): overwrite = self.config['overwrite'].get(bool) bin = util.bytestring_path(self.config['bin'].get(unicode)) @@ -60,13 +59,29 @@ continue try: - key = util.command_output([bin, '-f', item.path]) + output = util.command_output([bin, b'-f', + util.syspath(item.path)]) except (subprocess.CalledProcessError, OSError) as exc: - log.error(u'KeyFinder execution failed: {0}'.format(exc)) + self._log.error(u'execution failed: {0}', exc) + continue + except UnicodeEncodeError: + # Workaround for Python 2 Windows bug. 
+ # http://bugs.python.org/issue1759845 + self._log.error(u'execution failed for Unicode path: {0!r}', + item.path) + continue + + key_raw = output.rsplit(None, 1)[-1] + try: + key = key_raw.decode('utf8') + except UnicodeDecodeError: + self._log.error(u'output is invalid UTF-8') continue item['initial_key'] = key - log.debug(u'added computed initial key {0} for {1}' - .format(key, util.displayable_path(item.path))) - item.try_write() + self._log.info(u'added computed initial key {0} for {1}', + key, util.displayable_path(item.path)) + + if write: + item.try_write() item.store() diff -Nru beets-1.3.8+dfsg/beetsplug/lastgenre/genres-tree.yaml beets-1.3.19/beetsplug/lastgenre/genres-tree.yaml --- beets-1.3.8+dfsg/beetsplug/lastgenre/genres-tree.yaml 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/beetsplug/lastgenre/genres-tree.yaml 2016-06-20 01:53:12.000000000 +0000 @@ -133,7 +133,6 @@ - chutney - chutney soca - compas - - dance hall - mambo - merengue - méringue @@ -148,7 +147,6 @@ - punta - punta rock - rasin - - reggae - reggaeton - salsa - soca @@ -156,10 +154,37 @@ - timba - twoubadou - zouk +- classical: + - ballet + - baroque: + - baroque music + - cantata + - chamber music: + - string quartet + - classical music + - concerto: + - concerto grosso + - contemporary classical + - modern classical + - opera + - oratorio + - orchestra: + - orchestral + - symphonic + - symphony + - organum + - mass: + - requiem + - sacred music: + - cantique + - gregorian chant + - sonata - comedy: - comedy music - comedy rock + - humor - parody music + - stand-up - country: - alternative country: - cowpunk @@ -264,7 +289,7 @@ - neurofunk - oldschool jungle: - darkside jungle - - ragga-jungle + - ragga jungle - raggacore - sambass - techstep @@ -467,8 +492,8 @@ - lyrical hip hop - merenrap - midwest hip hop: - - chicago hip hop: - - detroit hip hop: + - chicago hip hop + - detroit hip hop - horrorcore - st. 
louis hip hop - twin cities hip hop @@ -478,12 +503,10 @@ - new school hip hop - old school hip hop - political hip hop - - ragga - rap opera - rap rock: - rap metal - rapcore - - reggae español/spanish reggae - songo-salsa - southern hip hop: - atlanta hip hop: @@ -625,7 +648,7 @@ - glam rock - hard rock - heavy metal: - - alternative metal: + - alternative metal - black metal: - viking metal - christian metal @@ -708,12 +731,19 @@ - surf rock - visual kei: - nagoya kei -- ska: - - 2 tone - - dancehall - - dub - - lovers rock - - ragga jungle - - polish reggae +- reggae: + - roots reggae - reggae fusion - - rocksteady + - reggae en español: + - spanish reggae + - reggae 110 + - reggae bultrón + - romantic flow + - lovers rock + - raggamuffin: + - ragga + - dancehall + - ska: + - 2 tone + - dub + - rocksteady diff -Nru beets-1.3.8+dfsg/beetsplug/lastgenre/genres.txt beets-1.3.19/beetsplug/lastgenre/genres.txt --- beets-1.3.8+dfsg/beetsplug/lastgenre/genres.txt 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/beetsplug/lastgenre/genres.txt 2016-06-20 01:53:12.000000000 +0000 @@ -74,6 +74,7 @@ bantowbol barbershop music barndance +baroque baroque music baroque pop bass music @@ -265,6 +266,7 @@ classic country classic female blues classic rock +classical classical music classical music era clicks n cuts @@ -275,6 +277,7 @@ coladeira colombianas combined rhythm +comedy comedy rap comedy rock comic opera @@ -288,6 +291,7 @@ conjunto contemporary christian contemporary christian music +contemporary classical contemporary r&b contonbley contradanza @@ -647,6 +651,7 @@ huasteco huayno hula +humor humppa hunguhungu hyangak @@ -964,6 +969,7 @@ minuet mirolóyia modal jazz +modern classical modern classical music modern laika modern rock @@ -1079,6 +1085,7 @@ operatic pop oratorio orchestra +orchestral organ trio organic ambient organum @@ -1217,6 +1224,9 @@ red dirt reel reggae +reggae 110 +reggae bultrón +reggae en español reggae fusion reggae highlife reggaefusion @@ -1240,6 +1250,7 @@ rockabilly rocksteady rococo +romantic flow romantic period in music rondeaux ronggeng @@ -1251,6 +1262,7 @@ rímur sabar sacred harp +sacred music sadcore saibara sakara @@ -1352,6 +1364,7 @@ square dance squee st. louis blues +stand-up steelband stoner metal stoner rock @@ -1377,6 +1390,7 @@ swing music swingbeat sygyt +symphonic symphonic black metal symphonic metal symphonic poem diff -Nru beets-1.3.8+dfsg/beetsplug/lastgenre/__init__.py beets-1.3.19/beetsplug/lastgenre/__init__.py --- beets-1.3.8+dfsg/beetsplug/lastgenre/__init__.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/beetsplug/lastgenre/__init__.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,6 +13,8 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. +from __future__ import division, absolute_import, print_function + """Gets genres for imported music based on Last.fm tags. Uses a provided whitelist file to determine which tags are valid genres. 
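The whitelist mentioned in this docstring is a plain text file, one genre per line, with `#` comments; the hunk below switches its reading to binary mode with explicit UTF-8 decoding, but the parsing itself stays trivial, roughly:

    import io

    def load_whitelist(text):
        # One lower-cased genre per line; blank lines and '#' comments skipped.
        whitelist = set()
        for line in io.StringIO(text):
            line = line.strip().lower()
            if line and not line.startswith(u'#'):
                whitelist.add(line)
        return whitelist

    sample = u"# genres.txt excerpt\nRoots Reggae\ndancehall\n\nclassical\n"
    for genre in sorted(load_whitelist(sample)):
        print(genre)
    # classical
    # dancehall
    # roots reggae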
@@ -20,18 +23,17 @@ The scraper script used is available here: https://gist.github.com/1241307 """ -import logging import pylast import os import yaml +import traceback from beets import plugins from beets import ui -from beets.util import normpath, plurality from beets import config +from beets.util import normpath, plurality from beets import library -log = logging.getLogger('beets') LASTFM = pylast.LastFMNetwork(api_key=plugins.LASTFM_KEY) @@ -41,6 +43,10 @@ pylast.NetworkError, ) +REPLACE = { + u'\u2010': '-', +} + def deduplicate(seq): """Remove duplicates from sequence wile preserving order. @@ -49,40 +55,8 @@ return [x for x in seq if x not in seen and not seen.add(x)] -# Core genre identification routine. - -def _tags_for(obj, min_weight=None): - """Given a pylast entity (album or track), return a list of - tag names for that entity. Return an empty list if the entity is - not found or another error occurs. - - If `min_weight` is specified, tags are filtered by weight. - """ - try: - # Work around an inconsistency in pylast where - # Album.get_top_tags() does not return TopItem instances. - # https://code.google.com/p/pylast/issues/detail?id=85 - if isinstance(obj, pylast.Album): - res = super(pylast.Album, obj).get_top_tags() - else: - res = obj.get_top_tags() - except PYLAST_EXCEPTIONS as exc: - log.debug(u'last.fm error: {0}'.format(exc)) - return [] - - # Filter by weight (optionally). - if min_weight: - res = [el for el in res if (el.weight or 0) >= min_weight] - - # Get strings from tags. - res = [el.item.get_name().lower() for el in res] - - return res - - # Canonicalization tree processing. - def flatten_tree(elem, path, branches): """Flatten nested lists/dictionaries into lists of strings (branches). @@ -152,7 +126,7 @@ wl_filename = WHITELIST if wl_filename: wl_filename = normpath(wl_filename) - with open(wl_filename, 'r') as f: + with open(wl_filename, 'rb') as f: for line in f: line = line.decode('utf8').strip().lower() if line and not line.startswith(u'#'): @@ -221,7 +195,7 @@ can be found. Ex. 'Electronic, House, Dance' """ min_weight = self.config['min_weight'].get(int) - return self._resolve_genres(_tags_for(lastfm_obj, min_weight)) + return self._resolve_genres(self._tags_for(lastfm_obj, min_weight)) def _is_allowed(self, genre): """Determine whether the genre is present in the whitelist, @@ -235,10 +209,13 @@ # Cached entity lookups. - def _cached_lookup(self, entity, method, *args): + def _last_lookup(self, entity, method, *args): """Get a genre based on the named entity using the callable `method` whose arguments are given in the sequence `args`. The genre lookup - is cached based on the entity name and the arguments. + is cached based on the entity name and the arguments. Before the + lookup, each argument is has some Unicode characters replaced with + rough ASCII equivalents in order to return better results from the + Last.fm database. """ # Shortcut if we're missing metadata. if any(not s for s in args): @@ -248,32 +225,43 @@ if key in self._genre_cache: return self._genre_cache[key] else: - genre = self.fetch_genre(method(*args)) + args_replaced = [] + for arg in args: + for k, v in REPLACE.items(): + arg = arg.replace(k, v) + args_replaced.append(arg) + + genre = self.fetch_genre(method(*args_replaced)) self._genre_cache[key] = genre return genre def fetch_album_genre(self, obj): """Return the album genre for this Item or Album. 
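The new `REPLACE` table above is small but easy to miss: before each cached Last.fm lookup, `_last_lookup` swaps characters that tend to confuse the search, currently just the Unicode hyphen U+2010, for plain ASCII. In isolation:

    REPLACE = {
        u'\u2010': u'-',   # U+2010 HYPHEN -> ASCII hyphen-minus
    }

    def clean_args(*args):
        # Apply the same substitutions _last_lookup performs on its arguments.
        cleaned = []
        for arg in args:
            for bad, good in REPLACE.items():
                arg = arg.replace(bad, good)
            cleaned.append(arg)
        return cleaned

    artist, title = clean_args(u'Some Artist', u'Non\u2010Stop')
    print(title)   # -> Non-Stop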
""" - return self._cached_lookup(u'album', LASTFM.get_album, obj.albumartist, - obj.album) + return self._last_lookup( + u'album', LASTFM.get_album, obj.albumartist, obj.album + ) def fetch_album_artist_genre(self, obj): """Return the album artist genre for this Item or Album. """ - return self._cached_lookup(u'artist', LASTFM.get_artist, - obj.albumartist) + return self._last_lookup( + u'artist', LASTFM.get_artist, obj.albumartist + ) def fetch_artist_genre(self, item): """Returns the track artist genre for this Item. """ - return self._cached_lookup(u'artist', LASTFM.get_artist, item.artist) + return self._last_lookup( + u'artist', LASTFM.get_artist, item.artist + ) def fetch_track_genre(self, obj): """Returns the track genre for this Item. """ - return self._cached_lookup(u'track', LASTFM.get_track, obj.artist, - obj.title) + return self._last_lookup( + u'track', LASTFM.get_track, obj.artist, obj.title + ) def _get_genre(self, obj): """Get the genre string for an Album or Item object based on @@ -309,7 +297,7 @@ result = None if isinstance(obj, library.Item): result = self.fetch_artist_genre(obj) - elif obj.albumartist != 'Various Artists': + elif obj.albumartist != config['va_name'].get(unicode): result = self.fetch_album_artist_genre(obj) else: # For "Various Artists", pick the most popular track genre. @@ -342,25 +330,25 @@ return None, None def commands(self): - lastgenre_cmd = ui.Subcommand('lastgenre', help='fetch genres') + lastgenre_cmd = ui.Subcommand('lastgenre', help=u'fetch genres') lastgenre_cmd.parser.add_option( - '-f', '--force', dest='force', action='store_true', default=False, - help='re-download genre when already present' + u'-f', u'--force', dest='force', + action='store_true', default=False, + help=u're-download genre when already present' ) lastgenre_cmd.parser.add_option( - '-s', '--source', dest='source', type='string', - help='genre source: artist, album, or track' + u'-s', u'--source', dest='source', type='string', + help=u'genre source: artist, album, or track' ) def lastgenre_func(lib, opts, args): - write = config['import']['write'].get(bool) + write = ui.should_write() self.config.set_args(opts) for album in lib.albums(ui.decargs(args)): album.genre, src = self._get_genre(album) - log.info(u'genre for album {0} - {1} ({2}): {3}'.format( - album.albumartist, album.album, src, album.genre - )) + self._log.info(u'genre for album {0} ({1}): {0.genre}', + album, src) album.store() for item in album.items(): @@ -369,9 +357,8 @@ if 'track' in self.sources: item.genre, src = self._get_genre(item) item.store() - log.info(u'genre for track {0} - {1} ({2}): {3}' - .format(item.artist, item.title, src, - item.genre)) + self._log.info(u'genre for track {0} ({1}): {0.genre}', + item, src) if write: item.try_write() @@ -384,20 +371,55 @@ if task.is_album: album = task.album album.genre, src = self._get_genre(album) - log.debug(u'added last.fm album genre ({0}): {1}'.format( - src, album.genre)) + self._log.debug(u'added last.fm album genre ({0}): {1}', + src, album.genre) album.store() if 'track' in self.sources: for item in album.items(): item.genre, src = self._get_genre(item) - log.debug(u'added last.fm item genre ({0}): {1}'.format( - src, item.genre)) + self._log.debug(u'added last.fm item genre ({0}): {1}', + src, item.genre) item.store() else: item = task.item item.genre, src = self._get_genre(item) - log.debug(u'added last.fm item genre ({0}): {1}'.format( - src, item.genre)) + self._log.debug(u'added last.fm item genre ({0}): {1}', + src, item.genre) 
item.store() + + def _tags_for(self, obj, min_weight=None): + """Core genre identification routine. + + Given a pylast entity (album or track), return a list of + tag names for that entity. Return an empty list if the entity is + not found or another error occurs. + + If `min_weight` is specified, tags are filtered by weight. + """ + # Work around an inconsistency in pylast where + # Album.get_top_tags() does not return TopItem instances. + # https://code.google.com/p/pylast/issues/detail?id=85 + if isinstance(obj, pylast.Album): + obj = super(pylast.Album, obj) + + try: + res = obj.get_top_tags() + except PYLAST_EXCEPTIONS as exc: + self._log.debug(u'last.fm error: {0}', exc) + return [] + except Exception as exc: + # Isolate bugs in pylast. + self._log.debug(u'{}', traceback.format_exc()) + self._log.error(u'error in pylast library: {0}', exc) + return [] + + # Filter by weight (optionally). + if min_weight: + res = [el for el in res if (int(el.weight or 0)) >= min_weight] + + # Get strings from tags. + res = [el.item.get_name().lower() for el in res] + + return res diff -Nru beets-1.3.8+dfsg/beetsplug/lastimport.py beets-1.3.19/beetsplug/lastimport.py --- beets-1.3.8+dfsg/beetsplug/lastimport.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beetsplug/lastimport.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,249 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Rafael Bodill http://github.com/rafi +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +from __future__ import division, absolute_import, print_function + +import pylast +from pylast import TopItem, _extract, _number +from beets import ui +from beets import dbcore +from beets import config +from beets import plugins +from beets.dbcore import types + +API_URL = 'http://ws.audioscrobbler.com/2.0/' + + +class LastImportPlugin(plugins.BeetsPlugin): + def __init__(self): + super(LastImportPlugin, self).__init__() + config['lastfm'].add({ + 'user': '', + 'api_key': plugins.LASTFM_KEY, + }) + config['lastfm']['api_key'].redact = True + self.config.add({ + 'per_page': 500, + 'retry_limit': 3, + }) + self.item_types = { + 'play_count': types.INTEGER, + } + + def commands(self): + cmd = ui.Subcommand('lastimport', help=u'import last.fm play-count') + + def func(lib, opts, args): + import_lastfm(lib, self._log) + + cmd.func = func + return [cmd] + + +class CustomUser(pylast.User): + """ Custom user class derived from pylast.User, and overriding the + _get_things method to return MBID and album. Also introduces new + get_top_tracks_by_page method to allow access to more than one page of top + tracks. + """ + def __init__(self, *args, **kwargs): + super(CustomUser, self).__init__(*args, **kwargs) + + def _get_things(self, method, thing, thing_type, params=None, + cacheable=True): + """Returns a list of the most played thing_types by this thing, in a + tuple with the total number of pages of results. Includes an MBID, if + found. 
+ """ + doc = self._request( + self.ws_prefix + "." + method, cacheable, params) + + toptracks_node = doc.getElementsByTagName('toptracks')[0] + total_pages = int(toptracks_node.getAttribute('totalPages')) + + seq = [] + for node in doc.getElementsByTagName(thing): + title = _extract(node, "name") + artist = _extract(node, "name", 1) + mbid = _extract(node, "mbid") + playcount = _number(_extract(node, "playcount")) + + thing = thing_type(artist, title, self.network) + thing.mbid = mbid + seq.append(TopItem(thing, playcount)) + + return seq, total_pages + + def get_top_tracks_by_page(self, period=pylast.PERIOD_OVERALL, limit=None, + page=1, cacheable=True): + """Returns the top tracks played by a user, in a tuple with the total + number of pages of results. + * period: The period of time. Possible values: + o PERIOD_OVERALL + o PERIOD_7DAYS + o PERIOD_1MONTH + o PERIOD_3MONTHS + o PERIOD_6MONTHS + o PERIOD_12MONTHS + """ + + params = self._get_params() + params['period'] = period + params['page'] = page + if limit: + params['limit'] = limit + + return self._get_things( + "getTopTracks", "track", pylast.Track, params, cacheable) + + +def import_lastfm(lib, log): + user = config['lastfm']['user'].get(unicode) + per_page = config['lastimport']['per_page'].get(int) + + if not user: + raise ui.UserError(u'You must specify a user name for lastimport') + + log.info(u'Fetching last.fm library for @{0}', user) + + page_total = 1 + page_current = 0 + found_total = 0 + unknown_total = 0 + retry_limit = config['lastimport']['retry_limit'].get(int) + # Iterate through a yet to be known page total count + while page_current < page_total: + log.info(u'Querying page #{0}{1}...', + page_current + 1, + '/{}'.format(page_total) if page_total > 1 else '') + + for retry in range(0, retry_limit): + tracks, page_total = fetch_tracks(user, page_current + 1, per_page) + if page_total < 1: + # It means nothing to us! + raise ui.UserError(u'Last.fm reported no data.') + + if tracks: + found, unknown = process_tracks(lib, tracks, log) + found_total += found + unknown_total += unknown + break + else: + log.error(u'ERROR: unable to read page #{0}', + page_current + 1) + if retry < retry_limit: + log.info( + u'Retrying page #{0}... ({1}/{2} retry)', + page_current + 1, retry + 1, retry_limit + ) + else: + log.error(u'FAIL: unable to fetch page #{0}, ', + u'tried {1} times', page_current, retry + 1) + page_current += 1 + + log.info(u'... done!') + log.info(u'finished processing {0} song pages', page_total) + log.info(u'{0} unknown play-counts', unknown_total) + log.info(u'{0} play-counts imported', found_total) + + +def fetch_tracks(user, page, limit): + """ JSON format: + [ + { + "mbid": "...", + "artist": "...", + "title": "...", + "playcount": "..." 
+ } + ] + """ + network = pylast.LastFMNetwork(api_key=config['lastfm']['api_key']) + user_obj = CustomUser(user, network) + results, total_pages =\ + user_obj.get_top_tracks_by_page(limit=limit, page=page) + return [ + { + "mbid": track.item.mbid if track.item.mbid else '', + "artist": { + "name": track.item.artist.name + }, + "name": track.item.title, + "playcount": track.weight + } for track in results + ], total_pages + + +def process_tracks(lib, tracks, log): + total = len(tracks) + total_found = 0 + total_fails = 0 + log.info(u'Received {0} tracks in this page, processing...', total) + + for num in xrange(0, total): + song = None + trackid = tracks[num]['mbid'].strip() + artist = tracks[num]['artist'].get('name', '').strip() + title = tracks[num]['name'].strip() + album = '' + if 'album' in tracks[num]: + album = tracks[num]['album'].get('name', '').strip() + + log.debug(u'query: {0} - {1} ({2})', artist, title, album) + + # First try to query by musicbrainz's trackid + if trackid: + song = lib.items( + dbcore.query.MatchQuery('mb_trackid', trackid) + ).get() + + # If not, try just artist/title + if song is None: + log.debug(u'no album match, trying by artist/title') + query = dbcore.AndQuery([ + dbcore.query.SubstringQuery('artist', artist), + dbcore.query.SubstringQuery('title', title) + ]) + song = lib.items(query).get() + + # Last resort, try just replacing to utf-8 quote + if song is None: + title = title.replace("'", u'\u2019') + log.debug(u'no title match, trying utf-8 single quote') + query = dbcore.AndQuery([ + dbcore.query.SubstringQuery('artist', artist), + dbcore.query.SubstringQuery('title', title) + ]) + song = lib.items(query).get() + + if song is not None: + count = int(song.get('play_count', 0)) + new_count = int(tracks[num]['playcount']) + log.debug(u'match: {0} - {1} ({2}) ' + u'updating: play_count {3} => {4}', + song.artist, song.title, song.album, count, new_count) + song['play_count'] = new_count + song.store() + total_found += 1 + else: + total_fails += 1 + log.info(u' - No match: {0} - {1} ({2})', + artist, title, album) + + if total_fails > 0: + log.info(u'Acquired {0}/{1} play-counts ({2} unknown)', + total_found, total, total_fails) + + return total_found, total_fails diff -Nru beets-1.3.8+dfsg/beetsplug/lyrics.py beets-1.3.19/beetsplug/lyrics.py --- beets-1.3.8+dfsg/beetsplug/lyrics.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/beetsplug/lyrics.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,29 +15,39 @@ """Fetches, embeds, and displays lyrics. """ -from __future__ import print_function -import re -import logging -import urllib -import json -import unicodedata +from __future__ import absolute_import, division, print_function + import difflib import itertools +import json +import re +import requests +import unicodedata +import urllib +import warnings +from HTMLParser import HTMLParseError -from beets.plugins import BeetsPlugin -from beets import ui -from beets import config - +try: + from bs4 import SoupStrainer, BeautifulSoup + HAS_BEAUTIFUL_SOUP = True +except ImportError: + HAS_BEAUTIFUL_SOUP = False + +try: + import langdetect + HAS_LANGDETECT = True +except ImportError: + HAS_LANGDETECT = False -# Global logger. 
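The lookup chain in `process_tracks` above is worth spelling out: an exact MusicBrainz id match first, then a substring match on artist and title, then the same substring match with a typographic apostrophe, since library tags often use U+2019 where Last.fm returns a straight quote. Condensed into one helper (assuming a beets `Library` instance `lib`, as in the plugin):

    from beets import dbcore

    def find_song(lib, trackid, artist, title):
        # 1) Exact MusicBrainz track id, when Last.fm supplied one.
        if trackid:
            song = lib.items(
                dbcore.query.MatchQuery('mb_trackid', trackid)).get()
            if song is not None:
                return song

        # 2) Substring match on artist/title; 3) retry with a curly quote.
        for candidate in (title, title.replace("'", u'\u2019')):
            query = dbcore.AndQuery([
                dbcore.query.SubstringQuery('artist', artist),
                dbcore.query.SubstringQuery('title', candidate),
            ])
            song = lib.items(query).get()
            if song is not None:
                return song
        return None

On a hit, the play count lands in the item's flexible `play_count` field, which the plugin declares as an integer via `item_types`.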
+from beets import plugins +from beets import ui -log = logging.getLogger('beets') -DIV_RE = re.compile(r'<(/?)div>?') +DIV_RE = re.compile(r'<(/?)div>?', re.I) COMMENT_RE = re.compile(r'<!--.*-->', re.S) TAG_RE = re.compile(r'<[^>]*>') -BREAK_RE = re.compile(r'<br\s*/?>') +BREAK_RE = re.compile(r'\n?\s*<br([\s|/][^>]*)*>\s*\n?', re.I) URL_CHARACTERS = { u'\u2018': u"'", u'\u2019': u"'", @@ -55,20 +66,10 @@ # Utilities. -def fetch_url(url): - """Retrieve the content at a given URL, or return None if the source - is unreachable. - """ - try: - return urllib.urlopen(url).read() - except IOError as exc: - log.debug(u'failed to fetch: {0} ({1})'.format(url, unicode(exc))) - return None - def unescape(text): - """Resolves &#xxx; HTML entities (and some others).""" - if isinstance(text, str): + """Resolve &#xxx; HTML entities (and some others).""" + if isinstance(text, bytes): text = text.decode('utf8', 'ignore') out = text.replace(u' ', u' ') @@ -79,10 +80,20 @@ return out -def extract_text(html, starttag): +def extract_text_between(html, start_marker, end_marker): + try: + _, html = html.split(start_marker, 1) + html, _ = html.split(end_marker, 1) + except ValueError: + return u'' + return html + + +def extract_text_in(html, starttag): """Extract the text from a <DIV> tag in the HTML starting with ``starttag``. Returns None if parsing fails. """ + # Strip off the leading text before opening tag. try: _, html = html.split(starttag, 1) @@ -101,35 +112,15 @@ else: # Opening tag. if level == 0: parts.append(html[pos:match.start()]) - level += 1 if level == -1: parts.append(html[pos:match.start()]) break else: - print('no closing tag found!') + print(u'no closing tag found!') return - lyrics = ''.join(parts) - return strip_cruft(lyrics) - - -def strip_cruft(lyrics, wscollapse=True): - """Clean up HTML from an extracted lyrics string. For example, <BR> - tags are replaced with newlines. - """ - lyrics = COMMENT_RE.sub('', lyrics) - lyrics = unescape(lyrics) - if wscollapse: - lyrics = re.sub(r'\s+', ' ', lyrics) # Whitespace collapse. - lyrics = re.sub(r'<(script).*?</\1>(?s)', '', lyrics) # Strip script tags. - lyrics = BREAK_RE.sub('\n', lyrics) # <BR> newlines. - lyrics = re.sub(r'\n +', '\n', lyrics) - lyrics = re.sub(r' +\n', '\n', lyrics) - lyrics = TAG_RE.sub('', lyrics) # Strip remaining HTML tags. - lyrics = lyrics.replace('\r', '\n') - lyrics = lyrics.strip() - return lyrics + return u''.join(parts) def search_pairs(item): @@ -140,7 +131,7 @@ In addition to the artist and title obtained from the `item` the method tries to strip extra information like paranthesized suffixes - and featured artists from the strings and add them as caniddates. + and featured artists from the strings and add them as candidates. The method also tries to split multiple titles separated with `/`. """ @@ -149,7 +140,7 @@ artists = [artist] # Remove any featuring artists from the artists name - pattern = r"(.*?) (&|\b(and|ft|feat(uring)?\b))" + pattern = r"(.*?) {0}".format(plugins.feat_tokens()) match = re.search(pattern, artist, re.IGNORECASE) if match: artists.append(match.group(1)) @@ -162,8 +153,8 @@ titles.append(match.group(1)) # Remove any featuring artists from the title - pattern = r"(.*?) \b(ft|feat(uring)?)\b" - for title in titles: + pattern = r"(.*?) 
{0}".format(plugins.feat_tokens(for_artist=False)) + for title in titles[:]: match = re.search(pattern, title, re.IGNORECASE) if match: titles.append(match.group(1)) @@ -179,149 +170,220 @@ return itertools.product(artists, multi_titles) -def _encode(s): - """Encode the string for inclusion in a URL (common to both - LyricsWiki and Lyrics.com). - """ - if isinstance(s, unicode): - for char, repl in URL_CHARACTERS.items(): - s = s.replace(char, repl) - s = s.encode('utf8', 'ignore') - return urllib.quote(s) - - -# LyricsWiki. - -LYRICSWIKI_URL_PATTERN = 'http://lyrics.wikia.com/%s:%s' - - -def _lw_encode(s): - s = re.sub(r'\s+', '_', s) - s = s.replace("<", "Less_Than") - s = s.replace(">", "Greater_Than") - s = s.replace("#", "Number_") - s = re.sub(r'[\[\{]', '(', s) - s = re.sub(r'[\]\}]', ')', s) - return _encode(s) - - -def fetch_lyricswiki(artist, title): - """Fetch lyrics from LyricsWiki.""" - url = LYRICSWIKI_URL_PATTERN % (_lw_encode(artist), _lw_encode(title)) - html = fetch_url(url) - if not html: - return +class Backend(object): + def __init__(self, config, log): + self._log = log + + @staticmethod + def _encode(s): + """Encode the string for inclusion in a URL""" + if isinstance(s, unicode): + for char, repl in URL_CHARACTERS.items(): + s = s.replace(char, repl) + s = s.encode('utf8', 'ignore') + return urllib.quote(s) + + def build_url(self, artist, title): + return self.URL_PATTERN % (self._encode(artist.title()), + self._encode(title.title())) + + def fetch_url(self, url): + """Retrieve the content at a given URL, or return None if the source + is unreachable. + """ + try: + # Disable the InsecureRequestWarning that comes from using + # `verify=false`. + # https://github.com/kennethreitz/requests/issues/2214 + # We're not overly worried about the NSA MITMing our lyrics scraper + with warnings.catch_warnings(): + warnings.simplefilter('ignore') + r = requests.get(url, verify=False) + except requests.RequestException as exc: + self._log.debug(u'lyrics request failed: {0}', exc) + return + if r.status_code == requests.codes.ok: + return r.text + else: + self._log.debug(u'failed to fetch: {0} ({1})', url, r.status_code) - lyrics = extract_text(html, "<div class='lyricbox'>") - if lyrics and 'Unfortunately, we are not licensed' not in lyrics: - return lyrics + def fetch(self, artist, title): + raise NotImplementedError() -# Lyrics.com. 
+class SymbolsReplaced(Backend): + REPLACEMENTS = { + r'\s+': '_', + '<': 'Less_Than', + '>': 'Greater_Than', + '#': 'Number_', + r'[\[\{]': '(', + r'[\[\{]': ')' + } -LYRICSCOM_URL_PATTERN = 'http://www.lyrics.com/%s-lyrics-%s.html' -LYRICSCOM_NOT_FOUND = ( - 'Sorry, we do not have the lyric', - 'Submit Lyrics', -) + @classmethod + def _encode(cls, s): + for old, new in cls.REPLACEMENTS.iteritems(): + s = re.sub(old, new, s) + return super(SymbolsReplaced, cls)._encode(s) -def _lc_encode(s): - s = re.sub(r'[^\w\s-]', '', s) - s = re.sub(r'\s+', '-', s) - return _encode(s).lower() +class MusiXmatch(SymbolsReplaced): + REPLACEMENTS = dict(SymbolsReplaced.REPLACEMENTS, **{ + r'\s+': '-' + }) -def fetch_lyricscom(artist, title): - """Fetch lyrics from Lyrics.com.""" - url = LYRICSCOM_URL_PATTERN % (_lc_encode(title), _lc_encode(artist)) - html = fetch_url(url) - if not html: - return + URL_PATTERN = 'https://www.musixmatch.com/lyrics/%s/%s' - lyrics = extract_text(html, '<div id="lyric_space">') - if not lyrics: - return - for not_found_str in LYRICSCOM_NOT_FOUND: - if not_found_str in lyrics: + def fetch(self, artist, title): + url = self.build_url(artist, title) + html = self.fetch_url(url) + if not html: return + lyrics = extract_text_between(html, + '"body":', '"language":') + return lyrics.strip(',"').replace('\\n', '\n') + + +class Genius(Backend): + """Fetch lyrics from Genius via genius-api.""" + def __init__(self, config, log): + super(Genius, self).__init__(config, log) + self.api_key = config['genius_api_key'].get(unicode) + self.headers = {'Authorization': "Bearer %s" % self.api_key} + + def search_genius(self, artist, title): + query = u"%s %s" % (artist, title) + url = u'https://api.genius.com/search?q=%s' \ + % (urllib.quote(query.encode('utf8'))) + + self._log.debug(u'genius: requesting search {}', url) + try: + req = requests.get( + url, + headers=self.headers, + allow_redirects=True + ) + req.raise_for_status() + except requests.RequestException as exc: + self._log.debug(u'genius: request error: {}', exc) + return None + + try: + return req.json() + except ValueError: + self._log.debug(u'genius: invalid response: {}', req.text) + return None + + def get_lyrics(self, link): + url = u'http://genius-api.com/api/lyricsInfo' + + self._log.debug(u'genius: requesting lyrics for link {}', link) + try: + req = requests.post( + url, + data={'link': link}, + headers=self.headers, + allow_redirects=True + ) + req.raise_for_status() + except requests.RequestException as exc: + self._log.debug(u'genius: request error: {}', exc) + return None + + try: + return req.json() + except ValueError: + self._log.debug(u'genius: invalid response: {}', req.text) + return None - parts = lyrics.split('\n---\nLyrics powered by', 1) - if parts: - return parts[0] - - -# Optional Google custom search API backend. - -def slugify(text): - """Normalize a string and remove non-alphanumeric characters. 
- """ - text = re.sub(r"[-'_\s]", '_', text) - text = re.sub(r"_+", '_', text).strip('_') - pat = "([^,\(]*)\((.*?)\)" # Remove content within parentheses - text = re.sub(pat, '\g<1>', text).strip() - try: - text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore') - text = unicode(re.sub('[-\s]+', ' ', text)) - except UnicodeDecodeError: - log.exception(u"Failing to normalize '{0}'".format(text)) - return text - + def build_lyric_string(self, lyrics): + if 'lyrics' not in lyrics: + return + sections = lyrics['lyrics']['sections'] -BY_TRANS = ['by', 'par', 'de', 'von'] -LYRICS_TRANS = ['lyrics', 'paroles', 'letras', 'liedtexte'] + lyrics_list = [] + for section in sections: + lyrics_list.append(section['name']) + lyrics_list.append('\n') + for verse in section['verses']: + if 'content' in verse: + lyrics_list.append(verse['content']) + + return ''.join(lyrics_list) + + def fetch(self, artist, title): + search_data = self.search_genius(artist, title) + if not search_data: + return + if not search_data['meta']['status'] == 200: + return + else: + records = search_data['response']['hits'] + if not records: + return -def is_page_candidate(urlLink, urlTitle, title, artist): - """Return True if the URL title makes it a good candidate to be a - page that contains lyrics of title by artist. - """ - title = slugify(title.lower()) - artist = slugify(artist.lower()) - sitename = re.search(u"//([^/]+)/.*", slugify(urlLink.lower())).group(1) - urlTitle = slugify(urlTitle.lower()) - - # Check if URL title contains song title (exact match) - if urlTitle.find(title) != -1: - return True - # or try extracting song title from URL title and check if - # they are close enough - tokens = [by + '_' + artist for by in BY_TRANS] + \ - [artist, sitename, sitename.replace('www.', '')] + LYRICS_TRANS - songTitle = re.sub(u'(%s)' % u'|'.join(tokens), u'', urlTitle) + record_url = records[0]['result']['url'] + lyric_data = self.get_lyrics(record_url) + if not lyric_data: + return + lyrics = self.build_lyric_string(lyric_data) - typoRatio = .8 - return difflib.SequenceMatcher(None, songTitle, title).ratio() >= typoRatio + return lyrics -def insert_line_feeds(text): - """Insert newlines before upper-case characters. - """ - tokensStr = re.split("([a-z][A-Z])", text) - for idx in range(1, len(tokensStr), 2): - ltoken = list(tokensStr[idx]) - tokensStr[idx] = ltoken[0] + '\n' + ltoken[1] - return ''.join(tokensStr) +class LyricsWiki(SymbolsReplaced): + """Fetch lyrics from LyricsWiki.""" + URL_PATTERN = 'http://lyrics.wikia.com/%s:%s' + def fetch(self, artist, title): + url = self.build_url(artist, title) + html = self.fetch_url(url) + if not html: + return -def sanitize_lyrics(text): - """Clean text, returning raw lyrics as output or None if it happens - that input text is actually not lyrics content. Clean (x)html tags - in text, correct layout and syntax... - """ - text = strip_cruft(text, False) + # Get the HTML fragment inside the appropriate HTML element and then + # extract the text from it. 
+ html_frag = extract_text_in(html, u"<div class='lyricbox'>") + if html_frag: + lyrics = _scrape_strip_cruft(html_frag, True) - # Restore \n in input text - if '\n' not in text: - text = insert_line_feeds(text) + if lyrics and 'Unfortunately, we are not licensed' not in lyrics: + return lyrics - while text.count('\n\n') > text.count('\n') // 4: - # Remove first occurrence of \n for each sequence of \n - text = re.sub(r'\n(\n+)', '\g<1>', text) - text = re.sub(r'\n\n+', '\n\n', text) # keep at most two \n in a row +class LyricsCom(Backend): + """Fetch lyrics from Lyrics.com.""" + URL_PATTERN = 'http://www.lyrics.com/%s-lyrics-%s.html' + NOT_FOUND = ( + 'Sorry, we do not have the lyric', + 'Submit Lyrics', + ) + + @classmethod + def _encode(cls, s): + s = re.sub(r'[^\w\s-]', '', s) + s = re.sub(r'\s+', '-', s) + return super(LyricsCom, cls)._encode(s).lower() + + def fetch(self, artist, title): + url = self.build_url(artist, title) + html = self.fetch_url(url) + if not html: + return + lyrics = extract_text_between(html, '<div id="lyrics" class="SCREENO' + 'NLY" itemprop="description">', '</div>') + if not lyrics: + return + for not_found_str in self.NOT_FOUND: + if not_found_str in lyrics: + return - return text + parts = lyrics.split('\n---\nLyrics powered by', 1) + if parts: + return parts[0] def remove_credits(text): @@ -338,160 +400,278 @@ return text -def is_lyrics(text, artist=None): - """Determine whether the text seems to be valid lyrics. +def _scrape_strip_cruft(html, plain_text_out=False): + """Clean up HTML """ - if not text: - return + html = unescape(html) - badTriggersOcc = [] - nbLines = text.count('\n') - if nbLines <= 1: - log.debug(u"Ignoring too short lyrics '{0}'".format(text)) - return 0 - elif nbLines < 5: - badTriggersOcc.append('too_short') - else: - # Lyrics look legit, remove credits to avoid being penalized further - # down - text = remove_credits(text) + html = html.replace('\r', '\n') # Normalize EOL. + html = re.sub(r' +', ' ', html) # Whitespaces collapse. + html = BREAK_RE.sub('\n', html) # <br> eats up surrounding '\n'. + html = re.sub(r'<(script).*?</\1>(?s)', '', html) # Strip script tags. - badTriggers = ['lyrics', 'copyright', 'property'] - if artist: - badTriggersOcc += [artist] + if plain_text_out: # Strip remaining HTML tags + html = COMMENT_RE.sub('', html) + html = TAG_RE.sub('', html) - for item in badTriggers: - badTriggersOcc += [item] * len(re.findall(r'\W%s\W' % item, - text, re.I)) + html = '\n'.join([x.strip() for x in html.strip().split('\n')]) + html = re.sub(r'\n{3,}', r'\n\n', html) + return html - if badTriggersOcc: - log.debug(u'Bad triggers detected: {0}'.format(badTriggersOcc)) - return len(badTriggersOcc) < 2 +def _scrape_merge_paragraphs(html): + html = re.sub(r'</p>\s*<p(\s*[^>]*)>', '\n', html) + return re.sub(r'<div .*>\s*</div>', '\n', html) -def scrape_lyrics_from_url(url): +def scrape_lyrics_from_html(html): """Scrape lyrics from a URL. If no lyrics can be found, return None instead. 
""" - from bs4 import BeautifulSoup, Comment - html = fetch_url(url) - if not html: + if not HAS_BEAUTIFUL_SOUP: return None - soup = BeautifulSoup(html) - - for tag in soup.findAll('br'): - tag.replaceWith('\n') + if not html: + return None - # Remove non relevant html parts - [s.extract() for s in soup(['head', 'script'])] - comments = soup.findAll(text=lambda text: isinstance(text, Comment)) - [s.extract() for s in comments] + def is_text_notcode(text): + length = len(text) + return (length > 20 and + text.count(' ') > length / 25 and + (text.find('{') == -1 or text.find(';') == -1)) + html = _scrape_strip_cruft(html) + html = _scrape_merge_paragraphs(html) + # extract all long text blocks that are not code try: - for tag in soup.findAll(True): - tag.name = 'p' # keep tag contents - - except Exception, e: - log.debug(u'Error {0} when replacing containing marker by p marker' - .format(e, exc_info=True)) - - # Make better soup from current soup! The previous unclosed <p> sections - # are now closed. Use str() rather than prettify() as it's more - # conservative concerning EOL - soup = BeautifulSoup(str(soup)) - - # In case lyrics are nested in no markup but <body> - # Insert the whole body in a <p> - bodyTag = soup.find('body') - if bodyTag: - pTag = soup.new_tag("p") - bodyTag.parent.insert(0, pTag) - pTag.insert(0, bodyTag) - - tagTokens = [] - - for tag in soup.findAll('p'): - soup2 = BeautifulSoup(str(tag)) - # Extract all text of <p> section. - tagTokens += soup2.findAll(text=True) - - if tagTokens: - # Lyrics are expected to be the longest paragraph - tagTokens = sorted(tagTokens, key=len, reverse=True) - soup = BeautifulSoup(tagTokens[0]) - return unescape(tagTokens[0].strip("\n\r: ")) - - -def fetch_google(artist, title): - """Fetch lyrics from Google search results. - """ - query = u"%s %s" % (artist, title) - api_key = config['lyrics']['google_API_key'].get(unicode) - engine_id = config['lyrics']['google_engine_ID'].get(unicode) - url = u'https://www.googleapis.com/customsearch/v1?key=%s&cx=%s&q=%s' % \ - (api_key, engine_id, urllib.quote(query.encode('utf8'))) - - data = urllib.urlopen(url) - data = json.load(data) - if 'error' in data: - reason = data['error']['errors'][0]['reason'] - log.debug(u'google lyrics backend error: {0}'.format(reason)) - return + soup = BeautifulSoup(html, "html.parser", + parse_only=SoupStrainer(text=is_text_notcode)) + except HTMLParseError: + return None - if 'items' in data.keys(): - for item in data['items']: - urlLink = item['link'] - urlTitle = item['title'] - if not is_page_candidate(urlLink, urlTitle, title, artist): - continue - lyrics = scrape_lyrics_from_url(urlLink) - if not lyrics: - continue + # Get the longest text element (if any). + strings = sorted(soup.stripped_strings, key=len, reverse=True) + if strings: + return strings[0] + else: + return None - lyrics = sanitize_lyrics(lyrics) - if is_lyrics(lyrics, artist): - log.debug(u'got lyrics from {0}'.format(item['displayLink'])) - return lyrics +class Google(Backend): + """Fetch lyrics from Google search results.""" + def __init__(self, config, log): + super(Google, self).__init__(config, log) + self.api_key = config['google_API_key'].get(unicode) + self.engine_id = config['google_engine_ID'].get(unicode) + def is_lyrics(self, text, artist=None): + """Determine whether the text seems to be valid lyrics. 
+ """ + if not text: + return False + bad_triggers_occ = [] + nb_lines = text.count('\n') + if nb_lines <= 1: + self._log.debug(u"Ignoring too short lyrics '{0}'", text) + return False + elif nb_lines < 5: + bad_triggers_occ.append('too_short') + else: + # Lyrics look legit, remove credits to avoid being penalized + # further down + text = remove_credits(text) + + bad_triggers = ['lyrics', 'copyright', 'property', 'links'] + if artist: + bad_triggers_occ += [artist] + + for item in bad_triggers: + bad_triggers_occ += [item] * len(re.findall(r'\W%s\W' % item, + text, re.I)) + + if bad_triggers_occ: + self._log.debug(u'Bad triggers detected: {0}', bad_triggers_occ) + return len(bad_triggers_occ) < 2 -# Plugin logic. + def slugify(self, text): + """Normalize a string and remove non-alphanumeric characters. + """ + text = re.sub(r"[-'_\s]", '_', text) + text = re.sub(r"_+", '_', text).strip('_') + pat = "([^,\(]*)\((.*?)\)" # Remove content within parentheses + text = re.sub(pat, '\g<1>', text).strip() + try: + text = unicodedata.normalize('NFKD', text).encode('ascii', + 'ignore') + text = unicode(re.sub('[-\s]+', ' ', text.decode('utf-8'))) + except UnicodeDecodeError: + self._log.exception(u"Failing to normalize '{0}'", text) + return text + + BY_TRANS = ['by', 'par', 'de', 'von'] + LYRICS_TRANS = ['lyrics', 'paroles', 'letras', 'liedtexte'] + + def is_page_candidate(self, url_link, url_title, title, artist): + """Return True if the URL title makes it a good candidate to be a + page that contains lyrics of title by artist. + """ + title = self.slugify(title.lower()) + artist = self.slugify(artist.lower()) + sitename = re.search(u"//([^/]+)/.*", + self.slugify(url_link.lower())).group(1) + url_title = self.slugify(url_title.lower()) + + # Check if URL title contains song title (exact match) + if url_title.find(title) != -1: + return True + + # or try extracting song title from URL title and check if + # they are close enough + tokens = [by + '_' + artist for by in self.BY_TRANS] + \ + [artist, sitename, sitename.replace('www.', '')] + \ + self.LYRICS_TRANS + tokens = [re.escape(t) for t in tokens] + song_title = re.sub(u'(%s)' % u'|'.join(tokens), u'', url_title) + + song_title = song_title.strip('_|') + typo_ratio = .9 + ratio = difflib.SequenceMatcher(None, song_title, title).ratio() + return ratio >= typo_ratio + + def fetch(self, artist, title): + query = u"%s %s" % (artist, title) + url = u'https://www.googleapis.com/customsearch/v1?key=%s&cx=%s&q=%s' \ + % (self.api_key, self.engine_id, + urllib.quote(query.encode('utf8'))) + + data = urllib.urlopen(url) + data = json.load(data) + if 'error' in data: + reason = data['error']['errors'][0]['reason'] + self._log.debug(u'google lyrics backend error: {0}', reason) + return + if 'items' in data.keys(): + for item in data['items']: + url_link = item['link'] + url_title = item.get('title', u'') + if not self.is_page_candidate(url_link, url_title, + title, artist): + continue + html = self.fetch_url(url_link) + lyrics = scrape_lyrics_from_html(html) + if not lyrics: + continue + + if self.is_lyrics(lyrics, artist): + self._log.debug(u'got lyrics from {0}', + item['displayLink']) + return lyrics + + +class LyricsPlugin(plugins.BeetsPlugin): + SOURCES = ['google', 'lyricwiki', 'lyrics.com', 'musixmatch'] + SOURCE_BACKENDS = { + 'google': Google, + 'lyricwiki': LyricsWiki, + 'lyrics.com': LyricsCom, + 'musixmatch': MusiXmatch, + 'genius': Genius, + } -class LyricsPlugin(BeetsPlugin): def __init__(self): super(LyricsPlugin, self).__init__() 
self.import_stages = [self.imported] self.config.add({ 'auto': True, + 'bing_client_secret': None, + 'bing_lang_from': [], + 'bing_lang_to': None, 'google_API_key': None, 'google_engine_ID': u'009217259823014548361:lndtuqkycfu', + 'genius_api_key': + "Ryq93pUGm8bM6eUWwD_M3NOFFDAtp2yEE7W" + "76V-uFL5jks5dNvcGCdarqFjDhP9c", 'fallback': None, + 'force': False, + 'sources': self.SOURCES, }) - - self.backends = [fetch_lyricswiki, fetch_lyricscom] - - if self.config['google_API_key'].get(): - self.backends.insert(0, fetch_google) + self.config['bing_client_secret'].redact = True + self.config['google_API_key'].redact = True + self.config['google_engine_ID'].redact = True + self.config['genius_api_key'].redact = True + + available_sources = list(self.SOURCES) + sources = plugins.sanitize_choices( + self.config['sources'].as_str_seq(), available_sources) + + if 'google' in sources: + if not self.config['google_API_key'].get(): + # We log a *debug* message here because the default + # configuration includes `google`. This way, the source + # is silent by default but can be enabled just by + # setting an API key. + self._log.debug(u'Disabling google source: ' + u'no API key configured.') + sources.remove('google') + elif not HAS_BEAUTIFUL_SOUP: + self._log.warn(u'To use the google lyrics source, you must ' + u'install the beautifulsoup4 module. See the ' + u'documentation for further details.') + sources.remove('google') + + self.config['bing_lang_from'] = [ + x.lower() for x in self.config['bing_lang_from'].as_str_seq()] + self.bing_auth_token = None + + if not HAS_LANGDETECT and self.config['bing_client_secret'].get(): + self._log.warn(u'To use bing translations, you need to ' + u'install the langdetect module. See the ' + u'documentation for further details.') + + self.backends = [self.SOURCE_BACKENDS[source](self.config, self._log) + for source in sources] + + def get_bing_access_token(self): + params = { + 'client_id': 'beets', + 'client_secret': self.config['bing_client_secret'], + 'scope': 'http://api.microsofttranslator.com', + 'grant_type': 'client_credentials', + } + + oauth_url = 'https://datamarket.accesscontrol.windows.net/v2/OAuth2-13' + oauth_token = json.loads(requests.post( + oauth_url, + data=urllib.urlencode(params)).content) + if 'access_token' in oauth_token: + return "Bearer " + oauth_token['access_token'] + else: + self._log.warning(u'Could not get Bing Translate API access token.' + u' Check your "bing_client_secret" password') def commands(self): cmd = ui.Subcommand('lyrics', help='fetch song lyrics') - cmd.parser.add_option('-p', '--print', dest='printlyr', - action='store_true', default=False, - help='print lyrics to console') - cmd.parser.add_option('-f', '--force', dest='force_refetch', - action='store_true', default=False, - help='always re-download lyrics') + cmd.parser.add_option( + u'-p', u'--print', dest='printlyr', + action='store_true', default=False, + help=u'print lyrics to console', + ) + cmd.parser.add_option( + u'-f', u'--force', dest='force_refetch', + action='store_true', default=False, + help=u'always re-download lyrics', + ) def func(lib, opts, args): # The "write to files" option corresponds to the # import_write config value. 
- write = config['import']['write'].get(bool) + write = ui.should_write() for item in lib.items(ui.decargs(args)): - self.fetch_item_lyrics(lib, logging.INFO, item, write, - opts.force_refetch) + self.fetch_item_lyrics( + lib, item, write, + opts.force_refetch or self.config['force'], + ) if opts.printlyr and item.lyrics: ui.print_(item.lyrics) @@ -503,19 +683,15 @@ """ if self.config['auto']: for item in task.imported_items(): - self.fetch_item_lyrics(session.lib, logging.DEBUG, item, - False, False) + self.fetch_item_lyrics(session.lib, item, + False, self.config['force']) - def fetch_item_lyrics(self, lib, loglevel, item, write, force): + def fetch_item_lyrics(self, lib, item, write, force): """Fetch and store lyrics for a single item. If ``write``, then the - lyrics will also be written to the file itself. The ``loglevel`` - parameter controls the visibility of the function's status log - messages. - """ + lyrics will also be written to the file itself.""" # Skip if the item already has lyrics. if not force and item.lyrics: - log.log(loglevel, u'lyrics already present: {0} - {1}' - .format(item.artist, item.title)) + self._log.info(u'lyrics already present: {0}', item) return lyrics = None @@ -527,19 +703,23 @@ lyrics = u"\n\n---\n\n".join([l for l in lyrics if l]) if lyrics: - log.log(loglevel, u'fetched lyrics: {0} - {1}' - .format(item.artist, item.title)) + self._log.info(u'fetched lyrics: {0}', item) + if HAS_LANGDETECT and self.config['bing_client_secret'].get(): + lang_from = langdetect.detect(lyrics) + if self.config['bing_lang_to'].get() != lang_from and ( + not self.config['bing_lang_from'] or ( + lang_from in self.config[ + 'bing_lang_from'].as_str_seq())): + lyrics = self.append_translation( + lyrics, self.config['bing_lang_to']) else: - log.log(loglevel, u'lyrics not found: {0} - {1}' - .format(item.artist, item.title)) + self._log.info(u'lyrics not found: {0}', item) fallback = self.config['fallback'].get() if fallback: lyrics = fallback else: return - item.lyrics = lyrics - if write: item.try_write() item.store() @@ -549,10 +729,35 @@ None if no lyrics were found. 
""" for backend in self.backends: - lyrics = backend(artist, title) + lyrics = backend.fetch(artist, title) if lyrics: - if isinstance(lyrics, str): - lyrics = lyrics.decode('utf8', 'ignore') - log.debug(u'got lyrics from backend: {0}' - .format(backend.__name__)) - return lyrics.strip() + self._log.debug(u'got lyrics from backend: {0}', + backend.__class__.__name__) + return _scrape_strip_cruft(lyrics, True) + + def append_translation(self, text, to_lang): + import xml.etree.ElementTree as ET + + if not self.bing_auth_token: + self.bing_auth_token = self.get_bing_access_token() + if self.bing_auth_token: + # Extract unique lines to limit API request size per song + text_lines = set(text.split('\n')) + url = ('http://api.microsofttranslator.com/v2/Http.svc/' + 'Translate?text=%s&to=%s' % ('|'.join(text_lines), to_lang)) + r = requests.get(url, + headers={"Authorization ": self.bing_auth_token}) + if r.status_code != 200: + self._log.debug('translation API error {}: {}', r.status_code, + r.text) + if 'token has expired' in r.text: + self.bing_auth_token = None + return self.append_translation(text, to_lang) + return text + lines_translated = ET.fromstring(r.text.encode('utf8')).text + # Use a translation mapping dict to build resulting lyrics + translations = dict(zip(text_lines, lines_translated.split('|'))) + result = '' + for line in text.split('\n'): + result += '%s / %s\n' % (line, translations[line]) + return result diff -Nru beets-1.3.8+dfsg/beetsplug/mbcollection.py beets-1.3.19/beetsplug/mbcollection.py --- beets-1.3.8+dfsg/beetsplug/mbcollection.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/beetsplug/mbcollection.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright (c) 2011, Jeffrey Aylesworth <jeffrey@aylesworth.ca> # # Permission to use, copy, modify, and/or distribute this software for any @@ -12,7 +13,7 @@ # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -from __future__ import print_function +from __future__ import division, absolute_import, print_function from beets.plugins import BeetsPlugin from beets.ui import Subcommand @@ -21,13 +22,10 @@ import musicbrainzngs import re -import logging SUBMISSION_CHUNK_SIZE = 200 UUID_REGEX = r'^[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}$' -log = logging.getLogger('beets.bpd') - def mb_call(func, *args, **kwargs): """Call a MusicBrainz API function and catch exceptions. @@ -35,11 +33,11 @@ try: return func(*args, **kwargs) except musicbrainzngs.AuthenticationError: - raise ui.UserError('authentication with MusicBrainz failed') - except musicbrainzngs.ResponseError as exc: - raise ui.UserError('MusicBrainz API error: {0}'.format(exc)) + raise ui.UserError(u'authentication with MusicBrainz failed') + except (musicbrainzngs.ResponseError, musicbrainzngs.NetworkError) as exc: + raise ui.UserError(u'MusicBrainz API error: {0}'.format(exc)) except musicbrainzngs.UsageError: - raise ui.UserError('MusicBrainz credentials missing') + raise ui.UserError(u'MusicBrainz credentials missing') def submit_albums(collection_id, release_ids): @@ -54,43 +52,10 @@ ) -def update_album_list(album_list): - """Update the MusicBrainz colleciton from a list of Beets albums - """ - # Get the collection to modify. 
- collections = mb_call(musicbrainzngs.get_collections) - if not collections['collection-list']: - raise ui.UserError('no collections exist for user') - collection_id = collections['collection-list'][0]['id'] - - # Get a list of all the album IDs. - album_ids = [] - for album in album_list: - aid = album.mb_albumid - if aid: - if re.match(UUID_REGEX, aid): - album_ids.append(aid) - else: - log.info(u'skipping invalid MBID: {0}'.format(aid)) - - # Submit to MusicBrainz. - print('Updating MusicBrainz collection {0}...'.format(collection_id)) - submit_albums(collection_id, album_ids) - print('...MusicBrainz collection updated.') - - -def update_collection(lib, opts, args): - update_album_list(lib.albums()) - - -update_mb_collection_cmd = Subcommand('mbupdate', - help='Update MusicBrainz collection') -update_mb_collection_cmd.func = update_collection - - class MusicBrainzCollectionPlugin(BeetsPlugin): def __init__(self): super(MusicBrainzCollectionPlugin, self).__init__() + config['musicbrainz']['pass'].redact = True musicbrainzngs.auth( config['musicbrainz']['user'].get(unicode), config['musicbrainz']['pass'].get(unicode), @@ -100,10 +65,50 @@ self.import_stages = [self.imported] def commands(self): - return [update_mb_collection_cmd] + mbupdate = Subcommand('mbupdate', + help=u'Update MusicBrainz collection') + mbupdate.func = self.update_collection + return [mbupdate] + + def update_collection(self, lib, opts, args): + self.update_album_list(lib.albums()) def imported(self, session, task): """Add each imported album to the collection. """ if task.is_album: - update_album_list([task.album]) + self.update_album_list([task.album]) + + def update_album_list(self, album_list): + """Update the MusicBrainz colleciton from a list of Beets albums + """ + # Get the available collections. + collections = mb_call(musicbrainzngs.get_collections) + if not collections['collection-list']: + raise ui.UserError(u'no collections exist for user') + + # Get the first release collection. MusicBrainz also has event + # collections, so we need to avoid adding to those. + for collection in collections['collection-list']: + if 'release-count' in collection: + collection_id = collection['id'] + break + else: + raise ui.UserError(u'No collection found.') + + # Get a list of all the album IDs. + album_ids = [] + for album in album_list: + aid = album.mb_albumid + if aid: + if re.match(UUID_REGEX, aid): + album_ids.append(aid) + else: + self._log.info(u'skipping invalid MBID: {0}', aid) + + # Submit to MusicBrainz. + self._log.info( + u'Updating MusicBrainz collection {0}...', collection_id + ) + submit_albums(collection_id, album_ids) + self._log.info(u'...MusicBrainz collection updated.') diff -Nru beets-1.3.8+dfsg/beetsplug/mbsubmit.py beets-1.3.19/beetsplug/mbsubmit.py --- beets-1.3.8+dfsg/beetsplug/mbsubmit.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beetsplug/mbsubmit.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson and Diego Moreda. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Aid in submitting information to MusicBrainz. + +This plugin allows the user to print track information in a format that is +parseable by the MusicBrainz track parser [1]. Programmatic submitting is not +implemented by MusicBrainz yet. + +[1] http://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings +""" + +from __future__ import division, absolute_import, print_function + + +from beets.autotag import Recommendation +from beets.plugins import BeetsPlugin +from beets.ui.commands import PromptChoice +from beetsplug.info import print_data + + +class MBSubmitPlugin(BeetsPlugin): + def __init__(self): + super(MBSubmitPlugin, self).__init__() + + self.config.add({ + 'format': '$track. $title - $artist ($length)', + 'threshold': 'medium', + }) + + # Validate and store threshold. + self.threshold = self.config['threshold'].as_choice({ + 'none': Recommendation.none, + 'low': Recommendation.low, + 'medium': Recommendation.medium, + 'strong': Recommendation.strong + }) + + self.register_listener('before_choose_candidate', + self.before_choose_candidate_event) + + def before_choose_candidate_event(self, session, task): + if task.rec <= self.threshold: + return [PromptChoice(u'p', u'Print tracks', self.print_tracks)] + + def print_tracks(self, session, task): + for i in task.items: + print_data(None, i, self.config['format'].get()) diff -Nru beets-1.3.8+dfsg/beetsplug/mbsync.py beets-1.3.19/beetsplug/mbsync.py --- beets-1.3.8+dfsg/beetsplug/mbsync.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/beetsplug/mbsync.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Jakob Schnitzer. +# Copyright 2016, Jakob Schnitzer. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,104 +15,13 @@ """Update library's tags using MusicBrainz. """ -import logging +from __future__ import division, absolute_import, print_function from beets.plugins import BeetsPlugin from beets import autotag, library, ui, util from beets.autotag import hooks -from beets import config from collections import defaultdict -log = logging.getLogger('beets') - - -def mbsync_singletons(lib, query, move, pretend, write): - """Retrieve and apply info from the autotagger for items matched by - query. - """ - for item in lib.items(query + ['singleton:true']): - if not item.mb_trackid: - log.info(u'Skipping singleton {0}: has no mb_trackid' - .format(item.title)) - continue - - # Get the MusicBrainz recording info. - track_info = hooks.track_for_mbid(item.mb_trackid) - if not track_info: - log.info(u'Recording ID not found: {0}'.format(item.mb_trackid)) - continue - - # Apply. 
- with lib.transaction(): - autotag.apply_item_metadata(item, track_info) - apply_item_changes(lib, item, move, pretend, write) - - -def mbsync_albums(lib, query, move, pretend, write): - """Retrieve and apply info from the autotagger for albums matched by - query and their items. - """ - # Process matching albums. - for a in lib.albums(query): - if not a.mb_albumid: - log.info(u'Skipping album {0}: has no mb_albumid'.format(a.id)) - continue - - items = list(a.items()) - - # Get the MusicBrainz album information. - album_info = hooks.album_for_mbid(a.mb_albumid) - if not album_info: - log.info(u'Release ID not found: {0}'.format(a.mb_albumid)) - continue - - # Map recording MBIDs to their information. Recordings can appear - # multiple times on a release, so each MBID maps to a list of TrackInfo - # objects. - track_index = defaultdict(list) - for track_info in album_info.tracks: - track_index[track_info.track_id].append(track_info) - - # Construct a track mapping according to MBIDs. This should work - # for albums that have missing or extra tracks. If there are multiple - # copies of a recording, they are disambiguated using their disc and - # track number. - mapping = {} - for item in items: - candidates = track_index[item.mb_trackid] - if len(candidates) == 1: - mapping[item] = candidates[0] - else: - for c in candidates: - if c.medium_index == item.track and c.medium == item.disc: - mapping[item] = c - break - - # Apply. - with lib.transaction(): - autotag.apply_metadata(album_info, mapping) - changed = False - for item in items: - item_changed = ui.show_model_changes(item) - changed |= item_changed - if item_changed: - apply_item_changes(lib, item, move, pretend, write) - - if not changed: - # No change to any item. - continue - - if not pretend: - # Update album structure to reflect an item in it. - for key in library.Album.item_keys: - a[key] = items[0][key] - a.store() - - # Move album art (and any inconsistent items). - if move and lib.directory in util.ancestry(items[0].path): - log.debug(u'moving album {0}'.format(a.id)) - a.move() - def apply_item_changes(lib, item, move, pretend, write): """Store, move and write the item according to the arguments. @@ -126,32 +36,132 @@ item.store() -def mbsync_func(lib, opts, args): - """Command handler for the mbsync function. 
- """ - move = opts.move - pretend = opts.pretend - write = opts.write - query = ui.decargs(args) - - mbsync_singletons(lib, query, move, pretend, write) - mbsync_albums(lib, query, move, pretend, write) - - class MBSyncPlugin(BeetsPlugin): def __init__(self): super(MBSyncPlugin, self).__init__() def commands(self): cmd = ui.Subcommand('mbsync', - help='update metadata from musicbrainz') - cmd.parser.add_option('-p', '--pretend', action='store_true', - help='show all changes but do nothing') - cmd.parser.add_option('-M', '--nomove', action='store_false', - default=True, dest='move', - help="don't move files in library") - cmd.parser.add_option('-W', '--nowrite', action='store_false', - default=config['import']['write'], dest='write', - help="don't write updated metadata to files") - cmd.func = mbsync_func + help=u'update metadata from musicbrainz') + cmd.parser.add_option( + u'-p', u'--pretend', action='store_true', + help=u'show all changes but do nothing') + cmd.parser.add_option( + u'-m', u'--move', action='store_true', dest='move', + help=u"move files in the library directory") + cmd.parser.add_option( + u'-M', u'--nomove', action='store_false', dest='move', + help=u"don't move files in library") + cmd.parser.add_option( + u'-W', u'--nowrite', action='store_false', + default=None, dest='write', + help=u"don't write updated metadata to files") + cmd.parser.add_format_option() + cmd.func = self.func return [cmd] + + def func(self, lib, opts, args): + """Command handler for the mbsync function. + """ + move = ui.should_move(opts.move) + pretend = opts.pretend + write = ui.should_write(opts.write) + query = ui.decargs(args) + + self.singletons(lib, query, move, pretend, write) + self.albums(lib, query, move, pretend, write) + + def singletons(self, lib, query, move, pretend, write): + """Retrieve and apply info from the autotagger for items matched by + query. + """ + for item in lib.items(query + [u'singleton:true']): + item_formatted = format(item) + if not item.mb_trackid: + self._log.info(u'Skipping singleton with no mb_trackid: {0}', + item_formatted) + continue + + # Get the MusicBrainz recording info. + track_info = hooks.track_for_mbid(item.mb_trackid) + if not track_info: + self._log.info(u'Recording ID not found: {0} for track {0}', + item.mb_trackid, + item_formatted) + continue + + # Apply. + with lib.transaction(): + autotag.apply_item_metadata(item, track_info) + apply_item_changes(lib, item, move, pretend, write) + + def albums(self, lib, query, move, pretend, write): + """Retrieve and apply info from the autotagger for albums matched by + query and their items. + """ + # Process matching albums. + for a in lib.albums(query): + album_formatted = format(a) + if not a.mb_albumid: + self._log.info(u'Skipping album with no mb_albumid: {0}', + album_formatted) + continue + + items = list(a.items()) + + # Get the MusicBrainz album information. + album_info = hooks.album_for_mbid(a.mb_albumid) + if not album_info: + self._log.info(u'Release ID {0} not found for album {1}', + a.mb_albumid, + album_formatted) + continue + + # Map recording MBIDs to their information. Recordings can appear + # multiple times on a release, so each MBID maps to a list of + # TrackInfo objects. + track_index = defaultdict(list) + for track_info in album_info.tracks: + track_index[track_info.track_id].append(track_info) + + # Construct a track mapping according to MBIDs. This should work + # for albums that have missing or extra tracks. 
If there are + # multiple copies of a recording, they are disambiguated using + # their disc and track number. + mapping = {} + for item in items: + candidates = track_index[item.mb_trackid] + if len(candidates) == 1: + mapping[item] = candidates[0] + else: + for c in candidates: + if (c.medium_index == item.track and + c.medium == item.disc): + mapping[item] = c + break + + # Apply. + self._log.debug(u'applying changes to {}', album_formatted) + with lib.transaction(): + autotag.apply_metadata(album_info, mapping) + changed = False + for item in items: + item_changed = ui.show_model_changes(item) + changed |= item_changed + if item_changed: + apply_item_changes(lib, item, move, pretend, write) + + if not changed: + # No change to any item. + continue + + if not pretend: + # Update album structure to reflect an item in it. + for key in library.Album.item_keys: + a[key] = items[0][key] + a.store() + + # Move album art (and any inconsistent items). + if move and lib.directory in util.ancestry(items[0].path): + self._log.debug(u'moving album {0}', album_formatted) + a.move() diff -Nru beets-1.3.8+dfsg/beetsplug/metasync/amarok.py beets-1.3.19/beetsplug/metasync/amarok.py --- beets-1.3.8+dfsg/beetsplug/metasync/amarok.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beetsplug/metasync/amarok.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Heinz Wiesinger. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Synchronize information from amarok's library via dbus +""" + +from __future__ import division, absolute_import, print_function + +from os.path import basename +from datetime import datetime +from time import mktime +from xml.sax.saxutils import escape + +from beets.util import displayable_path +from beets.dbcore import types +from beets.library import DateType +from beetsplug.metasync import MetaSource + + +def import_dbus(): + try: + return __import__('dbus') + except ImportError: + return None + +dbus = import_dbus() + + +class Amarok(MetaSource): + + item_types = { + 'amarok_rating': types.INTEGER, + 'amarok_score': types.FLOAT, + 'amarok_uid': types.STRING, + 'amarok_playcount': types.INTEGER, + 'amarok_firstplayed': DateType(), + 'amarok_lastplayed': DateType(), + } + + queryXML = u'<query version="1.0"> \ + <filters> \ + <and><include field="filename" value="%s" /></and> \ + </filters> \ + </query>' + + def __init__(self, config, log): + super(Amarok, self).__init__(config, log) + + if not dbus: + raise ImportError('failed to import dbus') + + self.collection = \ + dbus.SessionBus().get_object('org.kde.amarok', '/Collection') + + def sync_from_source(self, item): + path = displayable_path(item.path) + + # amarok unfortunately doesn't allow searching for the full path, only + # for the patch relative to the mount point. But the full path is part + # of the result set. 
So query for the filename and then try to match + # the correct item from the results we get back + results = self.collection.Query(self.queryXML % escape(basename(path))) + for result in results: + if result['xesam:url'] != path: + continue + + item.amarok_rating = result['xesam:userRating'] + item.amarok_score = result['xesam:autoRating'] + item.amarok_playcount = result['xesam:useCount'] + item.amarok_uid = \ + result['xesam:id'].replace('amarok-sqltrackuid://', '') + + if result['xesam:firstUsed'][0][0] != 0: + # These dates are stored as timestamps in amarok's db, but + # exposed over dbus as fixed integers in the current timezone. + first_played = datetime( + result['xesam:firstUsed'][0][0], + result['xesam:firstUsed'][0][1], + result['xesam:firstUsed'][0][2], + result['xesam:firstUsed'][1][0], + result['xesam:firstUsed'][1][1], + result['xesam:firstUsed'][1][2] + ) + + if result['xesam:lastUsed'][0][0] != 0: + last_played = datetime( + result['xesam:lastUsed'][0][0], + result['xesam:lastUsed'][0][1], + result['xesam:lastUsed'][0][2], + result['xesam:lastUsed'][1][0], + result['xesam:lastUsed'][1][1], + result['xesam:lastUsed'][1][2] + ) + else: + last_played = first_played + + item.amarok_firstplayed = mktime(first_played.timetuple()) + item.amarok_lastplayed = mktime(last_played.timetuple()) diff -Nru beets-1.3.8+dfsg/beetsplug/metasync/__init__.py beets-1.3.19/beetsplug/metasync/__init__.py --- beets-1.3.8+dfsg/beetsplug/metasync/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beetsplug/metasync/__init__.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,142 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Heinz Wiesinger. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Synchronize information from music player libraries +""" + +from __future__ import division, absolute_import, print_function + +from abc import abstractmethod, ABCMeta +from importlib import import_module + +from beets.util.confit import ConfigValueError +from beets import ui +from beets.plugins import BeetsPlugin + + +METASYNC_MODULE = 'beetsplug.metasync' + +# Dictionary to map the MODULE and the CLASS NAME of meta sources +SOURCES = { + 'amarok': 'Amarok', + 'itunes': 'Itunes', +} + + +class MetaSource(object): + __metaclass__ = ABCMeta + + def __init__(self, config, log): + self.item_types = {} + self.config = config + self._log = log + + @abstractmethod + def sync_from_source(self, item): + pass + + +def load_meta_sources(): + """ Returns a dictionary of all the MetaSources + E.g., {'itunes': Itunes} with isinstance(Itunes, MetaSource) true + """ + meta_sources = {} + + for module_path, class_name in SOURCES.items(): + module = import_module(METASYNC_MODULE + '.' 
+ module_path) + meta_sources[class_name.lower()] = getattr(module, class_name) + + return meta_sources + + +META_SOURCES = load_meta_sources() + + +def load_item_types(): + """ Returns a dictionary containing the item_types of all the MetaSources + """ + item_types = {} + for meta_source in META_SOURCES.values(): + item_types.update(meta_source.item_types) + return item_types + + +class MetaSyncPlugin(BeetsPlugin): + + item_types = load_item_types() + + def __init__(self): + super(MetaSyncPlugin, self).__init__() + + def commands(self): + cmd = ui.Subcommand('metasync', + help='update metadata from music player libraries') + cmd.parser.add_option('-p', '--pretend', action='store_true', + help='show all changes but do nothing') + cmd.parser.add_option('-s', '--source', default=[], + action='append', dest='sources', + help='comma-separated list of sources to sync') + cmd.parser.add_format_option() + cmd.func = self.func + return [cmd] + + def func(self, lib, opts, args): + """Command handler for the metasync function. + """ + pretend = opts.pretend + query = ui.decargs(args) + + sources = [] + for source in opts.sources: + sources.extend(source.split(',')) + + sources = sources or self.config['source'].as_str_seq() + + meta_source_instances = {} + items = lib.items(query) + + # Avoid needlessly instantiating meta sources (can be expensive) + if not items: + self._log.info(u'No items found matching query') + return + + # Instantiate the meta sources + for player in sources: + try: + cls = META_SOURCES[player] + except KeyError: + self._log.error(u'Unknown metadata source \'{0}\''.format( + player)) + + try: + meta_source_instances[player] = cls(self.config, self._log) + except (ImportError, ConfigValueError) as e: + self._log.error(u'Failed to instantiate metadata source ' + u'\'{0}\': {1}'.format(player, e)) + + # Avoid needlessly iterating over items + if not meta_source_instances: + self._log.error(u'No valid metadata sources found') + return + + # Sync the items with all of the meta sources + for item in items: + for meta_source in meta_source_instances.values(): + meta_source.sync_from_source(item) + + changed = ui.show_model_changes(item) + + if changed and not pretend: + item.store() diff -Nru beets-1.3.8+dfsg/beetsplug/metasync/itunes.py beets-1.3.19/beetsplug/metasync/itunes.py --- beets-1.3.8+dfsg/beetsplug/metasync/itunes.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beetsplug/metasync/itunes.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Tom Jaspers. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
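# The Itunes source below implements the MetaSource interface defined in
# beetsplug/metasync/__init__.py above. As a minimal sketch of that contract
# (hypothetical player name, not part of the patch): a source declares the
# flexible item fields it provides and copies values onto the item in
# sync_from_source(); it is hooked up by adding a 'modulename': 'ClassName'
# entry to SOURCES so that load_meta_sources() can import it.
from beets.dbcore import types
from beetsplug.metasync import MetaSource


class ExamplePlayer(MetaSource):
    item_types = {'example_playcount': types.INTEGER}

    def sync_from_source(self, item):
        # Copy whatever statistics the player's database holds for this item.
        item.example_playcount = 0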
+ +"""Synchronize information from iTunes's library +""" + +from __future__ import division, absolute_import, print_function + +from contextlib import contextmanager +import os +import shutil +import tempfile +import plistlib +import urllib +from urlparse import urlparse +from time import mktime + +from beets import util +from beets.dbcore import types +from beets.library import DateType +from beets.util.confit import ConfigValueError +from beetsplug.metasync import MetaSource + + +@contextmanager +def create_temporary_copy(path): + temp_dir = tempfile.mkdtemp() + temp_path = os.path.join(temp_dir, 'temp_itunes_lib') + shutil.copyfile(path, temp_path) + try: + yield temp_path + finally: + shutil.rmtree(temp_dir) + + +def _norm_itunes_path(path): + # Itunes prepends the location with 'file://' on posix systems, + # and with 'file://localhost/' on Windows systems. + # The actual path to the file is always saved as posix form + # E.g., 'file://Users/Music/bar' or 'file://localhost/G:/Music/bar' + + # The entire path will also be capitalized (e.g., '/Music/Alt-J') + # Note that this means the path will always have a leading separator, + # which is unwanted in the case of Windows systems. + # E.g., '\\G:\\Music\\bar' needs to be stripped to 'G:\\Music\\bar' + + return util.bytestring_path(os.path.normpath( + urllib.unquote(urlparse(path).path)).lstrip('\\')).lower() + + +class Itunes(MetaSource): + + item_types = { + 'itunes_rating': types.INTEGER, # 0..100 scale + 'itunes_playcount': types.INTEGER, + 'itunes_skipcount': types.INTEGER, + 'itunes_lastplayed': DateType(), + 'itunes_lastskipped': DateType(), + } + + def __init__(self, config, log): + super(Itunes, self).__init__(config, log) + + config.add({'itunes': { + 'library': '~/Music/iTunes/iTunes Library.xml' + }}) + + # Load the iTunes library, which has to be the .xml one (not the .itl) + library_path = config['itunes']['library'].as_filename() + + try: + self._log.debug( + u'loading iTunes library from {0}'.format(library_path)) + with create_temporary_copy(library_path) as library_copy: + raw_library = plistlib.readPlist(library_copy) + except IOError as e: + raise ConfigValueError(u'invalid iTunes library: ' + e.strerror) + except Exception: + # It's likely the user configured their '.itl' library (<> xml) + if os.path.splitext(library_path)[1].lower() != '.xml': + hint = u': please ensure that the configured path' \ + u' points to the .XML library' + else: + hint = '' + raise ConfigValueError(u'invalid iTunes library' + hint) + + # Make the iTunes library queryable using the path + self.collection = {_norm_itunes_path(track['Location']): track + for track in raw_library['Tracks'].values() + if 'Location' in track} + + def sync_from_source(self, item): + result = self.collection.get(util.bytestring_path(item.path).lower()) + + if not result: + self._log.warning(u'no iTunes match found for {0}'.format(item)) + return + + item.itunes_rating = result.get('Rating') + item.itunes_playcount = result.get('Play Count') + item.itunes_skipcount = result.get('Skip Count') + + if result.get('Play Date UTC'): + item.itunes_lastplayed = mktime( + result.get('Play Date UTC').timetuple()) + + if result.get('Skip Date'): + item.itunes_lastskipped = mktime( + result.get('Skip Date').timetuple()) diff -Nru beets-1.3.8+dfsg/beetsplug/missing.py beets-1.3.19/beetsplug/missing.py --- beets-1.3.8+dfsg/beetsplug/missing.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/beetsplug/missing.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- 
coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Pedro Silva. +# Copyright 2016, Pedro Silva. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,40 +15,19 @@ """List missing tracks. """ -import logging +from __future__ import division, absolute_import, print_function from beets.autotag import hooks from beets.library import Item from beets.plugins import BeetsPlugin -from beets.ui import decargs, print_obj, Subcommand - -PLUGIN = 'missing' -log = logging.getLogger('beets') +from beets.ui import decargs, print_, Subcommand +from beets import config def _missing_count(album): """Return number of missing items in `album`. """ - return (album.tracktotal or 0) - len(album.items()) - - -def _missing(album): - """Query MusicBrainz to determine items missing from `album`. - """ - item_mbids = map(lambda x: x.mb_trackid, album.items()) - - if len([i for i in album.items()]) < album.tracktotal: - # fetch missing items - # TODO: Implement caching that without breaking other stuff - album_info = hooks.album_for_mbid(album.mb_albumid) - for track_info in getattr(album_info, 'tracks', []): - if track_info.track_id not in item_mbids: - item = _item(track_info, album_info, album.id) - log.debug(u'{0}: track {1} in album {2}' - .format(PLUGIN, - track_info.track_id, - album_info.album_id)) - yield item + return (album.albumtotal or 0) - len(album.items()) def _item(track_info, album_info, album_id): @@ -88,7 +68,7 @@ 'mb_artistid': t.artist_id, 'mb_releasegroupid': a.releasegroup_id, 'mb_trackid': t.track_id, - 'media': a.media, + 'media': t.media, 'month': a.month, 'script': a.script, 'title': t.title, @@ -105,7 +85,6 @@ super(MissingPlugin, self).__init__() self.config.add({ - 'format': None, 'count': False, 'total': False, }) @@ -115,26 +94,20 @@ self._command = Subcommand('missing', help=__doc__, aliases=['miss']) - - self._command.parser.add_option('-f', '--format', dest='format', - action='store', type='string', - help='print with custom FORMAT', - metavar='FORMAT') - - self._command.parser.add_option('-c', '--count', dest='count', - action='store_true', - help='count missing tracks per album') - - self._command.parser.add_option('-t', '--total', dest='total', - action='store_true', - help='count total of missing tracks') + self._command.parser.add_option( + u'-c', u'--count', dest='count', action='store_true', + help=u'count missing tracks per album') + self._command.parser.add_option( + u'-t', u'--total', dest='total', action='store_true', + help=u'count total of missing tracks') + self._command.parser.add_format_option() def commands(self): def _miss(lib, opts, args): self.config.set_args(opts) - fmt = self.config['format'].get() count = self.config['count'].get() total = self.config['total'].get() + fmt = config['format_album' if count else 'format_item'].get() albums = lib.albums(decargs(args)) if total: @@ -142,18 +115,32 @@ return # Default format string for count mode. 
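            # In count mode the album format gains a ': $missing' suffix, so
            # with the stock format_album the output looks roughly like
            # "Some Artist - Some Album: 2" (illustrative values; $missing is
            # the per-album count computed by _missing_count()).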
- if count and not fmt: - fmt = '$albumartist - $album: $missing' + if count: + fmt += ': $missing' for album in albums: if count: - missing = _missing_count(album) - if missing: - print_obj(album, lib, fmt=fmt) + if _missing_count(album): + print_(format(album, fmt)) else: - for item in _missing(album): - print_obj(item, lib, fmt=fmt) + for item in self._missing(album): + print_(format(item, fmt)) self._command.func = _miss return [self._command] + + def _missing(self, album): + """Query MusicBrainz to determine items missing from `album`. + """ + item_mbids = [x.mb_trackid for x in album.items()] + if len([i for i in album.items()]) < album.albumtotal: + # fetch missing items + # TODO: Implement caching that without breaking other stuff + album_info = hooks.album_for_mbid(album.mb_albumid) + for track_info in getattr(album_info, 'tracks', []): + if track_info.track_id not in item_mbids: + item = _item(track_info, album_info, album.id) + self._log.debug(u'track {0} in album {1}', + track_info.track_id, album_info.album_id) + yield item diff -Nru beets-1.3.8+dfsg/beetsplug/mpdstats.py beets-1.3.19/beetsplug/mpdstats.py --- beets-1.3.8+dfsg/beetsplug/mpdstats.py 2014-09-14 18:35:06.000000000 +0000 +++ beets-1.3.19/beetsplug/mpdstats.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,6 +1,6 @@ -# coding=utf-8 +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Peter Schnebel and Johann Klähn. +# Copyright 2016, Peter Schnebel and Johann Klähn. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -13,7 +13,8 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. -import logging +from __future__ import division, absolute_import, print_function + import mpd import socket import select @@ -27,17 +28,20 @@ from beets.util import displayable_path from beets.dbcore import types -log = logging.getLogger('beets') - # If we lose the connection, how many times do we want to retry and how # much time should we wait between retries? RETRIES = 10 RETRY_INTERVAL = 5 +mpd_config = config['mpd'] + + def is_url(path): """Try to determine if the path is an URL. """ + if isinstance(path, bytes): # if it's bytes, then it's a path + return False return path.split('://', 1)[0] in ['http', 'https'] @@ -56,34 +60,36 @@ class MPDClientWrapper(object): - def __init__(self): + def __init__(self, log): + self._log = log + self.music_directory = ( - config['mpdstats']['music_directory'].get(unicode)) + mpd_config['music_directory'].get(unicode)) self.client = MPDClient() def connect(self): """Connect to the MPD. 
""" - host = config['mpd']['host'].get(unicode) - port = config['mpd']['port'].get(int) + host = mpd_config['host'].get(unicode) + port = mpd_config['port'].get(int) if host[0] in ['/', '~']: host = os.path.expanduser(host) - log.info(u'mpdstats: connecting to {0}:{1}'.format(host, port)) + self._log.info(u'connecting to {0}:{1}', host, port) try: self.client.connect(host, port) except socket.error as e: - raise ui.UserError('could not connect to MPD: {0}'.format(e)) + raise ui.UserError(u'could not connect to MPD: {0}'.format(e)) - password = config['mpd']['password'].get(unicode) + password = mpd_config['password'].get(unicode) if password: try: self.client.password(password) except mpd.CommandError as e: raise ui.UserError( - 'could not authenticate to MPD: {0}'.format(e) + u'could not authenticate to MPD: {0}'.format(e) ) def disconnect(self): @@ -99,7 +105,7 @@ try: return getattr(self.client, command)() except (select.error, mpd.ConnectionError) as err: - log.error(u'mpdstats: {0}'.format(err)) + self._log.error(u'{0}', err) if retries <= 0: # if we exited without breaking, we couldn't reconnect in time :( @@ -141,15 +147,16 @@ class MPDStats(object): - def __init__(self, lib): + def __init__(self, lib, log): self.lib = lib + self._log = log - self.do_rating = config['mpdstats']['rating'].get(bool) - self.rating_mix = config['mpdstats']['rating_mix'].get(float) + self.do_rating = mpd_config['rating'].get(bool) + self.rating_mix = mpd_config['rating_mix'].get(float) self.time_threshold = 10.0 # TODO: maybe add config option? self.now_playing = None - self.mpd = MPDClientWrapper() + self.mpd = MPDClientWrapper(log) def rating(self, play_count, skip_count, rating, skipped): """Calculate a new rating for a song based on play count, skip count, @@ -160,8 +167,8 @@ else: rolling = (rating + (1.0 - rating) / 2.0) stable = (play_count + 1.0) / (play_count + skip_count + 2.0) - return (self.rating_mix * stable - + (1.0 - self.rating_mix) * rolling) + return (self.rating_mix * stable + + (1.0 - self.rating_mix) * rolling) def get_item(self, path): """Return the beets item related to path. @@ -171,12 +178,9 @@ if item: return item else: - log.info(u'mpdstats: item not found: {0}'.format( - displayable_path(path) - )) + self._log.info(u'item not found: {0}', displayable_path(path)) - @staticmethod - def update_item(item, attribute, value=None, increment=None): + def update_item(self, item, attribute, value=None, increment=None): """Update the beets item. Set attribute to value or increment the value of attribute. If the increment argument is used the value is cast to the corresponding type. @@ -192,15 +196,18 @@ item[attribute] = value item.store() - log.debug(u'mpdstats: updated: {0} = {1} [{2}]'.format( - attribute, - item[attribute], - displayable_path(item.path), - )) + self._log.debug(u'updated: {0} = {1} [{2}]', + attribute, + item[attribute], + displayable_path(item.path)) def update_rating(self, item, skipped): - """Update the rating for a beets item. + """Update the rating for a beets item. The `item` can either be a + beets `Item` or None. If the item is None, nothing changes. """ + if item is None: + return + item.load() rating = self.rating( int(item.get('play_count', 0)), @@ -215,6 +222,8 @@ To this end the difference between the song's supposed end time and the current time is calculated. If it's greater than a threshold, the song is considered skipped. 
+ + Returns whether the change was manual (skipped previous song or not) """ diff = abs(song['remaining'] - (time.time() - song['started'])) @@ -228,24 +237,22 @@ if self.do_rating: self.update_rating(song['beets_item'], skipped) + return skipped + def handle_played(self, song): """Updates the play count of a song. """ self.update_item(song['beets_item'], 'play_count', increment=1) - log.info(u'mpdstats: played {0}'.format( - displayable_path(song['path']) - )) + self._log.info(u'played {0}', displayable_path(song['path'])) def handle_skipped(self, song): """Updates the skip count of a song. """ self.update_item(song['beets_item'], 'skip_count', increment=1) - log.info(u'mpdstats: skipped {0}'.format( - displayable_path(song['path']) - )) + self._log.info(u'skipped {0}', displayable_path(song['path'])) def on_stop(self, status): - log.info(u'mpdstats: stop') + self._log.info(u'stop') if self.now_playing: self.handle_song_change(self.now_playing) @@ -253,7 +260,7 @@ self.now_playing = None def on_pause(self, status): - log.info(u'mpdstats: pause') + self._log.info(u'pause') self.now_playing = None def on_play(self, status): @@ -264,30 +271,31 @@ return if is_url(path): - log.info(u'mpdstats: playing stream {0}'.format( - displayable_path(path) - )) + self._log.info(u'playing stream {0}', displayable_path(path)) return played, duration = map(int, status['time'].split(':', 1)) remaining = duration - played if self.now_playing and self.now_playing['path'] != path: - self.handle_song_change(self.now_playing) + skipped = self.handle_song_change(self.now_playing) + # mpd responds twice on a natural new song start + going_to_happen_twice = not skipped + else: + going_to_happen_twice = False + + if not going_to_happen_twice: + self._log.info(u'playing {0}', displayable_path(path)) - log.info(u'mpdstats: playing {0}'.format( - displayable_path(path) - )) - - self.now_playing = { - 'started': time.time(), - 'remaining': remaining, - 'path': path, - 'beets_item': self.get_item(path), - } + self.now_playing = { + 'started': time.time(), + 'remaining': remaining, + 'path': path, + 'beets_item': self.get_item(path), + } - self.update_item(self.now_playing['beets_item'], - 'last_played', value=int(time.time())) + self.update_item(self.now_playing['beets_item'], + 'last_played', value=int(time.time())) def run(self): self.mpd.connect() @@ -302,8 +310,7 @@ if handler: handler(status) else: - log.debug(u'mpdstats: unhandled status "{0}"'. 
- format(status)) + self._log.debug(u'unhandled status "{0}"', status) events = self.mpd.events() @@ -313,50 +320,49 @@ item_types = { 'play_count': types.INTEGER, 'skip_count': types.INTEGER, - 'last_played': library.Date(), + 'last_played': library.DateType(), 'rating': types.FLOAT, } def __init__(self): super(MPDStatsPlugin, self).__init__() - self.config.add({ + mpd_config.add({ 'music_directory': config['directory'].as_filename(), 'rating': True, 'rating_mix': 0.75, - }) - config['mpd'].add({ 'host': u'localhost', 'port': 6600, 'password': u'', }) + mpd_config['password'].redact = True def commands(self): cmd = ui.Subcommand( 'mpdstats', - help='run a MPD client to gather play statistics') + help=u'run a MPD client to gather play statistics') cmd.parser.add_option( - '--host', dest='host', type='string', - help='set the hostname of the server to connect to') + u'--host', dest='host', type='string', + help=u'set the hostname of the server to connect to') cmd.parser.add_option( - '--port', dest='port', type='int', - help='set the port of the MPD server to connect to') + u'--port', dest='port', type='int', + help=u'set the port of the MPD server to connect to') cmd.parser.add_option( - '--password', dest='password', type='string', - help='set the password of the MPD server to connect to') + u'--password', dest='password', type='string', + help=u'set the password of the MPD server to connect to') def func(lib, opts, args): - self.config.set_args(opts) + mpd_config.set_args(opts) # Overrides for MPD settings. if opts.host: - config['mpd']['host'] = opts.host.decode('utf8') + mpd_config['host'] = opts.host.decode('utf8') if opts.port: - config['mpd']['host'] = int(opts.port) + mpd_config['host'] = int(opts.port) if opts.password: - config['mpd']['password'] = opts.password.decode('utf8') + mpd_config['password'] = opts.password.decode('utf8') try: - MPDStats(lib).run() + MPDStats(lib, self._log).run() except KeyboardInterrupt: pass diff -Nru beets-1.3.8+dfsg/beetsplug/mpdupdate.py beets-1.3.19/beetsplug/mpdupdate.py --- beets-1.3.8+dfsg/beetsplug/mpdupdate.py 2014-04-14 00:39:49.000000000 +0000 +++ beets-1.3.19/beetsplug/mpdupdate.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -20,17 +21,13 @@ port: 6600 password: seekrit """ -from __future__ import print_function +from __future__ import division, absolute_import, print_function from beets.plugins import BeetsPlugin import os import socket from beets import config -# Global variable so that mpdupdate can detect database changes and run only -# once before beets exits. -database_changed = False - # No need to introduce a dependency on an MPD library for such a # simple use case. Here's a simple socket abstraction to make things @@ -66,37 +63,6 @@ self.sock.close() -def update_mpd(host='localhost', port=6600, password=None): - """Sends the "update" command to the MPD server indicated, - possibly authenticating with a password first. 
- """ - print('Updating MPD database...') - - s = BufferedSocket(host, port) - resp = s.readline() - if 'OK MPD' not in resp: - print('MPD connection failed:', repr(resp)) - return - - if password: - s.send('password "%s"\n' % password) - resp = s.readline() - if 'OK' not in resp: - print('Authentication failed:', repr(resp)) - s.send('close\n') - s.close() - return - - s.send('update\n') - resp = s.readline() - if 'updating_db' not in resp: - print('Update failed:', repr(resp)) - - s.send('close\n') - s.close() - print('... updated.') - - class MPDUpdatePlugin(BeetsPlugin): def __init__(self): super(MPDUpdatePlugin, self).__init__() @@ -105,6 +71,7 @@ 'port': 6600, 'password': u'', }) + config['mpd']['password'].redact = True # For backwards compatibility, use any values from the # plugin-specific "mpdupdate" section. @@ -112,18 +79,50 @@ if self.config[key].exists(): config['mpd'][key] = self.config[key].get() + self.register_listener('database_change', self.db_change) -@MPDUpdatePlugin.listen('database_change') -def handle_change(lib=None): - global database_changed - database_changed = True + def db_change(self, lib, model): + self.register_listener('cli_exit', self.update) - -@MPDUpdatePlugin.listen('cli_exit') -def update(lib=None): - if database_changed: - update_mpd( + def update(self, lib): + self.update_mpd( config['mpd']['host'].get(unicode), config['mpd']['port'].get(int), config['mpd']['password'].get(unicode), ) + + def update_mpd(self, host='localhost', port=6600, password=None): + """Sends the "update" command to the MPD server indicated, + possibly authenticating with a password first. + """ + self._log.info('Updating MPD database...') + + try: + s = BufferedSocket(host, port) + except socket.error as e: + self._log.warning(u'MPD connection failed: {0}', + unicode(e.strerror)) + return + + resp = s.readline() + if 'OK MPD' not in resp: + self._log.warning(u'MPD connection failed: {0!r}', resp) + return + + if password: + s.send('password "%s"\n' % password) + resp = s.readline() + if 'OK' not in resp: + self._log.warning(u'Authentication failed: {0!r}', resp) + s.send('close\n') + s.close() + return + + s.send('update\n') + resp = s.readline() + if 'updating_db' not in resp: + self._log.warning(u'Update failed: {0!r}', resp) + + s.send('close\n') + s.close() + self._log.info(u'Database updated.') diff -Nru beets-1.3.8+dfsg/beetsplug/permissions.py beets-1.3.19/beetsplug/permissions.py --- beets-1.3.8+dfsg/beetsplug/permissions.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beetsplug/permissions.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- + +from __future__ import division, absolute_import, print_function + +"""Fixes file permissions after the file gets written on import. Put something +like the following in your config.yaml to configure: + + permissions: + file: 644 + dir: 755 +""" +import os +from beets import config, util +from beets.plugins import BeetsPlugin +from beets.util import ancestry + + +def convert_perm(perm): + """If the perm is a int then just return it, otherwise convert it to oct. + """ + if isinstance(perm, int): + return perm + else: + return int(perm, 8) + + +def check_permissions(path, permission): + """Checks the permissions of a path. + """ + return oct(os.stat(path).st_mode & 0o777) == oct(permission) + + +def dirs_in_library(library, item): + """Creates a list of ancestor directories in the beets library path. 
+ """ + return [ancestor + for ancestor in ancestry(item) + if ancestor.startswith(library)][1:] + + +class Permissions(BeetsPlugin): + def __init__(self): + super(Permissions, self).__init__() + + # Adding defaults. + self.config.add({ + u'file': '644', + u'dir': '755' + }) + + self.register_listener('item_imported', permissions) + self.register_listener('album_imported', permissions) + + +def permissions(lib, item=None, album=None): + """Running the permission fixer. + """ + # Getting the config. + file_perm = config['permissions']['file'].get() + dir_perm = config['permissions']['dir'].get() + + # Converts permissions to oct. + file_perm = convert_perm(file_perm) + dir_perm = convert_perm(dir_perm) + + # Create chmod_queue. + file_chmod_queue = [] + if item: + file_chmod_queue.append(item.path) + elif album: + for album_item in album.items(): + file_chmod_queue.append(album_item.path) + + # A set of directories to change permissions for. + dir_chmod_queue = set() + + for path in file_chmod_queue: + # Changing permissions on the destination file. + os.chmod(util.bytestring_path(path), file_perm) + + # Checks if the destination path has the permissions configured. + if not check_permissions(util.bytestring_path(path), file_perm): + message = u'There was a problem setting permission on {}'.format( + path) + print(message) + + # Adding directories to the directory chmod queue. + dir_chmod_queue.update( + dirs_in_library(lib.directory, + path)) + + # Change permissions for the directories. + for path in dir_chmod_queue: + # Chaning permissions on the destination directory. + os.chmod(util.bytestring_path(path), dir_perm) + + # Checks if the destination path has the permissions configured. + if not check_permissions(util.bytestring_path(path), dir_perm): + message = u'There was a problem setting permission on {}'.format( + path) + print(message) diff -Nru beets-1.3.8+dfsg/beetsplug/play.py beets-1.3.19/beetsplug/play.py --- beets-1.3.8+dfsg/beetsplug/play.py 2014-09-14 18:35:06.000000000 +0000 +++ beets-1.3.19/beetsplug/play.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, David Hamp-Gonsalves +# Copyright 2016, David Hamp-Gonsalves # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,96 +15,19 @@ """Send the results of a query to the configured music player as a playlist. """ +from __future__ import division, absolute_import, print_function + from beets.plugins import BeetsPlugin from beets.ui import Subcommand from beets import config from beets import ui from beets import util from os.path import relpath -import platform -import logging -import shlex from tempfile import NamedTemporaryFile -log = logging.getLogger('beets') - - -def play_music(lib, opts, args): - """Execute query, create temporary playlist and execute player - command passing that playlist. - """ - command_str = config['play']['command'].get() - use_folders = config['play']['use_folders'].get(bool) - relative_to = config['play']['relative_to'].get() - if relative_to: - relative_to = util.normpath(relative_to) - if command_str: - command = shlex.split(command_str) - else: - # If a command isn't set, then let the OS decide how to open the - # playlist. - sys_name = platform.system() - if sys_name == 'Darwin': - command = ['open'] - elif sys_name == 'Windows': - command = ['start'] - else: - # If not Mac or Windows, then assume Unixy. 
- command = ['xdg-open'] - - # Preform search by album and add folders rather then tracks to playlist. - if opts.album: - selection = lib.albums(ui.decargs(args)) - paths = [] - - for album in selection: - if use_folders: - paths.append(album.item_dir()) - else: - # TODO use core's sorting functionality - paths.extend([item.path for item in sorted( - album.items(), key=lambda item: (item.disc, item.track))]) - item_type = 'album' - - # Preform item query and add tracks to playlist. - else: - selection = lib.items(ui.decargs(args)) - paths = [item.path for item in selection] - item_type = 'track' - - item_type += 's' if len(selection) > 1 else '' - - if not selection: - ui.print_(ui.colorize('yellow', 'No {0} to play.'.format(item_type))) - return - - # Warn user before playing any huge playlists. - if len(selection) > 100: - ui.print_(ui.colorize( - 'yellow', - 'You are about to queue {0} {1}.'.format(len(selection), item_type) - )) - - if ui.input_options(('Continue', 'Abort')) == 'a': - return - - # Create temporary m3u file to hold our playlist. - m3u = NamedTemporaryFile('w', suffix='.m3u', delete=False) - for item in paths: - if relative_to: - m3u.write(relpath(item, relative_to) + '\n') - else: - m3u.write(item + '\n') - m3u.close() - - command.append(m3u.name) - - # Invoke the command and log the output. - output = util.command_output(command) - if output: - log.debug(u'Output of {0}: {1}'.format(command[0], output)) - - ui.print_(u'Playing {0} {1}.'.format(len(selection), item_type)) +# Indicate where arguments should be inserted into the command string. +# If this is missing, they're placed at the end. +ARGS_MARKER = '$args' class PlayPlugin(BeetsPlugin): @@ -115,17 +39,117 @@ 'command': None, 'use_folders': False, 'relative_to': None, + 'raw': False, + # Backwards compatibility. See #1803 and line 74 + 'warning_threshold': -2, + 'warning_treshold': 100, }) def commands(self): play_command = Subcommand( 'play', - help='send music to a player as a playlist' + help=u'send music to a player as a playlist' ) + play_command.parser.add_album_option() play_command.parser.add_option( - '-a', '--album', - action='store_true', default=False, - help='query and load albums rather than tracks' + u'-A', u'--args', + action='store', + help=u'add additional arguments to the command', ) - play_command.func = play_music + play_command.func = self.play_music return [play_command] + + def play_music(self, lib, opts, args): + """Execute query, create temporary playlist and execute player + command passing that playlist, at request insert optional arguments. + """ + command_str = config['play']['command'].get() + if not command_str: + command_str = util.open_anything() + use_folders = config['play']['use_folders'].get(bool) + relative_to = config['play']['relative_to'].get() + raw = config['play']['raw'].get(bool) + warning_threshold = config['play']['warning_threshold'].get(int) + # We use -2 as a default value for warning_threshold to detect if it is + # set or not. We can't use a falsey value because it would have an + # actual meaning in the configuration of this plugin, and we do not use + # -1 because some people might use it as a value to obtain no warning, + # which wouldn't be that bad of a practice. + if warning_threshold == -2: + # if warning_threshold has not been set by user, look for + # warning_treshold, to preserve backwards compatibility. See #1803. + # warning_treshold has the correct default value of 100. 
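# A small, self-contained sketch of the `$args` marker handling added to the
# play plugin above: when the configured player command contains the marker,
# the extra arguments replace it in place; otherwise they are appended at the
# end. The command strings below are made-up examples.
ARGS_MARKER = '$args'


def insert_args(command_str, extra_args):
    if ARGS_MARKER in command_str:
        return command_str.replace(ARGS_MARKER, extra_args)
    return u'{0} {1}'.format(command_str, extra_args)


print(insert_args(u'mpv $args --no-audio-display', u'--shuffle'))
# mpv --shuffle --no-audio-display
print(insert_args(u'vlc', u'--random'))
# vlc --random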
+ warning_threshold = config['play']['warning_treshold'].get(int) + + if relative_to: + relative_to = util.normpath(relative_to) + + # Add optional arguments to the player command. + if opts.args: + if ARGS_MARKER in command_str: + command_str = command_str.replace(ARGS_MARKER, opts.args) + else: + command_str = u"{} {}".format(command_str, opts.args) + + # Perform search by album and add folders rather than tracks to + # playlist. + if opts.album: + selection = lib.albums(ui.decargs(args)) + paths = [] + + sort = lib.get_default_album_sort() + for album in selection: + if use_folders: + paths.append(album.item_dir()) + else: + paths.extend(item.path + for item in sort.sort(album.items())) + item_type = 'album' + + # Perform item query and add tracks to playlist. + else: + selection = lib.items(ui.decargs(args)) + paths = [item.path for item in selection] + if relative_to: + paths = [relpath(path, relative_to) for path in paths] + item_type = 'track' + + item_type += 's' if len(selection) > 1 else '' + + if not selection: + ui.print_(ui.colorize('text_warning', + u'No {0} to play.'.format(item_type))) + return + + # Warn user before playing any huge playlists. + if warning_threshold and len(selection) > warning_threshold: + ui.print_(ui.colorize( + 'text_warning', + u'You are about to queue {0} {1}.'.format( + len(selection), item_type))) + + if ui.input_options(('Continue', 'Abort')) == 'a': + return + + ui.print_(u'Playing {0} {1}.'.format(len(selection), item_type)) + if raw: + open_args = paths + else: + open_args = [self._create_tmp_playlist(paths)] + + self._log.debug(u'executing command: {} {}', command_str, + b' '.join(open_args)) + try: + util.interactive_open(open_args, command_str) + except OSError as exc: + raise ui.UserError( + "Could not play the query: {0}".format(exc)) + + def _create_tmp_playlist(self, paths_list): + """Create a temporary .m3u file. Return the filename. + """ + m3u = NamedTemporaryFile('wb', suffix='.m3u', delete=False) + for item in paths_list: + m3u.write(item + b'\n') + m3u.close() + return m3u.name diff -Nru beets-1.3.8+dfsg/beetsplug/plexupdate.py beets-1.3.19/beetsplug/plexupdate.py --- beets-1.3.8+dfsg/beetsplug/plexupdate.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beetsplug/plexupdate.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,92 @@ +# -*- coding: utf-8 -*- + +"""Updates an Plex library whenever the beets library is changed. + +Plex Home users enter the Plex Token to enable updating. +Put something like the following in your config.yaml to configure: + plex: + host: localhost + port: 32400 + token: token +""" +from __future__ import division, absolute_import, print_function + +import requests +from urlparse import urljoin +from urllib import urlencode +import xml.etree.ElementTree as ET +from beets import config +from beets.plugins import BeetsPlugin + + +def get_music_section(host, port, token, library_name): + """Getting the section key for the music library in Plex. + """ + api_endpoint = append_token('library/sections', token) + url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint) + + # Sends request. + r = requests.get(url) + + # Parse xml tree and extract music section key. + tree = ET.fromstring(r.content) + for child in tree.findall('Directory'): + if child.get('title') == library_name: + return child.get('key') + + +def update_plex(host, port, token, library_name): + """Sends request to the Plex api to start a library refresh. + """ + # Getting section key and build url. 
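# A hedged sketch of how the Plex refresh URL is assembled above: the section
# key is interpolated into the endpoint and, when a Plex Home token is
# configured, it is appended as an X-Plex-Token query parameter. The imports
# mirror the Python 2 stdlib modules the plugin itself uses; the host, port,
# token and section key below are placeholders.
from urlparse import urljoin
from urllib import urlencode


def refresh_url(host, port, token, section_key):
    endpoint = 'library/sections/{0}/refresh'.format(section_key)
    if token:
        endpoint += '?' + urlencode({'X-Plex-Token': token})
    return urljoin('http://{0}:{1}'.format(host, port), endpoint)


print(refresh_url('localhost', 32400, 'token', '5'))
# http://localhost:32400/library/sections/5/refresh?X-Plex-Token=token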
+ section_key = get_music_section(host, port, token, library_name) + api_endpoint = 'library/sections/{0}/refresh'.format(section_key) + api_endpoint = append_token(api_endpoint, token) + url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint) + + # Sends request and returns requests object. + r = requests.get(url) + return r + + +def append_token(url, token): + """Appends the Plex Home token to the api call if required. + """ + if token: + url += '?' + urlencode({'X-Plex-Token': token}) + return url + + +class PlexUpdate(BeetsPlugin): + def __init__(self): + super(PlexUpdate, self).__init__() + + # Adding defaults. + config['plex'].add({ + u'host': u'localhost', + u'port': 32400, + u'token': u'', + u'library_name': u'Music'}) + + self.register_listener('database_change', self.listen_for_db_change) + + def listen_for_db_change(self, lib, model): + """Listens for beets db change and register the update for the end""" + self.register_listener('cli_exit', self.update) + + def update(self, lib): + """When the client exists try to send refresh request to Plex server. + """ + self._log.info(u'Updating Plex library...') + + # Try to send update request. + try: + update_plex( + config['plex']['host'].get(), + config['plex']['port'].get(), + config['plex']['token'].get(), + config['plex']['library_name'].get()) + self._log.info(u'... started.') + + except requests.exceptions.RequestException: + self._log.warning(u'Update failed.') diff -Nru beets-1.3.8+dfsg/beetsplug/random.py beets-1.3.19/beetsplug/random.py --- beets-1.3.8+dfsg/beetsplug/random.py 2014-04-14 00:51:02.000000000 +0000 +++ beets-1.3.19/beetsplug/random.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Philippe Mongeau. +# Copyright 2016, Philippe Mongeau. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,10 +15,10 @@ """Get a random song or album from the library. 
""" -from __future__ import absolute_import +from __future__ import division, absolute_import, print_function + from beets.plugins import BeetsPlugin -from beets.ui import Subcommand, decargs, print_obj -from beets.util.functemplate import Template +from beets.ui import Subcommand, decargs, print_ import random from operator import attrgetter from itertools import groupby @@ -25,11 +26,6 @@ def random_item(lib, opts, args): query = decargs(args) - if opts.path: - fmt = '$path' - else: - fmt = opts.format - template = Template(fmt) if fmt else None if opts.album: objs = list(lib.albums(query)) @@ -66,20 +62,17 @@ objs = random.sample(objs, number) for item in objs: - print_obj(item, lib, template) + print_(format(item)) random_cmd = Subcommand('random', - help='chose a random track or album') -random_cmd.parser.add_option('-a', '--album', action='store_true', - help='choose an album instead of track') -random_cmd.parser.add_option('-p', '--path', action='store_true', - help='print the path of the matched item') -random_cmd.parser.add_option('-f', '--format', action='store', - help='print with custom format', default=None) -random_cmd.parser.add_option('-n', '--number', action='store', type="int", - help='number of objects to choose', default=1) -random_cmd.parser.add_option('-e', '--equal-chance', action='store_true', - help='each artist has the same chance') + help=u'chose a random track or album') +random_cmd.parser.add_option( + u'-n', u'--number', action='store', type="int", + help=u'number of objects to choose', default=1) +random_cmd.parser.add_option( + u'-e', u'--equal-chance', action='store_true', + help=u'each artist has the same chance') +random_cmd.parser.add_all_common_options() random_cmd.func = random_item diff -Nru beets-1.3.8+dfsg/beetsplug/replaygain.py beets-1.3.19/beetsplug/replaygain.py --- beets-1.3.8+dfsg/beetsplug/replaygain.py 2014-09-15 17:24:37.000000000 +0000 +++ beets-1.3.19/beetsplug/replaygain.py 2016-06-26 00:42:09.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Fabrice Laporte, Yevgeny Bezman, and Adrian Sampson. +# Copyright 2016, Fabrice Laporte, Yevgeny Bezman, and Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,20 +13,20 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. -import logging +from __future__ import division, absolute_import, print_function + import subprocess import os import collections import itertools import sys import warnings +import re +from beets import logging from beets import ui from beets.plugins import BeetsPlugin from beets.util import syspath, command_output, displayable_path -from beets import config - -log = logging.getLogger('beets.replaygain') # Utilities. @@ -41,6 +42,11 @@ """ +class FatalGstreamerPluginReplayGainError(FatalReplayGainError): + """Raised when a fatal error occurs in the GStreamerBackend when + loading the required plugins.""" + + def call(args): """Execute the command and return its output or raise a ReplayGainError on failure. 
@@ -49,13 +55,13 @@ return command_output(args) except subprocess.CalledProcessError as e: raise ReplayGainError( - "{0} exited with status {1}".format(args[0], e.returncode) + u"{0} exited with status {1}".format(args[0], e.returncode) ) except UnicodeEncodeError: # Due to a bug in Python 2's subprocess on Windows, Unicode # filenames can fail to encode on that platform. See: # http://code.google.com/p/beets/issues/detail?id=499 - raise ReplayGainError("argument encoding failed") + raise ReplayGainError(u"argument encoding failed") # Backend base and plumbing classes. @@ -67,10 +73,12 @@ class Backend(object): """An abstract class representing engine for calculating RG values. """ - def __init__(self, config): + + def __init__(self, config, log): """Initialize the backend with the configuration view for the plugin. """ + self._log = log def compute_track_gain(self, items): raise NotImplementedError() @@ -81,11 +89,168 @@ raise NotImplementedError() -# mpgain/aacgain CLI tool backend. +# bsg1770gain backend +class Bs1770gainBackend(Backend): + """bs1770gain is a loudness scanner compliant with ITU-R BS.1770 and + its flavors EBU R128, ATSC A/85 and Replaygain 2.0. + """ + + def __init__(self, config, log): + super(Bs1770gainBackend, self).__init__(config, log) + config.add({ + 'chunk_at': 5000, + 'method': 'replaygain', + }) + self.chunk_at = config['chunk_at'].as_number() + self.method = b'--' + bytes(config['method'].get(unicode)) + + cmd = b'bs1770gain' + try: + call([cmd, self.method]) + self.command = cmd + except OSError: + raise FatalReplayGainError( + u'Is bs1770gain installed? Is your method in config correct?' + ) + if not self.command: + raise FatalReplayGainError( + u'no replaygain command found: install bs1770gain' + ) + + def compute_track_gain(self, items): + """Computes the track gain of the given tracks, returns a list + of TrackGain objects. + """ + + output = self.compute_gain(items, False) + return output + + def compute_album_gain(self, album): + """Computes the album gain of the given album, returns an + AlbumGain object. + """ + # TODO: What should be done when not all tracks in the album are + # supported? + + supported_items = album.items() + output = self.compute_gain(supported_items, True) + + if not output: + raise ReplayGainError(u'no output from bs1770gain') + return AlbumGain(output[-1], output[:-1]) + + def isplitter(self, items, chunk_at): + """Break an iterable into chunks of at most size `chunk_at`, + generating lists for each chunk. + """ + iterable = iter(items) + while True: + result = [] + for i in range(chunk_at): + try: + a = next(iterable) + except StopIteration: + break + else: + result.append(a) + if result: + yield result + else: + break + + def compute_gain(self, items, is_album): + """Computes the track or album gain of a list of items, returns + a list of TrackGain objects. + When computing album gain, the last TrackGain object returned is + the album gain + """ + + if len(items) == 0: + return [] + albumgaintot = 0.0 + albumpeaktot = 0.0 + returnchunks = [] + + # In the case of very large sets of music, we break the tracks + # into smaller chunks and process them one at a time. This + # avoids running out of memory. 
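# A standalone sketch of the chunking strategy described above: a long list of
# tracks is split into fixed-size chunks so each external scanner run stays
# small, and the per-chunk album figures are then averaged. The Gain tuple and
# the per-chunk numbers below are illustrative stand-ins for real backend
# output.
from collections import namedtuple

Gain = namedtuple('Gain', 'gain peak')


def split_chunks(items, chunk_at):
    """Yield successive lists of at most `chunk_at` items."""
    for start in range(0, len(items), chunk_at):
        yield items[start:start + chunk_at]


def average_album_gain(chunk_results):
    """Average per-chunk album Gain values, as the loop above does."""
    n = len(chunk_results)
    return Gain(sum(g.gain for g in chunk_results) / n,
                sum(g.peak for g in chunk_results) / n)


print(list(split_chunks(list(range(12)), 5)))          # [[0..4], [5..9], [10, 11]]
print(average_album_gain([Gain(-8.4, 0.98), Gain(-7.9, 0.95)]))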
+ if len(items) > self.chunk_at: + i = 0 + for chunk in self.isplitter(items, self.chunk_at): + i += 1 + returnchunk = self.compute_chunk_gain(chunk, is_album) + albumgaintot += returnchunk[-1].gain + albumpeaktot += returnchunk[-1].peak + returnchunks = returnchunks + returnchunk[0:-1] + returnchunks.append(Gain(albumgaintot / i, albumpeaktot / i)) + return returnchunks + else: + return self.compute_chunk_gain(items, is_album) + + def compute_chunk_gain(self, items, is_album): + """Compute ReplayGain values and return a list of results + dictionaries as given by `parse_tool_output`. + """ + # Construct shell command. + cmd = [self.command] + cmd = cmd + [self.method] + cmd = cmd + [b'-p'] + + # Workaround for Windows: the underlying tool fails on paths + # with the \\?\ prefix, so we don't use it here. This + # prevents the backend from working with long paths. + args = cmd + [syspath(i.path, prefix=False) for i in items] + + # Invoke the command. + self._log.debug( + u'executing {0}', u' '.join(map(displayable_path, args)) + ) + output = call(args) + + self._log.debug(u'analysis finished: {0}', output) + results = self.parse_tool_output(output, + len(items) + is_album) + self._log.debug(u'{0} items, {1} results', len(items), len(results)) + return results + + def parse_tool_output(self, text, num_lines): + """Given the output from bs1770gain, parse the text and + return a list of dictionaries + containing information about each analyzed file. + """ + out = [] + data = text.decode('utf8', errors='ignore') + regex = re.compile( + u'(\\s{2,2}\\[\\d+\\/\\d+\\].*?|\\[ALBUM\\].*?)' + '(?=\\s{2,2}\\[\\d+\\/\\d+\\]|\\s{2,2}\\[ALBUM\\]' + ':|done\\.\\s)', re.DOTALL | re.UNICODE) + results = re.findall(regex, data) + for parts in results[0:num_lines]: + part = parts.split(b'\n') + if len(part) == 0: + self._log.debug(u'bad tool output: {0!r}', text) + raise ReplayGainError(u'bs1770gain failed') + + try: + song = { + 'file': part[0], + 'gain': float((part[1].split('/'))[1].split('LU')[0]), + 'peak': float(part[2].split('/')[1]), + } + except IndexError: + self._log.info(u'bs1770gain reports (faulty file?): {}', parts) + continue + + out.append(Gain(song['gain'], song['peak'])) + return out + +# mpgain/aacgain CLI tool backend. class CommandBackend(Backend): - def __init__(self, config): + + def __init__(self, config, log): + super(CommandBackend, self).__init__(config, log) config.add({ 'command': u"", 'noclip': True, @@ -97,21 +262,20 @@ # Explicit executable path. if not os.path.isfile(self.command): raise FatalReplayGainError( - 'replaygain command does not exist: {0}'.format( - self.command - ) + u'replaygain command does not exist: {0}'.format( + self.command) ) else: # Check whether the program is in $PATH. - for cmd in ('mp3gain', 'aacgain'): + for cmd in (b'mp3gain', b'aacgain'): try: - call([cmd, '-v']) + call([cmd, b'-v']) self.command = cmd except OSError: pass if not self.command: raise FatalReplayGainError( - 'no replaygain command found: install mp3gain or aacgain' + u'no replaygain command found: install mp3gain or aacgain' ) self.noclip = config['noclip'].get(bool) @@ -122,7 +286,7 @@ """Computes the track gain of the given tracks, returns a list of TrackGain objects. """ - supported_items = filter(self.format_supported, items) + supported_items = list(filter(self.format_supported, items)) output = self.compute_gain(supported_items, False) return output @@ -133,9 +297,9 @@ # TODO: What should be done when not all tracks in the album are # supported? 
- supported_items = filter(self.format_supported, album.items()) + supported_items = list(filter(self.format_supported, album.items())) if len(supported_items) != len(album.items()): - log.debug(u'replaygain: tracks are of unsupported format') + self._log.debug(u'tracks are of unsupported format') return AlbumGain(None, []) output = self.compute_gain(supported_items, True) @@ -158,6 +322,7 @@ the album gain """ if len(items) == 0: + self._log.debug(u'no supported tracks to analyze') return [] """Compute ReplayGain values and return a list of results @@ -169,26 +334,22 @@ # tag-writing; this turns the mp3gain/aacgain tool into a gain # calculator rather than a tag manipulator because we take care # of changing tags ourselves. - cmd = [self.command, '-o', '-s', 's'] + cmd = [self.command, b'-o', b'-s', b's'] if self.noclip: # Adjust to avoid clipping. - cmd = cmd + ['-k'] + cmd = cmd + [b'-k'] else: # Disable clipping warning. - cmd = cmd + ['-c'] - cmd = cmd + ['-a' if is_album else '-r'] - cmd = cmd + ['-d', str(self.gain_offset)] + cmd = cmd + [b'-c'] + cmd = cmd + [b'-d', bytes(self.gain_offset)] cmd = cmd + [syspath(i.path) for i in items] - log.debug(u'replaygain: analyzing {0} files'.format(len(items))) - log.debug(u"replaygain: executing {0}" - .format(" ".join(map(displayable_path, cmd)))) + self._log.debug(u'analyzing {0} files', len(items)) + self._log.debug(u"executing {0}", " ".join(map(displayable_path, cmd))) output = call(cmd) - log.debug(u'replaygain: analysis finished') - results = self.parse_tool_output(output, - len(items) + (1 if is_album else 0)) - - return results + self._log.debug(u'analysis finished') + return self.parse_tool_output(output, + len(items) + (1 if is_album else 0)) def parse_tool_output(self, text, num_lines): """Given the tab-delimited output from an invocation of mp3gain @@ -196,11 +357,11 @@ containing information about each analyzed file. """ out = [] - for line in text.split('\n')[1:num_lines + 1]: - parts = line.split('\t') - if len(parts) != 6 or parts[0] == 'File': - log.debug(u'replaygain: bad tool output: {0}'.format(text)) - raise ReplayGainError('mp3gain failed') + for line in text.split(b'\n')[1:num_lines + 1]: + parts = line.split(b'\t') + if len(parts) != 6 or parts[0] == b'File': + self._log.debug(u'bad tool output: {0}', text) + raise ReplayGainError(u'mp3gain failed') d = { 'file': parts[0], 'mp3gain': int(parts[1]), @@ -216,8 +377,10 @@ # GStreamer-based backend. -class GStreamerBackend(object): - def __init__(self, config): +class GStreamerBackend(Backend): + + def __init__(self, config, log): + super(GStreamerBackend, self).__init__(config, log) self._import_gst() # Initialized a GStreamer pipeline of the form filesrc -> @@ -231,6 +394,12 @@ self._res = self.Gst.ElementFactory.make("audioresample", "res") self._rg = self.Gst.ElementFactory.make("rganalysis", "rg") + if self._src is None or self._decbin is None or self._conv is None \ + or self._res is None or self._rg is None: + raise FatalGstreamerPluginReplayGainError( + u"Failed to load required GStreamer plugins" + ) + # We check which files need gain ourselves, so all files given # to rganalsys should have their gain computed, even if it # already exists. 
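# A minimal sketch of the mp3gain/aacgain invocation that CommandBackend
# assembles above: `-o` selects tab-delimited output, `-s s` skips tag writing
# so the tool acts purely as a gain calculator, `-k`/`-c` toggles clipping
# behaviour, and `-d` passes the gain offset. Plain strings are used here for
# readability; the real backend passes byte strings and syspath-converted
# paths. The file paths are placeholders.
def build_gain_cmd(command, paths, noclip=True, gain_offset=0):
    cmd = [command, '-o', '-s', 's']
    cmd += ['-k'] if noclip else ['-c']
    cmd += ['-d', str(gain_offset)]
    return cmd + list(paths)


print(build_gain_cmd('mp3gain', ['/music/a.mp3', '/music/b.mp3']))
# ['mp3gain', '-o', '-s', 's', '-k', '-d', '0', '/music/a.mp3', '/music/b.mp3']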
@@ -273,20 +442,26 @@ try: import gi - gi.require_version('Gst', '1.0') + except ImportError: + raise FatalReplayGainError( + u"Failed to load GStreamer: python-gi not found" + ) - from gi.repository import GObject, Gst, GLib - # Calling GObject.threads_init() is not needed for - # PyGObject 3.10.2+ - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - GObject.threads_init() - Gst.init([sys.argv[0]]) - except: + try: + gi.require_version('Gst', '1.0') + except ValueError as e: raise FatalReplayGainError( - "Failed to load GStreamer; check that python-gi is installed" + u"Failed to load GStreamer 1.0: {0}".format(e) ) + from gi.repository import GObject, Gst, GLib + # Calling GObject.threads_init() is not needed for + # PyGObject 3.10.2+ + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + GObject.threads_init() + Gst.init([sys.argv[0]]) + self.GObject = GObject self.GLib = GLib self.Gst = Gst @@ -311,7 +486,7 @@ def compute_track_gain(self, items): self.compute(items, False) if len(self._file_tags) != len(items): - raise ReplayGainError("Some tracks did not receive tags") + raise ReplayGainError(u"Some tracks did not receive tags") ret = [] for item in items: @@ -324,16 +499,27 @@ items = list(album.items()) self.compute(items, True) if len(self._file_tags) != len(items): - raise ReplayGainError("Some items in album did not receive tags") + raise ReplayGainError(u"Some items in album did not receive tags") - ret = [] + # Collect track gains. + track_gains = [] for item in items: - ret.append(Gain(self._file_tags[item]["TRACK_GAIN"], - self._file_tags[item]["TRACK_PEAK"])) + try: + gain = self._file_tags[item]["TRACK_GAIN"] + peak = self._file_tags[item]["TRACK_PEAK"] + except KeyError: + raise ReplayGainError(u"results missing for track") + track_gains.append(Gain(gain, peak)) + # Get album gain information from the last track. last_tags = self._file_tags[items[-1]] - return AlbumGain(Gain(last_tags["ALBUM_GAIN"], - last_tags["ALBUM_PEAK"]), ret) + try: + gain = last_tags["ALBUM_GAIN"] + peak = last_tags["ALBUM_PEAK"] + except KeyError: + raise ReplayGainError(u"results missing for album") + + return AlbumGain(Gain(gain, peak), track_gains) def close(self): self._bus.remove_signal_watch() @@ -352,10 +538,9 @@ err, debug = message.parse_error() f = self._src.get_property("location") # A GStreamer error, either an unsupported format or a bug. - self._error = \ - ReplayGainError(u"Error {0} - {1} on file {2}".format(err, - debug, - f)) + self._error = ReplayGainError( + u"Error {0!r} - {1!r} on file {2!r}".format(err, debug, f) + ) def _on_tag(self, bus, message): tags = message.parse_tag() @@ -457,6 +642,148 @@ assert(peer is None) +class AudioToolsBackend(Backend): + """ReplayGain backend that uses `Python Audio Tools + <http://audiotools.sourceforge.net/>`_ and its capabilities to read more + file formats and compute ReplayGain values using it replaygain module. + """ + + def __init__(self, config, log): + super(AudioToolsBackend, self).__init__(config, log) + self._import_audiotools() + + def _import_audiotools(self): + """Check whether it's possible to import the necessary modules. + There is no check on the file formats at runtime. 
+ + :raises :exc:`ReplayGainError`: if the modules cannot be imported + """ + try: + import audiotools + import audiotools.replaygain + except ImportError: + raise FatalReplayGainError( + u"Failed to load audiotools: audiotools not found" + ) + self._mod_audiotools = audiotools + self._mod_replaygain = audiotools.replaygain + + def open_audio_file(self, item): + """Open the file to read the PCM stream from the using + ``item.path``. + + :return: the audiofile instance + :rtype: :class:`audiotools.AudioFile` + :raises :exc:`ReplayGainError`: if the file is not found or the + file format is not supported + """ + try: + audiofile = self._mod_audiotools.open(item.path) + except IOError: + raise ReplayGainError( + u"File {} was not found".format(item.path) + ) + except self._mod_audiotools.UnsupportedFile: + raise ReplayGainError( + u"Unsupported file type {}".format(item.format) + ) + + return audiofile + + def init_replaygain(self, audiofile, item): + """Return an initialized :class:`audiotools.replaygain.ReplayGain` + instance, which requires the sample rate of the song(s) on which + the ReplayGain values will be computed. The item is passed in case + the sample rate is invalid to log the stored item sample rate. + + :return: initialized replagain object + :rtype: :class:`audiotools.replaygain.ReplayGain` + :raises: :exc:`ReplayGainError` if the sample rate is invalid + """ + try: + rg = self._mod_replaygain.ReplayGain(audiofile.sample_rate()) + except ValueError: + raise ReplayGainError( + u"Unsupported sample rate {}".format(item.samplerate)) + return + return rg + + def compute_track_gain(self, items): + """Compute ReplayGain values for the requested items. + + :return list: list of :class:`Gain` objects + """ + return [self._compute_track_gain(item) for item in items] + + def _title_gain(self, rg, audiofile): + """Get the gain result pair from PyAudioTools using the `ReplayGain` + instance `rg` for the given `audiofile`. + + Wraps `rg.title_gain(audiofile.to_pcm())` and throws a + `ReplayGainError` when the library fails. + """ + try: + # The method needs an audiotools.PCMReader instance that can + # be obtained from an audiofile instance. + return rg.title_gain(audiofile.to_pcm()) + except ValueError as exc: + # `audiotools.replaygain` can raise a `ValueError` if the sample + # rate is incorrect. + self._log.debug(u'error in rg.title_gain() call: {}', exc) + raise ReplayGainError(u'audiotools audio data error') + + def _compute_track_gain(self, item): + """Compute ReplayGain value for the requested item. + + :rtype: :class:`Gain` + """ + audiofile = self.open_audio_file(item) + rg = self.init_replaygain(audiofile, item) + + # Each call to title_gain on a ReplayGain object returns peak and gain + # of the track. + rg_track_gain, rg_track_peak = self._title_gain(rg, audiofile) + + self._log.debug(u'ReplayGain for track {0} - {1}: {2:.2f}, {3:.2f}', + item.artist, item.title, rg_track_gain, rg_track_peak) + return Gain(gain=rg_track_gain, peak=rg_track_peak) + + def compute_album_gain(self, album): + """Compute ReplayGain values for the requested album and its items. + + :rtype: :class:`AlbumGain` + """ + self._log.debug(u'Analysing album {0}', album) + + # The first item is taken and opened to get the sample rate to + # initialize the replaygain object. The object is used for all the + # tracks in the album to get the album values. 
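# A hedged sketch of the PyAudioTools calls AudioToolsBackend relies on above:
# open every file, seed a single ReplayGain analyzer with the first track's
# sample rate, push each track's PCM stream through title_gain(), then read
# album_gain() once all tracks have been analyzed. It assumes the `audiotools`
# package and its `replaygain` module are installed; the paths are
# placeholders, and error handling is omitted.
import audiotools
import audiotools.replaygain


def album_replaygain(paths):
    files = [audiotools.open(p) for p in paths]
    rg = audiotools.replaygain.ReplayGain(files[0].sample_rate())
    track_gains = [rg.title_gain(f.to_pcm()) for f in files]  # (gain, peak) pairs
    return rg.album_gain(), track_gains


# album, tracks = album_replaygain(['/music/01.flac', '/music/02.flac'])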
+ item = list(album.items())[0] + audiofile = self.open_audio_file(item) + rg = self.init_replaygain(audiofile, item) + + track_gains = [] + for item in album.items(): + audiofile = self.open_audio_file(item) + rg_track_gain, rg_track_peak = self._title_gain(rg, audiofile) + track_gains.append( + Gain(gain=rg_track_gain, peak=rg_track_peak) + ) + self._log.debug(u'ReplayGain for track {0}: {1:.2f}, {2:.2f}', + item, rg_track_gain, rg_track_peak) + + # After getting the values for all tracks, it's possible to get the + # album values. + rg_album_gain, rg_album_peak = rg.album_gain() + self._log.debug(u'ReplayGain for album {0}: {1:.2f}, {2:.2f}', + album, rg_album_gain, rg_album_peak) + + return AlbumGain( + Gain(gain=rg_album_gain, peak=rg_album_peak), + track_gains=track_gains + ) + + # Main plugin logic. class ReplayGainPlugin(BeetsPlugin): @@ -464,13 +791,14 @@ """ backends = { - "command": CommandBackend, + "command": CommandBackend, "gstreamer": GStreamerBackend, + "audiotools": AudioToolsBackend, + "bs1770gain": Bs1770gainBackend } def __init__(self): super(ReplayGainPlugin, self).__init__() - self.import_stages = [self.imported] # default backend is 'command' for backward-compatibility. self.config.add({ @@ -481,7 +809,6 @@ }) self.overwrite = self.config['overwrite'].get(bool) - self.automatic = self.config['auto'].get(bool) backend_name = self.config['backend'].get(unicode) if backend_name not in self.backends: raise ui.UserError( @@ -492,14 +819,17 @@ ) ) + # On-import analysis. + if self.config['auto']: + self.import_stages = [self.imported] + try: self.backend_instance = self.backends[backend_name]( - self.config + self.config, self._log ) except (ReplayGainError, FatalReplayGainError) as e: raise ui.UserError( - 'An error occurred in backend initialization: {0}'.format(e) - ) + u'replaygain initialization failed: {0}'.format(e)) def track_requires_gain(self, item): return self.overwrite or \ @@ -519,19 +849,16 @@ item.rg_track_peak = track_gain.peak item.store() - log.debug(u'replaygain: applied track gain {0}, peak {1}'.format( - item.rg_track_gain, - item.rg_track_peak - )) + self._log.debug(u'applied track gain {0}, peak {1}', + item.rg_track_gain, item.rg_track_peak) def store_album_gain(self, album, album_gain): album.rg_album_gain = album_gain.gain album.rg_album_peak = album_gain.peak album.store() - log.debug(u'replaygain: applied album gain {0}, peak {1}'.format( - album.rg_album_gain, - album.rg_album_peak)) + self._log.debug(u'applied album gain {0}, peak {1}', + album.rg_album_gain, album.rg_album_peak) def handle_album(self, album, write): """Compute album and track replay gain store it in all of the @@ -542,21 +869,17 @@ items, nothing is done. 
""" if not self.album_requires_gain(album): - log.info(u'Skipping album {0} - {1}'.format(album.albumartist, - album.album)) + self._log.info(u'Skipping album {0}', album) return - log.info(u'analyzing {0} - {1}'.format(album.albumartist, - album.album)) + self._log.info(u'analyzing {0}', album) try: album_gain = self.backend_instance.compute_album_gain(album) if len(album_gain.track_gains) != len(album.items()): raise ReplayGainError( u"ReplayGain backend failed " - u"for some tracks in album {0} - {1}".format( - album.albumartist, album.album - ) + u"for some tracks in album {0}".format(album) ) self.store_album_gain(album, album_gain.album_gain) @@ -566,11 +889,10 @@ if write: item.try_write() except ReplayGainError as e: - log.info(u"ReplayGain error: {0}".format(e)) + self._log.info(u"ReplayGain error: {0}", e) except FatalReplayGainError as e: raise ui.UserError( - u"Fatal replay gain error: {0}".format(e) - ) + u"Fatal replay gain error: {0}".format(e)) def handle_track(self, item, write): """Compute track replay gain and store it in the item. @@ -580,40 +902,30 @@ in the item, nothing is done. """ if not self.track_requires_gain(item): - log.info(u'Skipping track {0} - {1}' - .format(item.artist, item.title)) + self._log.info(u'Skipping track {0}', item) return - log.info(u'analyzing {0} - {1}' - .format(item.artist, item.title)) + self._log.info(u'analyzing {0}', item) try: track_gains = self.backend_instance.compute_track_gain([item]) if len(track_gains) != 1: raise ReplayGainError( - u"ReplayGain backend failed for track {0} - {1}".format( - item.artist, item.title - ) + u"ReplayGain backend failed for track {0}".format(item) ) self.store_track_gain(item, track_gains[0]) if write: item.try_write() except ReplayGainError as e: - log.info(u"ReplayGain error: {0}".format(e)) + self._log.info(u"ReplayGain error: {0}", e) except FatalReplayGainError as e: raise ui.UserError( - u"Fatal replay gain error: {0}".format(e) - ) + u"Fatal replay gain error: {0}".format(e)) def imported(self, session, task): """Add replay gain info to items or albums of ``task``. """ - if not self.automatic: - return - - log.setLevel(logging.WARN) - if task.is_album: self.handle_album(task.album, False) else: @@ -623,9 +935,9 @@ """Return the "replaygain" ui subcommand. """ def func(lib, opts, args): - log.setLevel(logging.INFO) + self._log.setLevel(logging.INFO) - write = config['import']['write'].get(bool) + write = ui.should_write() if opts.album: for album in lib.albums(ui.decargs(args)): @@ -635,8 +947,7 @@ for item in lib.items(ui.decargs(args)): self.handle_track(item, write) - cmd = ui.Subcommand('replaygain', help='analyze for ReplayGain') - cmd.parser.add_option('-a', '--album', action='store_true', - help='analyze albums instead of tracks') + cmd = ui.Subcommand('replaygain', help=u'analyze for ReplayGain') + cmd.parser.add_album_option() cmd.func = func return [cmd] diff -Nru beets-1.3.8+dfsg/beetsplug/rewrite.py beets-1.3.19/beetsplug/rewrite.py --- beets-1.3.8+dfsg/beetsplug/rewrite.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/beetsplug/rewrite.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,16 +16,15 @@ """Uses user-specified rewriting rules to canonicalize names for path formats. 
""" +from __future__ import division, absolute_import, print_function + import re -import logging from collections import defaultdict from beets.plugins import BeetsPlugin from beets import ui from beets import library -log = logging.getLogger('beets') - def rewriter(field, rules): """Create a template field function that rewrites the given field @@ -55,11 +55,11 @@ try: fieldname, pattern = key.split(None, 1) except ValueError: - raise ui.UserError("invalid rewrite specification") + raise ui.UserError(u"invalid rewrite specification") if fieldname not in library.Item._fields: - raise ui.UserError("invalid field name (%s) in rewriter" % + raise ui.UserError(u"invalid field name (%s) in rewriter" % fieldname) - log.debug(u'adding template field {0}'.format(key)) + self._log.debug(u'adding template field {0}', key) pattern = re.compile(pattern.lower()) rules[fieldname].append((pattern, value)) if fieldname == 'artist': @@ -69,4 +69,7 @@ # Replace each template field with the new rewriter function. for fieldname, fieldrules in rules.iteritems(): - self.template_fields[fieldname] = rewriter(fieldname, fieldrules) + getter = rewriter(fieldname, fieldrules) + self.template_fields[fieldname] = getter + if fieldname in library.Album._fields: + self.album_template_fields[fieldname] = getter diff -Nru beets-1.3.8+dfsg/beetsplug/scrub.py beets-1.3.19/beetsplug/scrub.py --- beets-1.3.8+dfsg/beetsplug/scrub.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/beetsplug/scrub.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,7 +16,8 @@ """Cleans extraneous metadata from files' tags via a command or automatically whenever tags are written. """ -import logging + +from __future__ import division, absolute_import, print_function from beets.plugins import BeetsPlugin from beets import ui @@ -23,8 +25,6 @@ from beets import config from beets import mediafile -log = logging.getLogger('beets') - _MUTAGEN_FORMATS = { 'asf': 'ASF', 'apev2': 'APEv2File', @@ -44,9 +44,6 @@ } -scrubbing = False - - class ScrubPlugin(BeetsPlugin): """Removes extraneous metadata from files' tags.""" def __init__(self): @@ -55,91 +52,95 @@ 'auto': True, }) + if self.config['auto']: + self.register_listener("import_task_files", self.import_task_files) + def commands(self): def scrub_func(lib, opts, args): - # This is a little bit hacky, but we set a global flag to - # avoid autoscrubbing when we're also explicitly scrubbing. - global scrubbing - scrubbing = True - # Walk through matching files and remove tags. for item in lib.items(ui.decargs(args)): - log.info(u'scrubbing: {0}'.format( - util.displayable_path(item.path))) - - # Get album art if we need to restore it. - if opts.write: - mf = mediafile.MediaFile(item.path, - config['id3v23'].get(bool)) - art = mf.art - - # Remove all tags. - _scrub(item.path) - - # Restore tags, if enabled. 
- if opts.write: - log.debug(u'writing new tags after scrub') - item.try_write() - if art: - log.info(u'restoring art') - mf = mediafile.MediaFile(item.path) - mf.art = art - mf.save() - - scrubbing = False - - scrub_cmd = ui.Subcommand('scrub', help='clean audio tags') - scrub_cmd.parser.add_option('-W', '--nowrite', dest='write', - action='store_false', default=True, - help='leave tags empty') + self._log.info(u'scrubbing: {0}', + util.displayable_path(item.path)) + self._scrub_item(item, opts.write) + + scrub_cmd = ui.Subcommand('scrub', help=u'clean audio tags') + scrub_cmd.parser.add_option( + u'-W', u'--nowrite', dest='write', + action='store_false', default=True, + help=u'leave tags empty') scrub_cmd.func = scrub_func return [scrub_cmd] - -def _mutagen_classes(): - """Get a list of file type classes from the Mutagen module. - """ - classes = [] - for modname, clsname in _MUTAGEN_FORMATS.items(): - mod = __import__('mutagen.{0}'.format(modname), - fromlist=[clsname]) - classes.append(getattr(mod, clsname)) - return classes - - -def _scrub(path): - """Remove all tags from a file. - """ - for cls in _mutagen_classes(): - # Try opening the file with this type, but just skip in the - # event of any error. - try: - f = cls(util.syspath(path)) - except Exception: - continue - if f.tags is None: - continue - - # Remove the tag for this type. - try: - f.delete() - except NotImplementedError: - # Some Mutagen metadata subclasses (namely, ASFTag) do not - # support .delete(), presumably because it is impossible to - # remove them. In this case, we just remove all the tags. - for tag in f.keys(): - del f[tag] - f.save() - except IOError as exc: - log.error(u'could not scrub {0}: {1}'.format( - util.displayable_path(path), exc, - )) - - -# Automatically embed art into imported albums. -@ScrubPlugin.listen('write') -def write_item(path): - if not scrubbing and config['scrub']['auto']: - log.debug(u'auto-scrubbing {0}'.format(util.displayable_path(path))) - _scrub(path) + @staticmethod + def _mutagen_classes(): + """Get a list of file type classes from the Mutagen module. + """ + classes = [] + for modname, clsname in _MUTAGEN_FORMATS.items(): + mod = __import__('mutagen.{0}'.format(modname), + fromlist=[clsname]) + classes.append(getattr(mod, clsname)) + return classes + + def _scrub(self, path): + """Remove all tags from a file. + """ + for cls in self._mutagen_classes(): + # Try opening the file with this type, but just skip in the + # event of any error. + try: + f = cls(util.syspath(path)) + except Exception: + continue + if f.tags is None: + continue + + # Remove the tag for this type. + try: + f.delete() + except NotImplementedError: + # Some Mutagen metadata subclasses (namely, ASFTag) do not + # support .delete(), presumably because it is impossible to + # remove them. In this case, we just remove all the tags. + for tag in f.keys(): + del f[tag] + f.save() + except IOError as exc: + self._log.error(u'could not scrub {0}: {1}', + util.displayable_path(path), exc) + + def _scrub_item(self, item, restore=True): + """Remove tags from an Item's associated file and, if `restore` + is enabled, write the database's tags back to the file. + """ + # Get album art if we need to restore it. + if restore: + try: + mf = mediafile.MediaFile(util.syspath(item.path), + config['id3v23'].get(bool)) + except IOError as exc: + self._log.error(u'could not open file to scrub: {0}', + exc) + art = mf.art + + # Remove all tags. + self._scrub(item.path) + + # Restore tags, if enabled. 
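# A hedged sketch of tag stripping with Mutagen, following the `_scrub`
# approach above but using the generic `mutagen.File` opener instead of the
# per-format class list the plugin iterates over. For tag types that do not
# implement delete() (the ASF case noted above), keys are removed one by one
# and the file is saved. The path is a placeholder.
import mutagen


def strip_tags(path):
    f = mutagen.File(path)
    if f is None or f.tags is None:
        return
    try:
        f.delete()
    except NotImplementedError:
        for key in list(f.keys()):
            del f[key]
        f.save()


# strip_tags('/music/track.mp3')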
+ if restore: + self._log.debug(u'writing new tags after scrub') + item.try_write() + if art: + self._log.debug(u'restoring art') + mf = mediafile.MediaFile(util.syspath(item.path), + config['id3v23'].get(bool)) + mf.art = art + mf.save() + + def import_task_files(self, session, task): + """Automatically scrub imported files.""" + for item in task.imported_items(): + self._log.debug(u'auto-scrubbing {0}', + util.displayable_path(item.path)) + self._scrub_item(item) diff -Nru beets-1.3.8+dfsg/beetsplug/smartplaylist.py beets-1.3.19/beetsplug/smartplaylist.py --- beets-1.3.8+dfsg/beetsplug/smartplaylist.py 2014-09-16 01:20:29.000000000 +0000 +++ beets-1.3.19/beetsplug/smartplaylist.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Dang Mai <contact@dangmai.net>. +# Copyright 2016, Dang Mai <contact@dangmai.net>. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,83 +15,20 @@ """Generates smart playlists based on beets queries. """ -from __future__ import print_function + +from __future__ import division, absolute_import, print_function from beets.plugins import BeetsPlugin -from beets import config, ui, library -from beets import dbcore -from beets.util import normpath, syspath +from beets import ui +from beets.util import mkdirall, normpath, syspath, bytestring_path +from beets.library import Item, Album, parse_query_string +from beets.dbcore import OrQuery +from beets.dbcore.query import MultipleSort, ParsingError import os -# Global variable so that smartplaylist can detect database changes and run -# only once before beets exits. -database_changed = False - - -def _items_for_query(lib, playlist, album=False): - """Get the matching items for a playlist's configured queries. - `album` indicates whether to process the item-level query or the - album-level query (if any). - """ - key = 'album_query' if album else 'query' - if key not in playlist: - return [] - - # Parse quer(ies). If it's a list, join the queries with OR. - query_strings = playlist[key] - if not isinstance(query_strings, (list, tuple)): - query_strings = [query_strings] - model = library.Album if album else library.Item - query = dbcore.OrQuery( - [library.parse_query_string(q, model)[0] for q in query_strings] - ) - - # Execute query, depending on type. - if album: - result = [] - for album in lib.albums(query): - result.extend(album.items()) - return result - else: - return lib.items(query) - - -def update_playlists(lib): - ui.print_("Updating smart playlists...") - playlists = config['smartplaylist']['playlists'].get(list) - playlist_dir = config['smartplaylist']['playlist_dir'].as_filename() - relative_to = config['smartplaylist']['relative_to'].get() - if relative_to: - relative_to = normpath(relative_to) - - for playlist in playlists: - items = [] - items.extend(_items_for_query(lib, playlist, True)) - items.extend(_items_for_query(lib, playlist, False)) - - m3us = {} - basename = playlist['name'].encode('utf8') - # As we allow tags in the m3u names, we'll need to iterate through - # the items and generate the correct m3u file names. 
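# A small sketch of the m3u generation performed above: each item path is
# optionally rewritten relative to a base directory, duplicates are dropped
# while preserving order, and the result is written one path per line. The
# paths and output file name below are placeholders.
import os


def write_m3u(m3u_path, item_paths, relative_to=None):
    seen = []
    for path in item_paths:
        if relative_to:
            path = os.path.relpath(path, relative_to)
        if path not in seen:
            seen.append(path)
    with open(m3u_path, 'w') as f:
        for path in seen:
            f.write(path + '\n')


# write_m3u('/playlists/Favorites.m3u',
#           ['/music/a.mp3', '/music/b.mp3'], relative_to='/music')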
- for item in items: - m3u_name = item.evaluate_template(basename, True) - if not (m3u_name in m3us): - m3us[m3u_name] = [] - item_path = item.path - if relative_to: - item_path = os.path.relpath(item.path, relative_to) - if item_path not in m3us[m3u_name]: - m3us[m3u_name].append(item_path) - # Now iterate through the m3us that we need to generate - for m3u in m3us: - m3u_path = normpath(os.path.join(playlist_dir, m3u)) - with open(syspath(m3u_path), 'w') as f: - for path in m3us[m3u]: - f.write(path + '\n') - ui.print_("... Done") - class SmartPlaylistPlugin(BeetsPlugin): + def __init__(self): super(SmartPlaylistPlugin, self).__init__() self.config.add({ @@ -100,23 +38,168 @@ 'playlists': [] }) - def commands(self): - def update(lib, opts, args): - update_playlists(lib) - spl_update = ui.Subcommand('splupdate', - help='update the smart playlists') - spl_update.func = update - return [spl_update] + self._matched_playlists = None + self._unmatched_playlists = None + if self.config['auto']: + self.register_listener('database_change', self.db_change) -@SmartPlaylistPlugin.listen('database_change') -def handle_change(lib): - global database_changed - database_changed = True + def commands(self): + spl_update = ui.Subcommand( + 'splupdate', + help=u'update the smart playlists. Playlist names may be ' + u'passed as arguments.' + ) + spl_update.func = self.update_cmd + return [spl_update] + def update_cmd(self, lib, opts, args): + self.build_queries() + if args: + args = set(ui.decargs(args)) + for a in list(args): + if not a.endswith(".m3u"): + args.add("{0}.m3u".format(a)) + + playlists = set((name, q, a_q) + for name, q, a_q in self._unmatched_playlists + if name in args) + if not playlists: + raise ui.UserError( + u'No playlist matching any of {0} found'.format( + [name for name, _, _ in self._unmatched_playlists]) + ) + + self._matched_playlists = playlists + self._unmatched_playlists -= playlists + else: + self._matched_playlists = self._unmatched_playlists + + self.update_playlists(lib) + + def build_queries(self): + """ + Instanciate queries for the playlists. + + Each playlist has 2 queries: one or items one for albums, each with a + sort. We must also remember its name. _unmatched_playlists is a set of + tuples (name, (q, q_sort), (album_q, album_q_sort)). + + sort may be any sort, or NullSort, or None. None and NullSort are + equivalent and both eval to False. 
+ More precisely + - it will be NullSort when a playlist query ('query' or 'album_query') + is a single item or a list with 1 element + - it will be None when there are multiple items i a query + """ + self._unmatched_playlists = set() + self._matched_playlists = set() + + for playlist in self.config['playlists'].get(list): + if 'name' not in playlist: + self._log.warn(u"playlist configuration is missing name") + continue + + playlist_data = (playlist['name'],) + try: + for key, Model in (('query', Item), ('album_query', Album)): + qs = playlist.get(key) + if qs is None: + query_and_sort = None, None + elif isinstance(qs, basestring): + query_and_sort = parse_query_string(qs, Model) + elif len(qs) == 1: + query_and_sort = parse_query_string(qs[0], Model) + else: + # multiple queries and sorts + queries, sorts = zip(*(parse_query_string(q, Model) + for q in qs)) + query = OrQuery(queries) + final_sorts = [] + for s in sorts: + if s: + if isinstance(s, MultipleSort): + final_sorts += s.sorts + else: + final_sorts.append(s) + if not final_sorts: + sort = None + elif len(final_sorts) == 1: + sort, = final_sorts + else: + sort = MultipleSort(final_sorts) + query_and_sort = query, sort + + playlist_data += (query_and_sort,) + + except ParsingError as exc: + self._log.warn(u"invalid query in playlist {}: {}", + playlist['name'], exc) + continue + + self._unmatched_playlists.add(playlist_data) + + def matches(self, model, query, album_query): + if album_query and isinstance(model, Album): + return album_query.match(model) + if query and isinstance(model, Item): + return query.match(model) + return False + + def db_change(self, lib, model): + if self._unmatched_playlists is None: + self.build_queries() + + for playlist in self._unmatched_playlists: + n, (q, _), (a_q, _) = playlist + if self.matches(model, q, a_q): + self._log.debug( + u"{0} will be updated because of {1}", n, model) + self._matched_playlists.add(playlist) + self.register_listener('cli_exit', self.update_playlists) + + self._unmatched_playlists -= self._matched_playlists + + def update_playlists(self, lib): + self._log.info(u"Updating {0} smart playlists...", + len(self._matched_playlists)) + + playlist_dir = self.config['playlist_dir'].as_filename() + playlist_dir = bytestring_path(playlist_dir) + relative_to = self.config['relative_to'].get() + if relative_to: + relative_to = normpath(relative_to) + + for playlist in self._matched_playlists: + name, (query, q_sort), (album_query, a_q_sort) = playlist + self._log.debug(u"Creating playlist {0}", name) + items = [] + + if query: + items.extend(lib.items(query, q_sort)) + if album_query: + for album in lib.albums(album_query, a_q_sort): + items.extend(album.items()) + + m3us = {} + # As we allow tags in the m3u names, we'll need to iterate through + # the items and generate the correct m3u file names. 
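The query-combining step in build_queries above is easier to follow with concrete input; a minimal sketch of the same OrQuery construction and sort merging, assuming a working beets install (the query strings are made-up examples):

    from beets.library import Item, parse_query_string
    from beets.dbcore import OrQuery
    from beets.dbcore.query import MultipleSort

    # Made-up example queries; a real playlist takes these from its config.
    query_strings = [u'artist:Beatles', u'year:1967..1970']

    # Parse each string into a (query, sort) pair, then OR the queries together.
    queries, sorts = zip(*(parse_query_string(q, Item) for q in query_strings))
    query = OrQuery(queries)

    # Flatten the individual sorts into a single MultipleSort, or None
    # when every parsed sort is a (falsy) NullSort.
    final_sorts = []
    for s in sorts:
        if s:
            final_sorts += s.sorts if isinstance(s, MultipleSort) else [s]
    if not final_sorts:
        sort = None
    elif len(final_sorts) == 1:
        sort = final_sorts[0]
    else:
        sort = MultipleSort(final_sorts)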
+ for item in items: + m3u_name = item.evaluate_template(name, True) + if m3u_name not in m3us: + m3us[m3u_name] = [] + item_path = item.path + if relative_to: + item_path = os.path.relpath(item.path, relative_to) + if item_path not in m3us[m3u_name]: + m3us[m3u_name].append(item_path) + # Now iterate through the m3us that we need to generate + for m3u in m3us: + m3u_path = normpath(os.path.join(playlist_dir, + bytestring_path(m3u))) + mkdirall(m3u_path) + with open(syspath(m3u_path), 'wb') as f: + for path in m3us[m3u]: + f.write(path + b'\n') -@SmartPlaylistPlugin.listen('cli_exit') -def update(lib): - auto = config['smartplaylist']['auto'] - if database_changed and auto: - update_playlists(lib) + self._log.info(u"{0} playlists updated", len(self._matched_playlists)) diff -Nru beets-1.3.8+dfsg/beetsplug/spotify.py beets-1.3.19/beetsplug/spotify.py --- beets-1.3.8+dfsg/beetsplug/spotify.py 2014-09-14 20:15:22.000000000 +0000 +++ beets-1.3.19/beetsplug/spotify.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,15 +1,15 @@ -from __future__ import print_function +# -*- coding: utf-8 -*- + +from __future__ import division, absolute_import, print_function + import re import webbrowser import requests -import logging from beets.plugins import BeetsPlugin from beets.ui import decargs from beets import ui from requests.exceptions import HTTPError -log = logging.getLogger('beets') - class SpotifyPlugin(BeetsPlugin): @@ -40,17 +40,17 @@ self.output_results(results) spotify_cmd = ui.Subcommand( 'spotify', - help='build a Spotify playlist' + help=u'build a Spotify playlist' ) spotify_cmd.parser.add_option( - '-m', '--mode', action='store', - help='"open" to open Spotify with playlist, ' - '"list" to print (default)' + u'-m', u'--mode', action='store', + help=u'"open" to open Spotify with playlist, ' + u'"list" to print (default)' ) spotify_cmd.parser.add_option( - '-f', '--show-failures', action='store_true', - help='list tracks that did not match a Sptoify ID', - dest='show_failures', + u'-f', u'--show-failures', + action='store_true', dest='show_failures', + help=u'list tracks that did not match a Spotify ID' ) spotify_cmd.func = queries return [spotify_cmd] @@ -63,8 +63,8 @@ self.config['show_failures'].set(True) if self.config['mode'].get() not in ['list', 'open']: - log.warn(u'{0} is not a valid mode' - .format(self.config['mode'].get())) + self._log.warn(u'{0} is not a valid mode', + self.config['mode'].get()) return False self.opts = opts @@ -78,10 +78,11 @@ items = lib.items(query) if not items: - log.debug(u'Your beets query returned no items, skipping spotify') + self._log.debug(u'Your beets query returned no items, ' + u'skipping spotify') return - log.info(u'Processing {0} tracks...'.format(len(items))) + self._log.info(u'Processing {0} tracks...', len(items)) for item in items: @@ -109,12 +110,12 @@ r = requests.get(self.base_url, params={ "q": search_url, "type": "track" }) - log.debug(r.url) + self._log.debug('{}', r.url) try: r.raise_for_status() except HTTPError as e: - log.debug(u'URL returned a {0} error' - .format(e.response.status_code)) + self._log.debug(u'URL returned a {0} error', + e.response.status_code) failures.append(search_url) continue @@ -123,48 +124,47 @@ # Apply market filter if requested region_filter = self.config['region_filter'].get() if region_filter: - r_data = filter( - lambda x: region_filter in x['available_markets'], r_data - ) + r_data = [x for x in r_data if region_filter + in x['available_markets']] # Simplest, take the first result chosen_result = None if 
len(r_data) == 1 or self.config['tiebreak'].get() == "first": - log.debug(u'Spotify track(s) found, count: {0}' - .format(len(r_data))) + self._log.debug(u'Spotify track(s) found, count: {0}', + len(r_data)) chosen_result = r_data[0] elif len(r_data) > 1: # Use the popularity filter - log.debug(u'Most popular track chosen, count: {0}' - .format(len(r_data))) + self._log.debug(u'Most popular track chosen, count: {0}', + len(r_data)) chosen_result = max(r_data, key=lambda x: x['popularity']) if chosen_result: results.append(chosen_result) else: - log.debug(u'No spotify track found: {0}'.format(search_url)) + self._log.debug(u'No spotify track found: {0}', search_url) failures.append(search_url) failure_count = len(failures) if failure_count > 0: if self.config['show_failures'].get(): - log.info(u'{0} track(s) did not match a Spotify ID:' - .format(failure_count)) + self._log.info(u'{0} track(s) did not match a Spotify ID:', + failure_count) for track in failures: - log.info(u'track: {0}'.format(track)) - log.info(u'') + self._log.info(u'track: {0}', track) + self._log.info(u'') else: - log.warn(u'{0} track(s) did not match a Spotify ID;\n' - u'use --show-failures to display' - .format(failure_count)) + self._log.warn(u'{0} track(s) did not match a Spotify ID;\n' + u'use --show-failures to display', + failure_count) return results def output_results(self, results): if results: - ids = map(lambda x: x['id'], results) + ids = [x['id'] for x in results] if self.config['mode'].get() == "open": - log.info(u'Attempting to open Spotify with playlist') + self._log.info(u'Attempting to open Spotify with playlist') spotify_url = self.playlist_partial + ",".join(ids) webbrowser.open(spotify_url) @@ -172,4 +172,4 @@ for item in ids: print(unicode.encode(self.open_url + item)) else: - log.warn(u'No Spotify tracks found from beets query') + self._log.warn(u'No Spotify tracks found from beets query') diff -Nru beets-1.3.8+dfsg/beetsplug/the.py beets-1.3.19/beetsplug/the.py --- beets-1.3.8+dfsg/beetsplug/the.py 2014-04-14 00:43:22.000000000 +0000 +++ beets-1.3.19/beetsplug/the.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Blemjhoo Tezoulbr <baobab@heresiarch.info>. +# Copyright 2016, Blemjhoo Tezoulbr <baobab@heresiarch.info>. 
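The Spotify market filter and popularity tiebreak shown above are plain list operations; a toy example with hypothetical stand-ins for the search API's result dictionaries:

    r_data = [
        {'id': 'a', 'popularity': 41, 'available_markets': ['US', 'GB']},
        {'id': 'b', 'popularity': 73, 'available_markets': ['US']},
        {'id': 'c', 'popularity': 55, 'available_markets': ['DE']},
    ]
    region_filter = 'US'

    # Keep only tracks available in the configured market...
    r_data = [x for x in r_data if region_filter in x['available_markets']]
    # ...then prefer the most popular remaining track.
    chosen_result = max(r_data, key=lambda x: x['popularity'])
    print(chosen_result['id'])  # -> 'b'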
# # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,8 +15,9 @@ """Moves patterns in path formats (suitable for moving articles).""" +from __future__ import division, absolute_import, print_function + import re -import logging from beets.plugins import BeetsPlugin __author__ = 'baobab@heresiarch.info' @@ -28,13 +30,6 @@ class ThePlugin(BeetsPlugin): - _instance = None - _log = logging.getLogger('beets') - - the = True - a = True - format = u'' - strip = False patterns = [] def __init__(self): @@ -56,17 +51,17 @@ try: re.compile(p) except re.error: - self._log.error(u'[the] invalid pattern: {0}'.format(p)) + self._log.error(u'invalid pattern: {0}', p) else: if not (p.startswith('^') or p.endswith('$')): - self._log.warn(u'[the] warning: \"{0}\" will not ' - 'match string start/end'.format(p)) + self._log.warn(u'warning: \"{0}\" will not ' + u'match string start/end', p) if self.config['a']: self.patterns = [PATTERN_A] + self.patterns if self.config['the']: self.patterns = [PATTERN_THE] + self.patterns if not self.patterns: - self._log.warn(u'[the] no patterns defined!') + self._log.warn(u'no patterns defined!') def unthe(self, text, pattern): """Moves pattern in the path format string or strips it @@ -99,7 +94,7 @@ r = self.unthe(text, p) if r != text: break - self._log.debug(u'[the] \"{0}\" -> \"{1}\"'.format(text, r)) + self._log.debug(u'\"{0}\" -> \"{1}\"', text, r) return r else: return u'' diff -Nru beets-1.3.8+dfsg/beetsplug/thumbnails.py beets-1.3.19/beetsplug/thumbnails.py --- beets-1.3.8+dfsg/beetsplug/thumbnails.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/beetsplug/thumbnails.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,290 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Bruno Cauet +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Create freedesktop.org-compliant thumbnails for album folders + +This plugin is POSIX-only. 
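As a rough illustration of what the `unthe` helper in the `the` plugin above does with its patterns, here is a simplified sketch; the pattern and format constants are assumptions (they are defined earlier in the.py and not shown in this hunk), and the function body is condensed, not the plugin's exact implementation:

    import re

    PATTERN_THE = u'^the\\s'      # assumed shape of the real constant
    PATTERN_A = u'^[a][n]?\\s'    # assumed shape of the real constant
    FORMAT = u'{0}, {1}'

    def unthe(text, pattern):
        """Move a leading article to the end: 'The Beatles' -> 'Beatles, The'."""
        match = re.match(pattern, text, flags=re.IGNORECASE)
        if not match:
            return text
        article = match.group(0).strip()
        rest = text[match.end():].strip()
        return FORMAT.format(rest, article)

    print(unthe(u'The Beatles', PATTERN_THE))         # Beatles, The
    print(unthe(u'A Tribe Called Quest', PATTERN_A))  # Tribe Called Quest, A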
+Spec: standards.freedesktop.org/thumbnail-spec/latest/index.html +""" + +from __future__ import division, absolute_import, print_function + +from hashlib import md5 +import os +import shutil +from itertools import chain +from pathlib import PurePosixPath +import ctypes +import ctypes.util + +from xdg import BaseDirectory + +from beets.plugins import BeetsPlugin +from beets.ui import Subcommand, decargs +from beets import util +from beets.util.artresizer import ArtResizer, get_im_version, get_pil_version + + +BASE_DIR = os.path.join(BaseDirectory.xdg_cache_home, "thumbnails") +NORMAL_DIR = util.bytestring_path(os.path.join(BASE_DIR, "normal")) +LARGE_DIR = util.bytestring_path(os.path.join(BASE_DIR, "large")) + + +class ThumbnailsPlugin(BeetsPlugin): + def __init__(self): + super(ThumbnailsPlugin, self).__init__() + self.config.add({ + 'auto': True, + 'force': False, + 'dolphin': False, + }) + + self.write_metadata = None + if self.config['auto'] and self._check_local_ok(): + self.register_listener('art_set', self.process_album) + + def commands(self): + thumbnails_command = Subcommand("thumbnails", + help=u"Create album thumbnails") + thumbnails_command.parser.add_option( + u'-f', u'--force', + dest='force', action='store_true', default=False, + help=u'force regeneration of thumbnails deemed fine (existing & ' + u'recent enough)') + thumbnails_command.parser.add_option( + u'--dolphin', dest='dolphin', action='store_true', default=False, + help=u"create Dolphin-compatible thumbnail information (for KDE)") + thumbnails_command.func = self.process_query + + return [thumbnails_command] + + def process_query(self, lib, opts, args): + self.config.set_args(opts) + if self._check_local_ok(): + for album in lib.albums(decargs(args)): + self.process_album(album) + + def _check_local_ok(self): + """Check that's everythings ready: + - local capability to resize images + - thumbnail dirs exist (create them if needed) + - detect whether we'll use PIL or IM + - detect whether we'll use GIO or Python to get URIs + """ + if not ArtResizer.shared.local: + self._log.warning(u"No local image resizing capabilities, " + u"cannot generate thumbnails") + return False + + for dir in (NORMAL_DIR, LARGE_DIR): + if not os.path.exists(dir): + os.makedirs(dir) + + if get_im_version(): + self.write_metadata = write_metadata_im + tool = "IM" + else: + assert get_pil_version() # since we're local + self.write_metadata = write_metadata_pil + tool = "PIL" + self._log.debug(u"using {0} to write metadata", tool) + + uri_getter = GioURI() + if not uri_getter.available: + uri_getter = PathlibURI() + self._log.debug(u"using {0.name} to compute URIs", uri_getter) + self.get_uri = uri_getter.uri + + return True + + def process_album(self, album): + """Produce thumbnails for the album folder. 
+ """ + self._log.debug(u'generating thumbnail for {0}', album) + if not album.artpath: + self._log.info(u'album {0} has no art', album) + return + + if self.config['dolphin']: + self.make_dolphin_cover_thumbnail(album) + + size = ArtResizer.shared.get_size(album.artpath) + if not size: + self._log.warning(u'problem getting the picture size for {0}', + album.artpath) + return + + wrote = True + if max(size) >= 256: + wrote &= self.make_cover_thumbnail(album, 256, LARGE_DIR) + wrote &= self.make_cover_thumbnail(album, 128, NORMAL_DIR) + + if wrote: + self._log.info(u'wrote thumbnail for {0}', album) + else: + self._log.info(u'nothing to do for {0}', album) + + def make_cover_thumbnail(self, album, size, target_dir): + """Make a thumbnail of given size for `album` and put it in + `target_dir`. + """ + target = os.path.join(target_dir, self.thumbnail_file_name(album.path)) + + if os.path.exists(target) and \ + os.stat(target).st_mtime > os.stat(album.artpath).st_mtime: + if self.config['force']: + self._log.debug(u"found a suitable {1}x{1} thumbnail for {0}, " + u"forcing regeneration", album, size) + else: + self._log.debug(u"{1}x{1} thumbnail for {0} exists and is " + u"recent enough", album, size) + return False + resized = ArtResizer.shared.resize(size, album.artpath, + util.syspath(target)) + self.add_tags(album, util.syspath(resized)) + shutil.move(resized, target) + return True + + def thumbnail_file_name(self, path): + """Compute the thumbnail file name + See http://standards.freedesktop.org/thumbnail-spec/latest/x227.html + """ + uri = self.get_uri(path) + hash = md5(uri.encode('utf-8')).hexdigest() + return util.bytestring_path("{0}.png".format(hash)) + + def add_tags(self, album, image_path): + """Write required metadata to the thumbnail + See http://standards.freedesktop.org/thumbnail-spec/latest/x142.html + """ + metadata = {"Thumb::URI": self.get_uri(album.artpath), + "Thumb::MTime": unicode(os.stat(album.artpath).st_mtime)} + try: + self.write_metadata(image_path, metadata) + except Exception: + self._log.exception(u"could not write metadata to {0}", + util.displayable_path(image_path)) + + def make_dolphin_cover_thumbnail(self, album): + outfilename = os.path.join(album.path, b".directory") + if os.path.exists(outfilename): + return + artfile = os.path.split(album.artpath)[1] + with open(outfilename, 'w') as f: + f.write('[Desktop Entry]\n') + f.write('Icon=./{0}'.format(artfile.decode('utf-8'))) + f.close() + self._log.debug(u"Wrote file {0}", util.displayable_path(outfilename)) + + +def write_metadata_im(file, metadata): + """Enrich the file metadata with `metadata` dict thanks to IM.""" + command = ['convert', file] + \ + list(chain.from_iterable(('-set', k, v) + for k, v in metadata.items())) + [file] + util.command_output(command) + return True + + +def write_metadata_pil(file, metadata): + """Enrich the file metadata with `metadata` dict thanks to PIL.""" + from PIL import Image, PngImagePlugin + im = Image.open(file) + meta = PngImagePlugin.PngInfo() + for k, v in metadata.items(): + meta.add_text(k, v, 0) + im.save(file, "PNG", pnginfo=meta) + return True + + +class URIGetter(object): + available = False + name = "Abstract base" + + def uri(self, path): + raise NotImplementedError() + + +class PathlibURI(URIGetter): + available = True + name = "Python Pathlib" + + def uri(self, path): + return PurePosixPath(path).as_uri() + + +def copy_c_string(c_string): + """Copy a `ctypes.POINTER(ctypes.c_char)` value into a new Python + string and return it. 
The old memory is then safe to free. + """ + # This is a pretty dumb way to get a string copy, but it seems to + # work. A more surefire way would be to allocate a ctypes buffer and copy + # the data with `memcpy` or somesuch. + s = ctypes.cast(c_string, ctypes.c_char_p).value + return b'' + s + + +class GioURI(URIGetter): + """Use gio URI function g_file_get_uri. Paths must be utf-8 encoded. + """ + name = "GIO" + + def __init__(self): + self.libgio = self.get_library() + self.available = bool(self.libgio) + if self.available: + self.libgio.g_type_init() # for glib < 2.36 + + self.libgio.g_file_get_uri.argtypes = [ctypes.c_char_p] + self.libgio.g_file_new_for_path.restype = ctypes.c_void_p + + self.libgio.g_file_get_uri.argtypes = [ctypes.c_void_p] + self.libgio.g_file_get_uri.restype = ctypes.POINTER(ctypes.c_char) + + self.libgio.g_object_unref.argtypes = [ctypes.c_void_p] + + def get_library(self): + lib_name = ctypes.util.find_library("gio-2") + try: + if not lib_name: + return False + return ctypes.cdll.LoadLibrary(lib_name) + except OSError: + return False + + def uri(self, path): + g_file_ptr = self.libgio.g_file_new_for_path(path) + if not g_file_ptr: + raise RuntimeError(u"No gfile pointer received for {0}".format( + util.displayable_path(path))) + + try: + uri_ptr = self.libgio.g_file_get_uri(g_file_ptr) + except: + raise + finally: + self.libgio.g_object_unref(g_file_ptr) + if not uri_ptr: + self.libgio.g_free(uri_ptr) + raise RuntimeError(u"No URI received from the gfile pointer for " + u"{0}".format(util.displayable_path(path))) + + try: + uri = copy_c_string(uri_ptr) + except: + raise + finally: + self.libgio.g_free(uri_ptr) + return uri diff -Nru beets-1.3.8+dfsg/beetsplug/types.py beets-1.3.19/beetsplug/types.py --- beets-1.3.8+dfsg/beetsplug/types.py 2014-09-14 18:35:06.000000000 +0000 +++ beets-1.3.19/beetsplug/types.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Thomas Scholtes. +# Copyright 2016, Thomas Scholtes. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,6 +13,8 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. +from __future__ import division, absolute_import, print_function + from beets.plugins import BeetsPlugin from beets.dbcore import types from beets.util.confit import ConfigValueError @@ -22,6 +25,13 @@ @property def item_types(self): + return self._types() + + @property + def album_types(self): + return self._types() + + def _types(self): if not self.config.exists(): return {} diff -Nru beets-1.3.8+dfsg/beetsplug/web/__init__.py beets-1.3.19/beetsplug/web/__init__.py --- beets-1.3.8+dfsg/beetsplug/web/__init__.py 2014-04-14 00:55:35.000000000 +0000 +++ beets-1.3.19/beetsplug/web/__init__.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -13,6 +14,8 @@ # included in all copies or substantial portions of the Software. 
"""A Web interface to beets.""" +from __future__ import division, absolute_import, print_function + from beets.plugins import BeetsPlugin from beets import ui from beets import util @@ -87,7 +90,7 @@ ) else: return flask.abort(404) - responder.__name__ = 'get_%s' % name + responder.__name__ = 'get_{0}'.format(name) return responder return make_responder @@ -101,7 +104,7 @@ json_generator(query_func(queries), root='results'), mimetype='application/json' ) - responder.__name__ = 'query_%s' % name + responder.__name__ = 'query_{0}'.format(name) return responder return make_responder @@ -116,11 +119,21 @@ json_generator(list_all(), root=name), mimetype='application/json' ) - responder.__name__ = 'all_%s' % name + responder.__name__ = 'all_{0}'.format(name) return responder return make_responder +def _get_unique_table_field_values(model, field, sort_field): + """ retrieve all unique values belonging to a key from a model """ + if field not in model.all_keys() or sort_field not in model.all_keys(): + raise KeyError + with g.lib.transaction() as tx: + rows = tx.query('SELECT DISTINCT "{0}" FROM "{1}" ORDER BY "{2}"' + .format(field, model._table, sort_field)) + return [row[0] for row in rows] + + class IdListConverter(BaseConverter): """Converts comma separated lists of ids in urls to integer lists. """ @@ -191,6 +204,17 @@ return g.lib.items(queries) +@app.route('/item/values/<string:key>') +def item_unique_field_values(key): + sort_key = flask.request.args.get('sort_key', key) + try: + values = _get_unique_table_field_values(beets.library.Item, key, + sort_key) + except KeyError: + return flask.abort(404) + return flask.jsonify(values=values) + + # Albums. @app.route('/album/<idlist:ids>') @@ -215,7 +239,21 @@ @app.route('/album/<int:album_id>/art') def album_art(album_id): album = g.lib.get_album(album_id) - return flask.send_file(album.artpath) + if album.artpath: + return flask.send_file(album.artpath) + else: + return flask.abort(404) + + +@app.route('/album/values/<string:key>') +def album_unique_field_values(key): + sort_key = flask.request.args.get('sort_key', key) + try: + values = _get_unique_table_field_values(beets.library.Album, key, + sort_key) + except KeyError: + return flask.abort(404) + return flask.jsonify(values=values) # Artists. @@ -254,14 +292,15 @@ def __init__(self): super(WebPlugin, self).__init__() self.config.add({ - 'host': u'', + 'host': u'127.0.0.1', 'port': 8337, + 'cors': '', }) def commands(self): - cmd = ui.Subcommand('web', help='start a Web interface') - cmd.parser.add_option('-d', '--debug', action='store_true', - default=False, help='debug mode') + cmd = ui.Subcommand('web', help=u'start a Web interface') + cmd.parser.add_option(u'-d', u'--debug', action='store_true', + default=False, help=u'debug mode') def func(lib, opts, args): args = ui.decargs(args) @@ -271,6 +310,17 @@ self.config['port'] = int(args.pop(0)) app.config['lib'] = lib + # Enable CORS if required. + if self.config['cors']: + self._log.info(u'Enabling CORS with origin: {0}', + self.config['cors']) + from flask.ext.cors import CORS + app.config['CORS_ALLOW_HEADERS'] = "Content-Type" + app.config['CORS_RESOURCES'] = { + r"/*": {"origins": self.config['cors'].get(str)} + } + CORS(app) + # Start the web application. 
app.run(host=self.config['host'].get(unicode), port=self.config['port'].get(int), debug=opts.debug, threaded=True) diff -Nru beets-1.3.8+dfsg/beetsplug/web/static/beets.js beets-1.3.19/beetsplug/web/static/beets.js --- beets-1.3.8+dfsg/beetsplug/web/static/beets.js 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/beetsplug/web/static/beets.js 2016-06-20 01:53:12.000000000 +0000 @@ -146,7 +146,8 @@ "item/query/:query": "itemQuery", }, itemQuery: function(query) { - $.getJSON('/item/query/' + query, function(data) { + var queryURL = query.split(/\s+/).map(encodeURIComponent).join('/'); + $.getJSON('/item/query/' + queryURL, function(data) { var models = _.map( data['results'], function(d) { return new Item(d); } @@ -228,7 +229,7 @@ }, querySubmit: function(ev) { ev.preventDefault(); - router.navigate('item/query/' + escape($('#query').val()), true); + router.navigate('item/query/' + encodeURIComponent($('#query').val()), true); }, initialize: function() { this.playingItem = null; diff -Nru beets-1.3.8+dfsg/beetsplug/web/static/jquery.js beets-1.3.19/beetsplug/web/static/jquery.js --- beets-1.3.8+dfsg/beetsplug/web/static/jquery.js 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/beetsplug/web/static/jquery.js 2016-06-20 01:53:12.000000000 +0000 @@ -2,13 +2,13 @@ * jQuery JavaScript Library v1.7.1 * http://jquery.com/ * - * Copyright 2013, John Resig + * Copyright 2016, John Resig * Dual licensed under the MIT or GPL Version 2 licenses. * http://jquery.org/license * * Includes Sizzle.js * http://sizzlejs.com/ - * Copyright 2013, The Dojo Foundation + * Copyright 2016, The Dojo Foundation * Released under the MIT, BSD, and GPL Licenses. * * Date: Mon Nov 21 21:11:03 2011 -0500 @@ -3851,7 +3851,7 @@ /*! * Sizzle CSS Selector Engine - * Copyright 2013, The Dojo Foundation + * Copyright 2016, The Dojo Foundation * Released under the MIT, BSD, and GPL Licenses. * More information: http://sizzlejs.com/ */ diff -Nru beets-1.3.8+dfsg/beetsplug/zero.py beets-1.3.19/beetsplug/zero.py --- beets-1.3.8+dfsg/beetsplug/zero.py 2014-09-18 02:01:05.000000000 +0000 +++ beets-1.3.19/beetsplug/zero.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Blemjhoo Tezoulbr <baobab@heresiarch.info>. +# Copyright 2016, Blemjhoo Tezoulbr <baobab@heresiarch.info>. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,18 +15,17 @@ """ Clears tag fields in media files.""" +from __future__ import division, absolute_import, print_function + import re -import logging from beets.plugins import BeetsPlugin -from beets.library import Item +from beets.mediafile import MediaFile from beets.importer import action from beets.util import confit __author__ = 'baobab@heresiarch.info' __version__ = '0.10' -log = logging.getLogger('beets') - class ZeroPlugin(BeetsPlugin): @@ -41,30 +41,67 @@ self.config.add({ 'fields': [], + 'keep_fields': [], + 'update_database': False, }) self.patterns = {} self.warned = False - for field in self.config['fields'].as_str_seq(): - if field in ('id', 'path', 'album_id'): - log.warn(u'[zero] field \'{0}\' ignored, zeroing ' - u'it would be dangerous'.format(field)) + # We'll only handle `fields` or `keep_fields`, but not both. + if self.config['fields'] and self.config['keep_fields']: + self._log.warn(u'cannot blacklist and whitelist at the same time') + + # Blacklist mode. 
+ if self.config['fields']: + self.validate_config('fields') + for field in self.config['fields'].as_str_seq(): + self.set_pattern(field) + + # Whitelist mode. + elif self.config['keep_fields']: + self.validate_config('keep_fields') + + for field in MediaFile.fields(): + if field in self.config['keep_fields'].as_str_seq(): + continue + self.set_pattern(field) + + # These fields should always be preserved. + for key in ('id', 'path', 'album_id'): + if key in self.patterns: + del self.patterns[key] + + def validate_config(self, mode): + """Check whether fields in the configuration are valid. + + `mode` should either be "fields" or "keep_fields", indicating + the section of the configuration to validate. + """ + for field in self.config[mode].as_str_seq(): + if field not in MediaFile.fields(): + self._log.error(u'invalid field: {0}', field) continue - if field not in Item._fields.keys(): - log.error(u'[zero] invalid field: {0}'.format(field)) + if mode == 'fields' and field in ('id', 'path', 'album_id'): + self._log.warn(u'field \'{0}\' ignored, zeroing ' + u'it would be dangerous', field) continue - try: - self.patterns[field] = self.config[field].as_str_seq() - except confit.NotFoundError: - # Matches everything - self.patterns[field] = [u''] + def set_pattern(self, field): + """Set a field in `self.patterns` to a string list corresponding to + the configuration, or `True` if the field has no specific + configuration. + """ + try: + self.patterns[field] = self.config[field].as_str_seq() + except confit.NotFoundError: + # Matches everything + self.patterns[field] = True def import_task_choice_event(self, session, task): """Listen for import_task_choice event.""" if task.choice_flag == action.ASIS and not self.warned: - log.warn(u'[zero] cannot zero in \"as-is\" mode') + self._log.warn(u'cannot zero in \"as-is\" mode') self.warned = True # TODO request write in as-is mode @@ -73,23 +110,31 @@ """Check if field (as string) is matching any of the patterns in the list. """ + if patterns is True: + return True for p in patterns: if re.search(p, unicode(field), flags=re.IGNORECASE): return True return False def write_event(self, item, path, tags): - """Listen for write event.""" + """Set values in tags to `None` if the key and value are matched + by `self.patterns`. 
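A small sketch of the pattern matching the zero plugin relies on above: after set_pattern runs, a field's entry is either True (zero it unconditionally) or a list of regexes applied case-insensitively (the example values below are made up):

    import re

    def match_patterns(value, patterns):
        """Return True if the field's value matches its configured patterns."""
        if patterns is True:
            return True  # no per-field patterns configured: always zero this field
        # `unicode` as in the plugin's Python 2 code.
        return any(re.search(p, unicode(value), flags=re.IGNORECASE)
                   for p in patterns)

    print(match_patterns(u'Deluxe Edition EP', [u'\\bEP\\b', u'\\bsingle\\b']))  # True
    print(match_patterns(2007, True))                                            # True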
+ """ if not self.patterns: - log.warn(u'[zero] no fields, nothing to do') + self._log.warn(u'no fields, nothing to do') return for field, patterns in self.patterns.items(): - if field not in tags: - log.error(u'[zero] no such field: {0}'.format(field)) - continue + if field in tags: + value = tags[field] + match = self.match_patterns(tags[field], patterns) + else: + value = '' + match = patterns is True - value = tags[field] - if self.match_patterns(value, patterns): - log.debug(u'[zero] {0}: {1} -> None'.format(field, value)) + if match: + self._log.debug(u'{0}: {1} -> None', field, value) tags[field] = None + if self.config['update_database']: + item[field] = None diff -Nru beets-1.3.8+dfsg/debian/beets-doc.links beets-1.3.19/debian/beets-doc.links --- beets-1.3.8+dfsg/debian/beets-doc.links 2012-06-03 14:23:27.000000000 +0000 +++ beets-1.3.19/debian/beets-doc.links 2016-08-30 03:40:16.000000000 +0000 @@ -1,3 +1,3 @@ -/usr/share/doc/beets-doc/html/_sources /usr/share/doc/beets-doc/rst /usr/share/doc/beets-doc/html /usr/share/doc/beets/html +/usr/share/doc/beets-doc/html/_sources /usr/share/doc/beets-doc/rst /usr/share/doc/beets-doc/rst /usr/share/doc/beets/rst diff -Nru beets-1.3.8+dfsg/debian/beets.links beets-1.3.19/debian/beets.links --- beets-1.3.8+dfsg/debian/beets.links 2012-05-19 18:31:19.000000000 +0000 +++ beets-1.3.19/debian/beets.links 2016-08-30 03:40:16.000000000 +0000 @@ -1,3 +1,3 @@ +/usr/share/javascript/backbone/backbone.js /usr/share/beets/beetsplug/web/static/backbone.js /usr/share/javascript/jquery/jquery.js /usr/share/beets/beetsplug/web/static/jquery.js /usr/share/javascript/underscore/underscore.js /usr/share/beets/beetsplug/web/static/underscore.js -/usr/share/javascript/backbone/backbone.js /usr/share/beets/beetsplug/web/static/backbone.js diff -Nru beets-1.3.8+dfsg/debian/changelog beets-1.3.19/debian/changelog --- beets-1.3.8+dfsg/debian/changelog 2015-01-18 19:39:11.000000000 +0000 +++ beets-1.3.19/debian/changelog 2016-08-30 05:07:14.000000000 +0000 @@ -1,3 +1,37 @@ +beets (1.3.19-2) unstable; urgency=medium + + * Fix occasional FTBFS due to lack of mock cleanup. Thanks Santiago Vila. + (Closes: #835937) + * Fix autopkgtests by adding the test dependencies, and a sane unittest + incantation. + * Drop unittest2 and nose Build-Depends, no longer used. + + -- Stefano Rivera <stefanor@debian.org> Mon, 29 Aug 2016 22:07:14 -0700 + +beets (1.3.19-1) unstable; urgency=medium + + * New upstream release. (Closes: #775719, #792060, LP: #1502394) + - No need to repack any more. + - Drop patches, superseded upstream. + - Update copyright. + - Add Build-Depends: python-{bs4,mpd,mutagen,pathlib,rarfile,xdg} + - Add Suggests (plugin dependencies): + python-{dbus,pathlib,rarfile,requests-oauthlib,xdg} + - Update gstreamer Suggests to python-gst-1.0 (Closes: #785903) + - Upstream dropped Python 2.6 support. + * Drop Build-Conflicts: python-rarfile, it seems to be working now. + * Patches: + - Bundled a function from jellyfish, pending resolving a package namespace + problem blocking packaging jellyfish. See #806716. + - Cherry pick upstream fixes for test_mediafile_edge and test_hidden. + - Skip tests for path queries, as these are broken by Debian sqlite using + SQLITE_LIKE_DOESNT_MATCH_BLOBS. + * Switch watch file to pypi.debian.net. (Closes: #815761) + * Bump debhelper compat level to 9. + * Bump Standards-Version to 3.9.8, no changes needed. 
+ + -- Stefano Rivera <stefanor@debian.org> Tue, 23 Aug 2016 01:18:46 -0700 + beets (1.3.8+dfsg-2) unstable; urgency=medium * Patch: test_bucket-years. Fix test failure when the current year is >= diff -Nru beets-1.3.8+dfsg/debian/compat beets-1.3.19/debian/compat --- beets-1.3.8+dfsg/debian/compat 2011-11-21 09:30:35.000000000 +0000 +++ beets-1.3.19/debian/compat 2016-08-23 00:06:07.000000000 +0000 @@ -1 +1 @@ -7 +9 diff -Nru beets-1.3.8+dfsg/debian/control beets-1.3.19/debian/control --- beets-1.3.8+dfsg/debian/control 2014-10-23 17:41:37.000000000 +0000 +++ beets-1.3.19/debian/control 2016-08-30 03:40:16.000000000 +0000 @@ -6,31 +6,32 @@ Stefano Rivera <stefanor@debian.org>, Simon Chopin <chopin.simon@gmail.com> Build-Depends: - debhelper (>= 8.1), + debhelper (>= 9), + dh-python, libc-bin (>= 2.13), python-all (>= 2.6.6-3~), - python-docutils, - python-enum34, + python-bs4, + python-enum34 (>= 1.0.4), python-flask, python-mock, + python-mpd, python-munkres, python-musicbrainzngs (>= 0.4), - python-mutagen (>= 1.22), - python-nose, + python-mutagen (>= 1.27), + python-pathlib, python-pylast, + python-rarfile, python-responses, python-setuptools, python-sphinx (>= 1.0.7+dfsg), python-unidecode, - python-unittest2, + python-xdg, python-yaml -Build-Conflicts: python-rarfile -X-Python-Version: >= 2.5 -Standards-Version: 3.9.6 +X-Python-Version: >= 2.7 +Standards-Version: 3.9.8 Homepage: http://beets.radbox.org/ Vcs-Svn: svn://anonscm.debian.org/python-apps/packages/beets/trunk/ Vcs-Browser: http://anonscm.debian.org/viewvc/python-apps/packages/beets/trunk/ -XS-Testsuite: autopkgtest Package: beets Architecture: all @@ -46,18 +47,21 @@ ${python:Depends} Suggests: beets-doc, - gir1.2-gstreamer-1.0, libav-tools, mp3gain, python-acoustid, python-bs4, + python-dbus, python-flask, - python-gi, - python-gst0.10, + python-gst-1.0, python-imaging, python-mpd, + python-pathlib, python-pylast, - python-requests + python-rarfile, + python-requests, + python-requests-oauthlib, + python-xdg Description: music tagger and library organizer Beets is a media library management system for obsessive-compulsive music geeks. diff -Nru beets-1.3.8+dfsg/debian/copyright beets-1.3.19/debian/copyright --- beets-1.3.8+dfsg/debian/copyright 2014-10-23 21:35:02.000000000 +0000 +++ beets-1.3.19/debian/copyright 2016-08-30 03:40:16.000000000 +0000 @@ -2,35 +2,39 @@ Upstream-Name: Beets Upstream-Contact: Adrian Sampson <adrian@radbox.org> Source: http://beets.radbox.org/ -Files-Excluded: - test/rsrc/lyrics -Comment: - The upstream includes verbatim HTML from lyrics websites, with no clear - license to redistribute this. A few of these pages claim All Rights Reserved. 
Files: * -Copyright: 2010-2014, Adrian Sampson <adrian@radbox.org> - 2012-2014, Fabrice Laporte - 2013-2014, Thomas Scholtes +Copyright: 2010-2016, Adrian Sampson <adrian@radbox.org> + 2012-2016, Fabrice Laporte + 2013-2016, Thomas Scholtes License: Expat Files: beetsplug/* -Copyright: 2010-2014, Adrian Sampson <adrian@radbox.org> - 2014, aroquen - 2012-2014, Blemjhoo Tezoulbr <baobab@heresiarch.info> - 2014, Dang Mai <contact@dangmai.net> - 2012-2013, David Brenner - 2014, David Hamp-Gonsalves - 2012-2014, Fabrice Laporte - 2012-2014, Jakob Schnitzer - 2013, Jan-Erik Dahlin - 2013, Johann Klähn - 2013, Pedro Silva - 2013, Peter Schnebel - 2011-2013, Philippe Mongeau - 2014, Thomas Scholtes - 2013, Verrus - 2014, Yevgeny Bezman + test/* +Copyright: 2010-2016, Adrian Sampson <adrian@radbox.org> + 2014-2016, aroquen + 2012-2016, Blemjhoo Tezoulbr <baobab@heresiarch.info> + 2016, Bruno Cauet + 2013-2016, Dang Mai <contact@dangmai.net> + 2014-2016, David Hamp-Gonsalves + 2016, Diego Moreda + 2012-2016, Fabrice Laporte + 2016, François-Xavier Thomas + 2016, Heinz Wiesinger + 2012-2016, Jakob Schnitzer + 2013-2016, Jan-Erik Dahlin + 2013-2016, Johann Klähn + 2016, Malte Ried + 2016, Matt Lichtenberg + 2015-2016, Ohm Patel + 2013-2016, Pedro Silva + 2013-2016, Peter Schnebel + 2011-2016, Philippe Mongeau + 2016, Rafael Bodill <http://github.com/rafi> + 2014-2016, Thomas Scholtes + 2016, Tom Jaspers + 2013-2016, Verrus <github.com/Verrus/beets-plugin-featInTitle> + 2014-2016, Yevgeny Bezman License: Expat Files: beetsplug/mbcollection.py @@ -43,17 +47,15 @@ License: Expat Files: beetsplug/web/static/jquery.js -Copyright: 2011-2013, John Resig, - 2011-2013, The Dojo Foundation +Copyright: 2011-2016, John Resig, + 2011-2016, The Dojo Foundation License: Expat or GPL-2 +Comment: You may use any jQuery project under the terms of either the MIT License or the GNU General Public License (GPL) Version 2 - . - On Debian GNU/Linux systems, the complete text of the GNU General - Public License can be found in `/usr/share/common-licenses/GPL-2' Files: debian/* -Copyright: 2010-2014, Stefano Rivera <stefanor@debian.org> +Copyright: 2010-2016, Stefano Rivera <stefanor@debian.org> 2012-2014, Simon Chopin <chopin.simon@gmail.com> License: Expat @@ -88,3 +90,7 @@ LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +License: GPL-2 + On Debian GNU/Linux systems, the complete text of the GNU General + Public License can be found in `/usr/share/common-licenses/GPL-2' diff -Nru beets-1.3.8+dfsg/debian/patches/disable-broken-tests beets-1.3.19/debian/patches/disable-broken-tests --- beets-1.3.8+dfsg/debian/patches/disable-broken-tests 2014-10-23 17:41:37.000000000 +0000 +++ beets-1.3.19/debian/patches/disable-broken-tests 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -Description: Disable the pyechonest dependency for the tests - This will in turn disable the tests themselves. 
-Author: Simon Chopin <chopin.simon@gmail.com> -Forwarded: not-needed -Last-Update: 2014-10-21 - ---- a/setup.py -+++ b/setup.py -@@ -86,10 +86,8 @@ - - tests_require=[ - 'responses', -- 'pyechonest', - 'mock', - 'flask', -- 'rarfile', - 'pylast', - ], - diff -Nru beets-1.3.8+dfsg/debian/patches/fix-test_hidden beets-1.3.19/debian/patches/fix-test_hidden --- beets-1.3.8+dfsg/debian/patches/fix-test_hidden 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/debian/patches/fix-test_hidden 2016-08-23 00:19:51.000000000 +0000 @@ -0,0 +1,17 @@ +Description: test_hidden was missing suite() which made testall.suite() fail +Author: Christoph Reiter <reiter.christoph@gmail.com> +Origin: upstream, https://github.com/beetbox/beets/commit/06072c5d7d2bc33a9a7cf041b8fc5bd362758a69 + +--- a/test/test_hidden.py ++++ b/test/test_hidden.py +@@ -72,3 +72,10 @@ + + with tempfile.NamedTemporaryFile(prefix='.tmp') as f: + self.assertTrue(hidden.is_hidden(f.name)) ++ ++ ++def suite(): ++ return unittest.TestLoader().loadTestsFromName(__name__) ++ ++if __name__ == '__main__': ++ unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/debian/patches/fix-test_mediafile_edge beets-1.3.19/debian/patches/fix-test_mediafile_edge --- beets-1.3.8+dfsg/debian/patches/fix-test_mediafile_edge 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/debian/patches/fix-test_mediafile_edge 2016-08-23 00:56:25.000000000 +0000 @@ -0,0 +1,15 @@ +Description: A different exception is now returned for unreadable files +Origin: upstream, https://github.com/beetbox/beets/pull/2088 +Author: Christoph Reiter <reiter.christoph@gmail.com> + +--- a/test/test_mediafile_edge.py ++++ b/test/test_mediafile_edge.py +@@ -192,7 +192,7 @@ + fn = os.path.join(_common.RSRC, b'brokenlink') + os.symlink('does_not_exist', fn) + try: +- self.assertRaises(IOError, ++ self.assertRaises(beets.mediafile.UnreadableFileError, + beets.mediafile.MediaFile, fn) + finally: + os.unlink(fn) diff -Nru beets-1.3.8+dfsg/debian/patches/fix-test_nonexistent_file beets-1.3.19/debian/patches/fix-test_nonexistent_file --- beets-1.3.8+dfsg/debian/patches/fix-test_nonexistent_file 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/debian/patches/fix-test_nonexistent_file 2016-08-29 23:18:14.000000000 +0000 @@ -0,0 +1,34 @@ +Description: Manage patching of try_filesize +Author: Jesse Weinstein <jesse@wefu.org> +Origin: upstream, https://github.com/beetbox/beets/issues/2137 +Bug-Debian: https://bugs.debian.org/835937 +Bug-Upstream: https://github.com/beetbox/beets/issues/2135 + +--- a/test/test_ui.py ++++ b/test/test_ui.py +@@ -24,7 +24,7 @@ import subprocess + import platform + from copy import deepcopy + +-from mock import patch ++from mock import patch, Mock + from test import _common + from test._common import unittest + from test.helper import capture_stdout, has_program, TestHelper, control_stdin +@@ -1053,6 +1053,7 @@ class ShowChangeTest(_common.TestCase): + u'caf.mp3 ->' in msg) + + ++@patch('beets.library.Item.try_filesize', Mock(return_value=987)) + class SummarizeItemsTest(_common.TestCase): + def setUp(self): + super(SummarizeItemsTest, self).setUp() +@@ -1061,8 +1062,6 @@ class SummarizeItemsTest(_common.TestCase): + item.length = 10 * 60 + 54 + item.format = "F" + self.item = item +- fsize_mock = patch('beets.library.Item.try_filesize').start() +- fsize_mock.return_value = 987 + + def test_summarize_item(self): + summary = commands.summarize_items([], True) diff -Nru beets-1.3.8+dfsg/debian/patches/no-jellyfish beets-1.3.19/debian/patches/no-jellyfish --- 
beets-1.3.8+dfsg/debian/patches/no-jellyfish 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/debian/patches/no-jellyfish 2016-08-23 06:42:44.000000000 +0000 @@ -0,0 +1,86 @@ +Description: Bundle levenshtein_distance from jellyfish + Debian already has a Python library called jellyfish. While we resolve that + problem, let's avoid the need for re-packaging jellyfish. +Bug-Debian: https://bugs.debian.org/806716 +Author: Stefano Rivera <stefanor@debian.org> + +--- a/beets/autotag/hooks.py ++++ b/beets/autotag/hooks.py +@@ -25,7 +25,7 @@ + from beets import config + from beets.util import as_string + from beets.autotag import mb +-from jellyfish import levenshtein_distance ++from beets.util._jellyfish import levenshtein_distance + from unidecode import unidecode + + log = logging.getLogger('beets') +--- /dev/null ++++ b/beets/util/_jellyfish.py +@@ -0,0 +1,56 @@ ++# Borrowed from Jellyfish (https://github.com/jamesturk/jellyfish) ++# Copyright (c) 2015, James Turk ++# Copyright (c) 2015, Sunlight Foundation ++# ++# All rights reserved. ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions are met: ++# ++# * Redistributions of source code must retain the above copyright notice, ++# this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above copyright ++# notice, this list of conditions and the following disclaimer in the ++# documentation and/or other materials provided with the distribution. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE ++# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++# POSSIBILITY OF SUCH DAMAGE. 
++ ++_range = xrange ++_no_bytes_err = 'expected unicode, got str' ++ ++ ++def levenshtein_distance(s1, s2): ++ if isinstance(s1, bytes) or isinstance(s2, bytes): ++ raise TypeError(_no_bytes_err) ++ ++ if s1 == s2: ++ return 0 ++ rows = len(s1)+1 ++ cols = len(s2)+1 ++ ++ if not s1: ++ return cols-1 ++ if not s2: ++ return rows-1 ++ ++ prev = None ++ cur = range(cols) ++ for r in _range(1, rows): ++ prev, cur = cur, [r] + [0]*(cols-1) ++ for c in _range(1, cols): ++ deletion = prev[c] + 1 ++ insertion = cur[c-1] + 1 ++ edit = prev[c-1] + (0 if s1[r-1] == s2[c-1] else 1) ++ cur[c] = min(edit, deletion, insertion) ++ ++ return cur[-1] +--- a/setup.py ++++ b/setup.py +@@ -92,7 +92,6 @@ + 'unidecode', + 'musicbrainzngs>=0.4', + 'pyyaml', +- 'jellyfish', + ] + (['colorama'] if (sys.platform == 'win32') else []), + + tests_require=[ diff -Nru beets-1.3.8+dfsg/debian/patches/series beets-1.3.19/debian/patches/series --- beets-1.3.8+dfsg/debian/patches/series 2015-01-18 06:38:27.000000000 +0000 +++ beets-1.3.19/debian/patches/series 2016-08-29 23:23:52.000000000 +0000 @@ -1,2 +1,5 @@ -disable-broken-tests -test_bucket-years +fix-test_hidden +no-jellyfish +fix-test_mediafile_edge +fix-test_nonexistent_file +skip-test_query-path-tests diff -Nru beets-1.3.8+dfsg/debian/patches/skip-test_query-path-tests beets-1.3.19/debian/patches/skip-test_query-path-tests --- beets-1.3.8+dfsg/debian/patches/skip-test_query-path-tests 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/debian/patches/skip-test_query-path-tests 2016-08-23 06:22:29.000000000 +0000 @@ -0,0 +1,80 @@ +Description: Skip failing tests caused by SQLITE_LIKE_DOESNT_MATCH_BLOBS + Path matching in beets is broken by SQLITE_LIKE_DOESNT_MATCH_BLOBS. + Let's just skip these tests until upstream has a solution. 
+Author: Stefano Rivera <stefanor@debian.org> +Bug-Upstream: https://github.com/beetbox/beets/issues/2172 + +--- a/test/test_query.py ++++ b/test/test_query.py +@@ -411,6 +411,7 @@ + self.patcher_samefile.stop() + self.patcher_exists.stop() + ++ @unittest.skip('unfixed (#2172)') + def test_path_exact_match(self): + q = u'path:/a/b/c.mp3' + results = self.lib.items(q) +@@ -419,6 +420,7 @@ + results = self.lib.albums(q) + self.assert_albums_matched(results, []) + ++ @unittest.skip('unfixed (#2172)') + def test_parent_directory_no_slash(self): + q = u'path:/a' + results = self.lib.items(q) +@@ -427,6 +429,7 @@ + results = self.lib.albums(q) + self.assert_albums_matched(results, [u'path album']) + ++ @unittest.skip('unfixed (#2172)') + def test_parent_directory_with_slash(self): + q = u'path:/a/' + results = self.lib.items(q) +@@ -451,6 +454,7 @@ + results = self.lib.albums(q) + self.assert_albums_matched(results, []) + ++ @unittest.skip('unfixed (#2172)') + def test_nonnorm_path(self): + q = u'path:/x/../a/b' + results = self.lib.items(q) +@@ -459,6 +463,7 @@ + results = self.lib.albums(q) + self.assert_albums_matched(results, [u'path album']) + ++ @unittest.skip('unfixed (#2172)') + def test_slashed_query_matches_path(self): + q = u'/a/b' + results = self.lib.items(q) +@@ -496,6 +501,7 @@ + results = self.lib.albums(q) + self.assert_albums_matched(results, [u'path album']) + ++ @unittest.skip('unfixed (#2172)') + def test_escape_underscore(self): + self.add_album(path=b'/a/_/title.mp3', title=u'with underscore', + album=u'album with underscore') +@@ -506,6 +512,7 @@ + results = self.lib.albums(q) + self.assert_albums_matched(results, [u'album with underscore']) + ++ @unittest.skip('unfixed (#2172)') + def test_escape_percent(self): + self.add_album(path=b'/a/%/title.mp3', title=u'with percent', + album=u'album with percent') +@@ -516,6 +523,7 @@ + results = self.lib.albums(q) + self.assert_albums_matched(results, [u'album with percent']) + ++ @unittest.skip('unfixed (#2172)') + def test_escape_backslash(self): + self.add_album(path=br'/a/\x/title.mp3', title=u'with backslash', + album=u'album with backslash') +@@ -526,6 +534,7 @@ + results = self.lib.albums(q) + self.assert_albums_matched(results, [u'album with backslash']) + ++ @unittest.skip('unfixed (#2172)') + def test_case_sensitivity(self): + self.add_album(path=b'/A/B/C2.mp3', title=u'caps path') + diff -Nru beets-1.3.8+dfsg/debian/patches/test_bucket-years beets-1.3.19/debian/patches/test_bucket-years --- beets-1.3.8+dfsg/debian/patches/test_bucket-years 2015-01-18 06:39:00.000000000 +0000 +++ beets-1.3.19/debian/patches/test_bucket-years 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -Description: test_bucket: update test_year_single_year - 2015 was used as an example of date outside of [1970-current year] intervall - which is not true anymore -Author: Fabrice Laporte <kraymer@gmail.com> -Origin: upstream, https://github.com/sampsyo/beets/commit/80038e2a3fe6f5ac174a30f6fd01ebf8dd63e414 -Bug-Debian: https://bugs.debian.org/775618 -Last-Updated: 2015-01-17 - ---- a/test/test_bucket.py -+++ b/test/test_bucket.py -@@ -51,7 +51,7 @@ - year.""" - self._setup_config(bucket_year=['1950', '1970']) - self.assertEqual(self.plugin._tmpl_bucket('2014'), '1970') -- self.assertEqual(self.plugin._tmpl_bucket('2015'), '2015') -+ self.assertEqual(self.plugin._tmpl_bucket('2025'), '2025') - - def test_year_two_years(self): - """Buckets can be named with the 'from-to' syntax.""" diff -Nru beets-1.3.8+dfsg/debian/tests/control 
beets-1.3.19/debian/tests/control --- beets-1.3.8+dfsg/debian/tests/control 2012-12-01 21:13:40.000000000 +0000 +++ beets-1.3.19/debian/tests/control 2016-08-30 04:56:10.000000000 +0000 @@ -1,2 +1,13 @@ Tests: unittests -Depends: @, python-nose, python-all, python-unittest2 +Depends: + beets, + python-all, + python-bs4, + python-flask, + python-mock, + python-mpd, + python-pathlib, + python-pylast, + python-rarfile, + python-responses, + python-xdg diff -Nru beets-1.3.8+dfsg/debian/tests/unittests beets-1.3.19/debian/tests/unittests --- beets-1.3.8+dfsg/debian/tests/unittests 2012-12-01 21:13:40.000000000 +0000 +++ beets-1.3.19/debian/tests/unittests 2016-08-30 05:00:59.000000000 +0000 @@ -5,12 +5,16 @@ export LC_ALL=C.UTF-8 export PYTHONPATH=/usr/share/beets -pythons="$(pyversions -rv)" +pythons="$(pyversions -r)" cp -a test $ADTTMP cd "$ADTTMP" -for py in $pythons; do - echo "=== python$py ===" - nosetests-$py test 2>&1 +# The tests assume beetsplug is found above them, and do unspeakable evil with +# __path__ to ensure that their local beetsplug is used. +ln -s /usr/share/beets/beetsplug . + +for python in $pythons; do + echo "=== $python ===" + $python -m unittest discover -p 'test_*' 2>&1 done diff -Nru beets-1.3.8+dfsg/debian/watch beets-1.3.19/debian/watch --- beets-1.3.8+dfsg/debian/watch 2015-01-18 06:38:27.000000000 +0000 +++ beets-1.3.19/debian/watch 2016-08-22 08:01:28.000000000 +0000 @@ -1,4 +1,4 @@ version=3 -opts=uversionmangle=s/(b|rc)/~$1/;s/$/\+dfsg/ \ -https://pypi.python.org/packages/source/b/beets/beets-(.+).tar.gz +opts=uversionmangle=s/(rc|a|b|c)/~$1/ \ +https://pypi.debian.net/beets/beets-(.+)\.(?:zip|tgz|tbz|txz|(?:tar\.(?:gz|bz2|xz))) diff -Nru beets-1.3.8+dfsg/docs/changelog.rst beets-1.3.19/docs/changelog.rst --- beets-1.3.8+dfsg/docs/changelog.rst 2014-09-18 02:16:42.000000000 +0000 +++ beets-1.3.19/docs/changelog.rst 2016-06-26 00:52:28.000000000 +0000 @@ -1,6 +1,961 @@ Changelog ========= +1.3.19 (June 25, 2016) +---------------------- + +This is primarily a bug fix release: it cleans up a couple of regressions that +appeared in the last version. But it also features the triumphant return of the +:doc:`/plugins/beatport` and a modernized :doc:`/plugins/bpd`. + +It's also the first version where beets passes all its tests on Windows! May +this herald a new age of cross-platform reliability for beets. + +New features: + +* :doc:`/plugins/beatport`: This metadata source plugin has arisen from the + dead! It now works with Beatport's new OAuth-based API. Thanks to + :user:`jbaiter`. :bug:`1989` :bug:`2067` +* :doc:`/plugins/bpd`: The plugin now uses the modern GStreamer 1.0 instead of + the old 0.10. Thanks to :user:`philippbeckmann`. :bug:`2057` :bug:`2062` +* A new ``--force`` option for the :ref:`remove-cmd` command allows removal of + items without prompting beforehand. :bug:`2042` +* A new :ref:`duplicate_action` importer config option controls how duplicate + albums or tracks treated in import task. :bug:`185` + +Some fixes for Windows: + +* Queries are now detected as paths when they contain backslashes (in + addition to forward slashes). This only applies on Windows. +* :doc:`/plugins/embedart`: Image similarity comparison with ImageMagick + should now work on Windows. +* :doc:`/plugins/fetchart`: The plugin should work more reliably with + non-ASCII paths. + +And other fixes: + +* :doc:`/plugins/replaygain`: The ``bs1770gain`` backend now correctly + calculates sample peak instead of true peak. This comes with a major + speed increase. 
:bug:`2031` +* :doc:`/plugins/lyrics`: Avoid a crash and a spurious warning introduced in + the last version about a Google API key, which appeared even when you hadn't + enabled the Google lyrics source. +* Fix a hard-coded path to ``bash-completion`` to work better with Homebrew + installations. Thanks to :user:`bismark`. :bug:`2038` +* Fix a crash introduced in the previous version when the standard input was + connected to a Unix pipe. :bug:`2041` +* Fix a crash when specifying non-ASCII format strings on the command line + with the ``-f`` option for many commands. :bug:`2063` +* :doc:`/plugins/fetchart`: Determine the file extension for downloaded images + based on the image's magic bytes. The plugin prints a warning if result is + not consistent with the server-supplied ``Content-Type`` header. In previous + versions, the plugin would use a ``.jpg`` extension for all images. + :bug:`2053` + + +1.3.18 (May 31, 2016) +--------------------- + +This update adds a new :doc:`/plugins/hook` that lets you integrate beets with +command-line tools and an :doc:`/plugins/export` that can dump data from the +beets database as JSON. You can also automatically translate lyrics using a +machine translation service. + +The ``echonest`` plugin has been removed in this version because the API it +used is `shutting down`_. You might want to try the +:doc:`/plugins/acousticbrainz` instead. + +.. _shutting down: https://developer.spotify.com/news-stories/2016/03/29/api-improvements-update/ + +Some of the larger new features: + +* The new :doc:`/plugins/hook` lets you execute commands in response to beets + events. +* The new :doc:`/plugins/export` can export data from beets' database as + JSON. Thanks to :user:`GuilhermeHideki`. +* :doc:`/plugins/lyrics`: The plugin can now translate the fetched lyrics to + your native language using the Bing translation API. Thanks to + :user:`Kraymer`. +* :doc:`/plugins/fetchart`: Album art can now be fetched from `fanart.tv`_. + +Smaller new things: + +* There are two new functions available in templates: ``%first`` and ``%ifdef``. + See :ref:`template-functions`. +* :doc:`/plugins/convert`: A new `album_art_maxwidth` setting lets you resize + album art while copying it. +* :doc:`/plugins/convert`: The `extension` setting is now optional for + conversion formats. By default, the extension is the same as the name of the + configured format. +* :doc:`/plugins/importadded`: A new `preserve_write_mtimes` option + lets you preserve mtime of files even when beets updates their metadata. +* :doc:`/plugins/fetchart`: The `enforce_ratio` option now lets you tolerate + images that are *almost* square but differ slightly from an exact 1:1 + aspect ratio. +* :doc:`/plugins/fetchart`: The plugin can now optionally save the artwork's + source in an attribute in the database. +* The :ref:`terminal_encoding` configuration option can now also override the + *input* encoding. (Previously, it only affected the encoding of the standard + *output* stream.) +* A new :ref:`ignore_hidden` configuration option lets you ignore files that + your OS marks as invisible. +* :doc:`/plugins/web`: A new `values` endpoint lets you get the distinct values + of a field. Thanks to :user:`sumpfralle`. :bug:`2010` + +.. _fanart.tv: https://fanart.tv/ + +Fixes: + +* Fix a problem with the :ref:`stats-cmd` command in exact mode when filenames + on Windows use non-ASCII characters. :bug:`1891` +* Fix a crash when iTunes Sound Check tags contained invalid data. 
:bug:`1895` +* :doc:`/plugins/mbcollection`: The plugin now redacts your MusicBrainz + password in the ``beet config`` output. :bug:`1907` +* :doc:`/plugins/scrub`: Fix an occasional problem where scrubbing on import + could undo the :ref:`id3v23` setting. :bug:`1903` +* :doc:`/plugins/lyrics`: Add compatibility with some changes to the + LyricsWiki page markup. :bug:`1912` :bug:`1909` +* :doc:`/plugins/lyrics`: Fix retrieval from Musixmatch by improving the way + we guess the URL for lyrics on that service. :bug:`1880` +* :doc:`/plugins/edit`: Fail gracefully when the configured text editor + command can't be invoked. :bug:`1927` +* :doc:`/plugins/fetchart`: Fix a crash in the Wikipedia backend on non-ASCII + artist and album names. :bug:`1960` +* :doc:`/plugins/convert`: Change the default `ogg` encoding quality from 2 to + 3 (to fit the default from the `oggenc(1)` manpage). :bug:`1982` +* :doc:`/plugins/convert`: The `never_convert_lossy_files` option now + considers AIFF a lossless format. :bug:`2005` +* :doc:`/plugins/web`: A proper 404 error, instead of an internal exception, + is returned when missing album art is requested. Thanks to + :user:`sumpfralle`. :bug:`2011` +* Tolerate more malformed floating-point numbers in metadata tags. :bug:`2014` +* The :ref:`ignore` configuration option now includes the ``lost+found`` + directory by default. +* :doc:`/plugins/acousticbrainz`: AcousticBrainz lookups are now done over + HTTPS. Thanks to :user:`Freso`. :bug:`2007` + + +1.3.17 (February 7, 2016) +------------------------- + +This release introduces one new plugin to fetch audio information from the +`AcousticBrainz`_ project and another plugin to make it easier to submit your +handcrafted metadata back to MusicBrainz. +The importer also gained two oft-requested features: a way to skip the initial +search process by specifying an ID ahead of time, and a way to *manually* +provide metadata in the middle of the import process (via the +:doc:`/plugins/edit`). + +Also, as of this release, the beets project has some new Internet homes! Our +new domain name is `beets.io`_, and we have a shiny new GitHub organization: +`beetbox`_. + +Here are the big new features: + +* A new :doc:`/plugins/acousticbrainz` fetches acoustic-analysis information + from the `AcousticBrainz`_ project. Thanks to :user:`opatel99`, and thanks + to `Google Code-In`_! :bug:`1784` +* A new :doc:`/plugins/mbsubmit` lets you print music's current metadata in a + format that the MusicBrainz data parser can understand. You can trigger it + during an interactive import session. :bug:`1779` +* A new ``--search-id`` importer option lets you manually specify + IDs (i.e., MBIDs or Discogs IDs) for imported music. Doing this skips the + initial candidate search, which can be important for huge albums where this + initial lookup is slow. + Also, the ``enter Id`` prompt choice now accepts several IDs, separated by + spaces. :bug:`1808` +* :doc:`/plugins/edit`: You can now edit metadata *on the fly* during the + import process. The plugin provides two new interactive options: one to edit + *your music's* metadata, and one to edit the *matched metadata* retrieved + from MusicBrainz (or another data source). This feature is still in its + early stages, so please send feedback if you find anything missing. + :bug:`1846` :bug:`396` + +There are even more new features: + +* :doc:`/plugins/fetchart`: The Google Images backend has been restored. It + now requires an API key from Google. Thanks to :user:`lcharlick`. 
+ :bug:`1778` +* :doc:`/plugins/info`: A new option will print only fields' names and not + their values. Thanks to :user:`GuilhermeHideki`. :bug:`1812` +* The :ref:`fields-cmd` command now displays flexible attributes. + Thanks to :user:`GuilhermeHideki`. :bug:`1818` +* The :ref:`modify-cmd` command lets you interactively select which albums or + items you want to change. :bug:`1843` +* The :ref:`move-cmd` command gained a new ``--timid`` flag to print and + confirm which files you want to move. :bug:`1843` +* The :ref:`move-cmd` command no longer prints filenames for files that + don't actually need to be moved. :bug:`1583` + +.. _Google Code-In: https://codein.withgoogle.com/ +.. _AcousticBrainz: http://acousticbrainz.org/ + +Fixes: + +* :doc:`/plugins/play`: Fix a regression in the last version where there was + no default command. :bug:`1793` +* :doc:`/plugins/lastimport`: The plugin now works again after being broken by + some unannounced changes to the Last.fm API. :bug:`1574` +* :doc:`/plugins/play`: Fixed a typo in a configuration option. The option is + now ``warning_threshold`` instead of ``warning_treshold``, but we kept the + old name around for compatibility. Thanks to :user:`JesseWeinstein`. + :bug:`1802` :bug:`1803` +* :doc:`/plugins/edit`: Editing metadata now moves files, when appropriate + (like the :ref:`modify-cmd` command). :bug:`1804` +* The :ref:`stats-cmd` command no longer crashes when files are missing or + inaccessible. :bug:`1806` +* :doc:`/plugins/fetchart`: Possibly fix a Unicode-related crash when using + some versions of pyOpenSSL. :bug:`1805` +* :doc:`/plugins/replaygain`: Fix an intermittent crash with the GStreamer + backend. :bug:`1855` +* :doc:`/plugins/lastimport`: The plugin now works with the beets API key by + default. You can still provide a different key in the configuration. +* :doc:`/plugins/replaygain`: Fix a crash using the Python Audio Tools + backend. :bug:`1873` + +.. _beets.io: http://beets.io/ +.. _Beetbox: https://github.com/beetbox + + + +1.3.16 (December 28, 2015) +-------------------------- + +The big news in this release is a new :doc:`interactive editor plugin +</plugins/edit>`. It's really nifty: you can now change your music's metadata +by making changes in a visual text editor, which can sometimes be far more +efficient than the built-in :ref:`modify-cmd` command. No more carefully +retyping the same artist name with slight capitalization changes. + +This version also adds an oft-requested "not" operator to beets' queries, so +you can exclude music from any operation. It also brings friendlier formatting +(and querying!) of song durations. + +The big new stuff: + +* A new :doc:`/plugins/edit` lets you manually edit your music's metadata + using your favorite text editor. :bug:`164` :bug:`1706` +* Queries can now use "not" logic. Type a ``^`` before part of a query to + *exclude* matching music from the results. For example, ``beet list -a + beatles ^album:1`` will find all your albums by the Beatles except for their + singles compilation, "1." See :ref:`not_query`. :bug:`819` :bug:`1728` +* A new :doc:`/plugins/embyupdate` can trigger a library refresh on an `Emby`_ + server when your beets database changes. +* Track length is now displayed as "M:SS" rather than a raw number of seconds. + Queries on track length also accept this format: for example, ``beet list + length:5:30..`` will find all your tracks that have a duration over 5 + minutes and 30 seconds.
You can turn off this new behavior using the + ``format_raw_length`` configuration option. :bug:`1749` + +Smaller changes: + +* Three commands, ``modify``, ``update``, and ``mbsync``, would previously + move files by default after changing their metadata. Now, these commands + will only move files if you have the :ref:`config-import-copy` or + :ref:`config-import-move` options enabled in your importer configuration. + This way, if you configure the importer not to touch your filenames, other + commands will respect that decision by default too. Each command also + sprouted a ``--move`` command-line option to override this default (in + addition to the ``--nomove`` flag they already had). :bug:`1697` +* A new configuration option, ``va_name``, controls the album artist name for + various-artists albums. The setting defaults to "Various Artists," the + MusicBrainz standard. In order to match MusicBrainz, the + :doc:`/plugins/discogs` also adopts the same setting. +* :doc:`/plugins/info`: The ``info`` command now accepts a ``-f/--format`` + option for customizing how items are displayed, just like the built-in + ``list`` command. :bug:`1737` + +Some changes for developers: + +* Two new :ref:`plugin hooks <plugin_events>`, ``albuminfo_received`` and + ``trackinfo_received``, let plugins intercept metadata as soon as it is + received, before it is applied to music in the database. :bug:`872` +* Plugins can now add options to the interactive importer prompts. See + :ref:`append_prompt_choices`. :bug:`1758` + +Fixes: + +* :doc:`/plugins/plexupdate`: Fix a crash when Plex libraries use non-ASCII + collection names. :bug:`1649` +* :doc:`/plugins/discogs`: Maybe fix a crash when using some versions of the + ``requests`` library. :bug:`1656` +* Fix a race in the importer when importing two albums with the same artist + and name in quick succession. The importer would fail to detect them as + duplicates, claiming that there were "empty albums" in the database even + when there were not. :bug:`1652` +* :doc:`plugins/lastgenre`: Clean up the reggae-related genres somewhat. + Thanks to :user:`Freso`. :bug:`1661` +* The importer now correctly moves album art files when re-importing. + :bug:`314` +* :doc:`/plugins/fetchart`: In auto mode, the plugin now skips albums that + already have art attached to them so as not to interfere with re-imports. + :bug:`314` +* :doc:`plugins/fetchart`: The plugin now only resizes album art if necessary, + rather than always by default. :bug:`1264` +* :doc:`plugins/fetchart`: Fix a bug where a database reference to a + non-existent album art file would prevent the command from fetching new art. + :bug:`1126` +* :doc:`/plugins/thumbnails`: Fix a crash with Unicode paths. :bug:`1686` +* :doc:`/plugins/embedart`: The ``remove_art_file`` option now works on import + (as well as with the explicit command). :bug:`1662` :bug:`1675` +* :doc:`/plugins/metasync`: Fix a crash when syncing with recent versions of + iTunes. :bug:`1700` +* :doc:`/plugins/duplicates`: Fix a crash when merging items. :bug:`1699` +* :doc:`/plugins/smartplaylist`: More gracefully handle malformed queries and + missing configuration. +* Fix a crash with some files with unreadable iTunes SoundCheck metadata. + :bug:`1666` +* :doc:`/plugins/thumbnails`: Fix a nasty segmentation fault crash that arose + with some library versions. :bug:`1433` +* :doc:`/plugins/convert`: Fix a crash with Unicode paths in ``--pretend`` + mode. :bug:`1735` +* Fix a crash when sorting by nonexistent fields on queries. 
:bug:`1734` +* Probably fix some mysterious errors when dealing with images using + ImageMagick on Windows. :bug:`1721` +* Fix a crash when writing some Unicode comment strings to MP3s that used + older encodings. The encoding is now always updated to UTF-8. :bug:`879` +* :doc:`/plugins/fetchart`: The Google Images backend has been removed. It + used an API that has been shut down. :bug:`1760` +* :doc:`/plugins/lyrics`: Fix a crash in the Google backend when searching for + bands with regular-expression characters in their names, like Sunn O))). + :bug:`1673` +* :doc:`/plugins/scrub`: In ``auto`` mode, the plugin now *actually* only + scrubs files on import, as the documentation always claimed it did---not + every time files were written, as it previously did. :bug:`1657` +* :doc:`/plugins/scrub`: Also in ``auto`` mode, album art is now correctly + restored. :bug:`1657` +* Possibly allow flexible attributes to be used with the ``%aunique`` template + function. :bug:`1775` +* :doc:`/plugins/lyrics`: The Genius backend is now more robust to + communication errors. The backend has also been disabled by default, since + the API it depends on is currently down. :bug:`1770` + +.. _Emby: http://emby.media + + +1.3.15 (October 17, 2015) +------------------------- + +This release adds a new plugin for checking file quality and a new source for +lyrics. The larger features are: + +* A new :doc:`/plugins/badfiles` helps you scan for corruption in your music + collection. Thanks to :user:`fxthomas`. :bug:`1568` +* :doc:`/plugins/lyrics`: You can now fetch lyrics from Genius.com. + Thanks to :user:`sadatay`. :bug:`1626` :bug:`1639` +* :doc:`/plugins/zero`: The plugin can now use a "whitelist" policy as an + alternative to the (default) "blacklist" mode. Thanks to :user:`adkow`. + :bug:`1621` :bug:`1641` + +And there are smaller new features too: + +* Add new color aliases for standard terminal color names (e.g., cyan and + magenta). Thanks to :user:`mathstuf`. :bug:`1548` +* :doc:`/plugins/play`: A new ``--args`` option lets you specify options for + the player command. :bug:`1532` +* :doc:`/plugins/play`: A new ``raw`` configuration option lets the command + work with players (such as VLC) that expect music filenames as arguments, + rather than in a playlist. Thanks to :user:`nathdwek`. :bug:`1578` +* :doc:`/plugins/play`: You can now configure the number of tracks that + trigger a "lots of music" warning. :bug:`1577` +* :doc:`/plugins/embedart`: A new ``remove_art_file`` option lets you clean up + if you prefer *only* embedded album art. Thanks to :user:`jackwilsdon`. + :bug:`1591` :bug:`733` +* :doc:`/plugins/plexupdate`: A new ``library_name`` option allows you to select + which Plex library to update. :bug:`1572` :bug:`1595` +* A new ``include`` option lets you import external configuration files. + +This release has plenty of fixes: + +* :doc:`/plugins/lastgenre`: Fix a bug that prevented tag popularity from + being considered. Thanks to :user:`svoos`. :bug:`1559` +* Fixed a bug where plugins wouldn't be notified of the deletion of an item's + art, for example with the ``clearart`` command from the + :doc:`/plugins/embedart`. Thanks to :user:`nathdwek`. :bug:`1565` +* :doc:`/plugins/fetchart`: The Google Images source is disabled by default + (as it was before beets 1.3.9), as is the Wikipedia source (which was + causing lots of unnecessary delays due to DBpedia downtime). To re-enable + these sources, add ``wikipedia google`` to your ``sources`` configuration + option. 
+* The :ref:`list-cmd` command's help output now has a small query and format + string example. Thanks to :user:`pkess`. :bug:`1582` +* :doc:`/plugins/fetchart`: The plugin now fetches PNGs but not GIFs. (It + still fetches JPEGs.) This avoids an error when trying to embed images, + since not all formats support GIFs. :bug:`1588` +* Date fields are now written in the correct order (year-month-day), which + eliminates an intermittent bug where the latter two fields would not get + written to files. Thanks to :user:`jdetrey`. :bug:`1303` :bug:`1589` +* :doc:`/plugins/replaygain`: Avoid a crash when the PyAudioTools backend + encounters an error. :bug:`1592` +* The case sensitivity of path queries is more useful now: rather than just + guessing based on the platform, we now check the case sensitivity of your + filesystem. :bug:`1586` +* Case-insensitive path queries might have returned nothing because of a + wrong SQL query. +* Fix a crash when a query contains a "+" or "-" alone in a component. + :bug:`1605` +* Fixed unit of file size to powers of two (MiB, GiB, etc.) instead of powers + of ten (MB, GB, etc.). :bug:`1623` + + +1.3.14 (August 2, 2015) +----------------------- + +This is mainly a bugfix release, but we also have a nifty new plugin for +`ipfs`_ and a bunch of new configuration options. + +The new features: + +* A new :doc:`/plugins/ipfs` lets you share music via a new, global, + decentralized filesystem. :bug:`1397` +* :doc:`/plugins/duplicates`: You can now merge duplicate + track metadata (when detecting duplicate items), or duplicate album + tracks (when detecting duplicate albums). +* :doc:`/plugins/duplicates`: Duplicate resolution now uses an ordering to + prioritize duplicates. By default, it prefers music with more complete + metadata, but you can configure it to use any list of attributes. +* :doc:`/plugins/metasync`: Added a new backend to fetch metadata from iTunes. + This plugin is still in an experimental phase. :bug:`1450` +* The `move` command has a new ``--pretend`` option, making the command show + how the items will be moved without actually changing anything. +* The importer now supports matching of "pregap" or HTOA (hidden track-one + audio) tracks when they are listed in MusicBrainz. (This feature depends on a + new version of the ``musicbrainzngs`` library that is not yet released, but + will start working when it is available.) Thanks to :user:`ruippeixotog`. + :bug:`1104` :bug:`1493` +* :doc:`/plugins/plexupdate`: A new ``token`` configuration option lets you + specify a key for Plex Home setups. Thanks to :user:`edcarroll`. :bug:`1494` + +Fixes: + +* :doc:`/plugins/fetchart`: Complain when the `enforce_ratio` + or `min_width` options are enabled but no local imaging backend is available + to carry them out. :bug:`1460` +* :doc:`/plugins/importfeeds`: Avoid generating incorrect m3u filename when + both of the `m3u` and `m3u_multi` options are enabled. :bug:`1490` +* :doc:`/plugins/duplicates`: Avoid a crash when misconfigured. :bug:`1457` +* :doc:`/plugins/mpdstats`: Avoid a crash when the music played is not in the + beets library. Thanks to :user:`CodyReichert`. :bug:`1443` +* Fix a crash with ArtResizer on Windows systems (affecting + :doc:`/plugins/embedart`, :doc:`/plugins/fetchart`, + and :doc:`/plugins/thumbnails`). :bug:`1448` +* :doc:`/plugins/permissions`: Fix an error with non-ASCII paths. :bug:`1449` +* Fix sorting by paths when the :ref:`sort_case_insensitive` option is + enabled. 
:bug:`1451` +* :doc:`/plugins/embedart`: Avoid an error when trying to embed invalid images + into MPEG-4 files. +* :doc:`/plugins/fetchart`: The Wikipedia source can now better deal with artists + that use non-standard capitalization (e.g., alt-J, dEUS). +* :doc:`/plugins/web`: Fix searching for non-ASCII queries. Thanks to + :user:`oldtopman`. :bug:`1470` +* :doc:`/plugins/mpdupdate`: We now recommend the newer ``python-mpd2`` + library instead of its unmaintained parent. Thanks to :user:`Somasis`. + :bug:`1472` +* The importer interface and log file now output a useful list of files + (instead of the word "None") when in album-grouping mode. :bug:`1475` + :bug:`825` +* Fix some logging errors when filenames and other user-provided strings + contain curly braces. :bug:`1481` +* Regular expression queries over paths now work more reliably with non-ASCII + characters in filenames. :bug:`1482` +* Fix a bug where the autotagger's :ref:`ignored` setting was sometimes, well, + ignored. :bug:`1487` +* Fix a bug with Unicode strings when generating image thumbnails. :bug:`1485` +* :doc:`/plugins/keyfinder`: Fix handling of Unicode paths. :bug:`1502` +* :doc:`/plugins/fetchart`: When album art is already present, the message is + now printed in the ``text_highlight_minor`` color (light gray). Thanks to + :user:`Somasis`. :bug:`1512` +* Some messages in the console UI now use plural nouns correctly. Thanks to + :user:`JesseWeinstein`. :bug:`1521` +* Sorting numerical fields (such as track) now works again. :bug:`1511` +* :doc:`/plugins/replaygain`: Missing GStreamer plugins now cause a helpful + error message instead of a crash. :bug:`1518` +* Fix an edge case when producing sanitized filenames where the maximum path + length conflicted with the :ref:`replace` rules. Thanks to Ben Ockmore. + :bug:`496` :bug:`1361` +* Fix an incompatibility with OS X 10.11 (where ``/usr/sbin`` seems not to be + on the user's path by default). +* Fix an incompatibility with certain JPEG files. Here's a relevant `Python + bug`_. Thanks to :user:`nathdwek`. :bug:`1545` +* Fix the :ref:`group_albums` importer mode so that it works correctly when + files are not already in order by album. :bug:`1550` +* The ``fields`` command no longer separates built-in fields from + plugin-provided ones. This distinction was becoming increasingly unreliable. +* :doc:`/plugins/duplicates`: Fix a Unicode warning when paths contained + non-ASCII characters. :bug:`1551` +* :doc:`/plugins/fetchart`: Work around a urllib3 bug that could cause a + crash. :bug:`1555` :bug:`1556` +* When you edit the configuration file with ``beet config -e`` and the file + does not exist, beets creates an empty file before editing it. This fixes an + error on OS X, where the ``open`` command does not work with non-existent + files. :bug:`1480` + +.. _Python bug: http://bugs.python.org/issue16512 +.. _ipfs: http://ipfs.io + + +1.3.13 (April 24, 2015) +----------------------- + +This is a tiny bug-fix release. It copes with a dependency upgrade that broke +beets. There are just two fixes: + +* Fix compatibility with `Jellyfish`_ version 0.5.0. +* :doc:`/plugins/embedart`: In ``auto`` mode (the import hook), the plugin now + respects the ``write`` config option under ``import``. If this is disabled, + album art is no longer embedded on import in order to leave files + untouched---in effect, ``auto`` is implicitly disabled.
:bug:`1427` + + +1.3.12 (April 18, 2015) +----------------------- + +This little update makes queries more powerful, sorts music more +intelligently, and removes a performance bottleneck. There's an experimental +new plugin for synchronizing metadata with music players. + +Packagers should also note a new dependency in this version: the `Jellyfish`_ +Python library makes our text comparisons (a big part of the auto-tagging +process) go much faster. + +New features: + +* Queries can now use **"or" logic**: if you use a comma to separate parts of a + query, items and albums will match *either* side of the comma. For example, + ``beet ls foo , bar`` will get all the items matching `foo` or matching + `bar`. See :ref:`combiningqueries`. :bug:`1423` +* The autotagger's **matching algorithm is faster**. We now use the + `Jellyfish`_ library to compute string similarity, which is better optimized + than our hand-rolled edit distance implementation. :bug:`1389` +* Sorting is now **case insensitive** by default. This means that artists will + be sorted lexicographically regardless of case. For example, the artist + alt-J will now properly sort before YACHT. (Previously, it would have ended + up at the end of the list, after all the capital-letter artists.) + You can turn this new behavior off using the :ref:`sort_case_insensitive` + configuration option. See :ref:`query-sort`. :bug:`1429` +* An experimental new :doc:`/plugins/metasync` lets you get metadata from your + favorite music players, starting with Amarok. :bug:`1386` +* :doc:`/plugins/fetchart`: There are new settings to control what constitutes + "acceptable" images. The `minwidth` option constrains the minimum image + width in pixels and the `enforce_ratio` option requires that images be + square. :bug:`1394` + +Little fixes and improvements: + +* :doc:`/plugins/fetchart`: Remove a hard size limit when fetching from the + Cover Art Archive. +* The output of the :ref:`fields-cmd` command is now sorted. Thanks to + :user:`multikatt`. :bug:`1402` +* :doc:`/plugins/replaygain`: Fix a number of issues with the new + ``bs1770gain`` backend on Windows. Also, fix missing debug output in import + mode. :bug:`1398` +* Beets should now be better at guessing the appropriate output encoding on + Windows. (Specifically, the console output encoding is guessed separately + from the encoding for command-line arguments.) A bug was also fixed where + beets would ignore the locale settings and use UTF-8 by default. :bug:`1419` +* :doc:`/plugins/discogs`: Better error handling when we can't communicate + with Discogs on setup. :bug:`1417` +* :doc:`/plugins/importadded`: Fix a crash when importing singletons in-place. + :bug:`1416` +* :doc:`/plugins/fuzzy`: Fix a regression causing a crash in the last release. + :bug:`1422` +* Fix a crash when the importer cannot open its log file. Thanks to + :user:`barsanuphe`. :bug:`1426` +* Fix an error when trying to write tags for items with flexible fields called + `date` and `original_date` (which are not built-in beets fields). + :bug:`1404` + +.. _Jellyfish: https://github.com/sunlightlabs/jellyfish + + +1.3.11 (April 5, 2015) +---------------------- + +In this release, we refactored the logging system to be more flexible and more +useful. There are more granular levels of verbosity, the output from plugins +should be more consistent, and several kinds of logging bugs should be +impossible in the future. 
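+
+For plugin authors, the practical upshot of the logging changes (described in
+more detail under "For developers" below) is that each plugin logs through its
+own ``self._log`` with lazy ``{}``-style arguments. A minimal sketch, assuming
+the standard ``BeetsPlugin`` base class and the ``album_imported`` event (the
+plugin name and message here are illustrative)::
+
+    from beets.plugins import BeetsPlugin
+
+    class LoggingExamplePlugin(BeetsPlugin):
+        def __init__(self):
+            super(LoggingExamplePlugin, self).__init__()
+            self.register_listener('album_imported', self.album_imported)
+
+        def album_imported(self, lib, album):
+            # The per-plugin logger prefixes messages with the plugin name,
+            # follows the -v/-vv verbosity flags, and formats its arguments
+            # lazily.
+            self._log.info(u'imported {0.albumartist} - {0.album}', album)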
+ +There are also two new plugins: one for filtering the files you import and an +evolved plugin for using album art as directory thumbnails in file managers. +There's a new source for album art, and the importer now records the source of +match data. This is a particularly huge release---there's lots more below. + +There's one big change with this release: **Python 2.6 is no longer +supported**. You'll need Python 2.7. Please trust us when we say this let us +remove a surprising number of ugly hacks throughout the code. + +Major new features and bigger changes: + +* There are now **multiple levels of output verbosity**. On the command line, + you can make beets somewhat verbose with ``-v`` or very verbose with + ``-vv``. For the importer especially, this makes the first verbose mode much + more manageable, while still preserving an option for overwhelmingly verbose + debug output. :bug:`1244` +* A new :doc:`/plugins/filefilter` lets you write regular expressions to + automatically **avoid importing** certain files. Thanks to :user:`mried`. + :bug:`1186` +* A new :doc:`/plugins/thumbnails` generates cover-art **thumbnails for + album folders** for Freedesktop.org-compliant file managers. (This replaces + the :doc:`/plugins/freedesktop`, which only worked with the Dolphin file + manager.) +* :doc:`/plugins/replaygain`: There is a new backend that uses the + `bs1770gain`_ analysis tool. Thanks to :user:`jmwatte`. :bug:`1343` +* A new ``filesize`` field on items indicates the number of bytes in the file. + :bug:`1291` +* A new :ref:`searchlimit` configuration option allows you to specify how many + search results you wish to see when looking up releases at MusicBrainz + during import. :bug:`1245` +* The importer now records the data source for a match in a new + flexible attribute `data_source` on items and albums. :bug:`1311` +* The colors used in the terminal interface are now configurable via the new + config option ``colors``, nested under the option ``ui``. (Also, the `color` + config option has been moved from top-level to under ``ui``. Beets will + respect the old color setting, but will warn the user with a deprecation + message.) :bug:`1238` +* :doc:`/plugins/fetchart`: There's a new Wikipedia image source that uses + DBpedia to find albums. Thanks to Tom Jaspers. :bug:`1194` +* In the :ref:`config-cmd` command, the output is now redacted by default. + Sensitive information like passwords and API keys is not included. The new + ``--clear`` option disables redaction. :bug:`1376` + +You should probably also know about these core changes to the way beets works: + +* As mentioned above, Python 2.6 is no longer supported. +* The ``tracktotal`` attribute is now a *track-level field* instead of an + album-level one. This field stores the total number of tracks on the + album, or if the :ref:`per_disc_numbering` config option is set, the total + number of tracks on a particular medium (i.e., disc). The field was causing + problems with that :ref:`per_disc_numbering` mode: different discs on the + same album needed different track totals. The field can now work correctly + in either mode. +* To replace ``tracktotal`` as an album-level field, there is a new + ``albumtotal`` computed attribute that provides the total number of tracks + on the album. (The :ref:`per_disc_numbering` option has no influence on this + field.) +* The `list_format_album` and `list_format_item` configuration keys + now affect (almost) every place where objects are printed and logged. 
+ (Previously, they only controlled the :ref:`list-cmd` command and a few + other scattered pieces.) :bug:`1269` +* Relatedly, the ``beet`` program now accepts top-level options + ``--format-item`` and ``--format-album`` before any subcommand to control + how items and albums are displayed. :bug:`1271` +* `list_format_album` and `list_format_item` have respectively been + renamed :ref:`format_album` and :ref:`format_item`. The old names still work + but each triggers a warning message. :bug:`1271` +* :ref:`Path queries <pathquery>` are automatically triggered only if the + path targeted by the query exists. Previously, just having a slash somewhere + in the query was enough, so ``beet ls AC/DC`` wouldn't work to refer to the + artist. + +There are also lots of medium-sized features in this update: + +* :doc:`/plugins/duplicates`: The command has a new ``--strict`` option + that will only report duplicates if all attributes are explicitly set. + :bug:`1000` +* :doc:`/plugins/smartplaylist`: Playlist updating should now be faster: the + plugin detects, for each playlist, whether it needs to be regenerated, + instead of obliviously regenerating all of them. The ``splupdate`` command + can now also take additional parameters that indicate the names of the + playlists to regenerate. +* :doc:`/plugins/play`: The command shows the output of the underlying player + command and lets you interact with it. :bug:`1321` +* The summary shown to compare duplicate albums during import now displays + the old and new filesizes. :bug:`1291` +* :doc:`/plugins/lastgenre`: Add *comedy*, *humor*, and *stand-up* as well as + a longer list of classical music genre tags to the built-in whitelist and + canonicalization tree. :bug:`1206` :bug:`1239` :bug:`1240` +* :doc:`/plugins/web`: Add support for *cross-origin resource sharing* for + more flexible in-browser clients. Thanks to Andre Miller. :bug:`1236` + :bug:`1237` +* :doc:`plugins/mbsync`: A new ``-f/--format`` option controls the output + format when listing unrecognized items. The output is also now more helpful + by default. :bug:`1246` +* :doc:`/plugins/fetchart`: A new option, ``-n``, extracts the cover art of + all matched albums into their respective directories. Another new flag, + ``-a``, associates the extracted files with the albums in the database. + :bug:`1261` +* :doc:`/plugins/info`: A new option, ``-i``, can display only a specified + subset of properties. :bug:`1287` +* The number of missing/unmatched tracks is shown during import. :bug:`1088` +* :doc:`/plugins/permissions`: The plugin now also adjusts the permissions of + the directories. (Previously, it only affected files.) :bug:`1308` :bug:`1324` +* :doc:`/plugins/ftintitle`: You can now configure the format that the plugin + uses to add the artist to the title. Thanks to :user:`amishb`. :bug:`1377` + +And many little fixes and improvements: + +* :doc:`/plugins/replaygain`: Stop applying replaygain directly to source files + when using the mp3gain backend. :bug:`1316` +* Path queries are case-sensitive on non-Windows OSes. :bug:`1165` +* :doc:`/plugins/lyrics`: Silence a warning about insecure requests in the new + MusixMatch backend. :bug:`1204` +* Fix a crash when ``beet`` is invoked without arguments. :bug:`1205` + :bug:`1207` +* :doc:`/plugins/fetchart`: Do not attempt to import directories as album art. + :bug:`1177` :bug:`1211` +* :doc:`/plugins/mpdstats`: Avoid double-counting some play events.
:bug:`773` + :bug:`1212` +* Fix a crash when the importer deals with Unicode metadata in ``--pretend`` + mode. :bug:`1214` +* :doc:`/plugins/smartplaylist`: Fix ``album_query`` so that individual files + are added to the playlist instead of directories. :bug:`1225` +* Remove the ``beatport`` plugin. `Beatport`_ has shut off public access to + their API and denied our request for an account. We have not heard from the + company since 2013, so we are assuming access will not be restored. +* Incremental imports now (once again) show a "skipped N directories" message. +* :doc:`/plugins/embedart`: Handle errors in ImageMagick's output. :bug:`1241` +* :doc:`/plugins/keyfinder`: Parse the underlying tool's output more robustly. + :bug:`1248` +* :doc:`/plugins/embedart`: We now show a comprehensible error message when + ``beet embedart -f FILE`` is given a non-existent path. :bug:`1252` +* Fix a crash when a file has an unrecognized image type tag. Thanks to + Matthias Kiefer. :bug:`1260` +* :doc:`/plugins/importfeeds` and :doc:`/plugins/smartplaylist`: Automatically + create parent directories for playlist files (instead of crashing when the + parent directory does not exist). :bug:`1266` +* The :ref:`write-cmd` command no longer tries to "write" non-writable fields, + such as the bitrate. :bug:`1268` +* The error message when MusicBrainz is not reachable on the network is now + much clearer. Thanks to Tom Jaspers. :bug:`1190` :bug:`1272` +* Improve error messages when parsing query strings with shlex. :bug:`1290` +* :doc:`/plugins/embedart`: Fix a crash that occurred when used together + with the *check* plugin. :bug:`1241` +* :doc:`/plugins/scrub`: Log an error instead of stopping when the ``beet + scrub`` command cannot write a file. Also, avoid problems on Windows with + Unicode filenames. :bug:`1297` +* :doc:`/plugins/discogs`: Handle and log more kinds of communication + errors. :bug:`1299` :bug:`1305` +* :doc:`/plugins/lastgenre`: Bugs in the `pylast` library can no longer crash + beets. +* :doc:`/plugins/convert`: You can now configure the temporary directory for + conversions. Thanks to :user:`autochthe`. :bug:`1382` :bug:`1383` +* :doc:`/plugins/rewrite`: Fix a regression that prevented the plugin's + rewriting from applying to album-level fields like ``$albumartist``. + :bug:`1393` +* :doc:`/plugins/play`: The plugin now sorts items according to the + configuration in album mode. +* :doc:`/plugins/fetchart`: The name for extracted art files is taken from the + ``art_filename`` configuration option. :bug:`1258` +* When there's a parse error in a query (for example, when you type a + malformed date in a :ref:`date query <datequery>`), beets now stops with an + error instead of silently ignoring the query component. + +For developers: + +* The ``database_change`` event now sends the item or album that is subject to + a change. +* The ``OptionParser`` is now a ``CommonOptionsParser`` that offers facilities + for adding usual options (``--album``, ``--path`` and ``--format``). See + :ref:`add_subcommands`. :bug:`1271` +* The logging system in beets has been overhauled. Plugins now each have their + own logger, which helps by automatically adjusting the verbosity level in + import mode and by prefixing the plugin's name. Logging levels are + dynamically set when a plugin is called, depending on how it is called + (import stage, event or direct command). Finally, logging calls can (and + should!) use modern ``{}``-style string formatting lazily.
See + :ref:`plugin-logging` in the plugin API docs. +* A new ``import_task_created`` event lets you manipulate import tasks + immediately after they are initialized. It's also possible to replace the + originally created tasks by returning new ones using this event. + +.. _bs1770gain: http://bs1770gain.sourceforge.net + + +1.3.10 (January 5, 2015) +------------------------ + +This version adds a healthy helping of new features and fixes a critical +MPEG-4--related bug. There are more lyrics sources, there are new plugins for +managing permissions and integrating with `Plex`_, and the importer has a new +``--pretend`` flag that shows which music *would* be imported. + +One backwards-compatibility note: the :doc:`/plugins/lyrics` now requires the +`requests`_ library. If you use this plugin, you will need to install the +library by typing ``pip install requests`` or the equivalent for your OS. + +Also, as an advance warning, this will be one of the last releases to support +Python 2.6. If you have a system that cannot run Python 2.7, please consider +upgrading soon. + +The new features are: + +* A new :doc:`/plugins/permissions` makes it easy to fix permissions on music + files as they are imported. Thanks to :user:`xsteadfastx`. :bug:`1098` +* A new :doc:`/plugins/plexupdate` lets you notify a `Plex`_ server when the + database changes. Thanks again to xsteadfastx. :bug:`1120` +* The :ref:`import-cmd` command now has a ``--pretend`` flag that lists the + files that will be imported. Thanks to :user:`mried`. :bug:`1162` +* :doc:`/plugins/lyrics`: Add `Musixmatch`_ source and introduce a new + ``sources`` config option that lets you choose exactly where to look for + lyrics and in which order. +* :doc:`/plugins/lyrics`: Add Brazilian and Spanish sources to Google custom + search engine. +* Add a warning when importing a directory that contains no music. :bug:`1116` + :bug:`1127` +* :doc:`/plugins/zero`: Can now remove embedded images. :bug:`1129` :bug:`1100` +* The :ref:`config-cmd` command can now be used to edit the configuration even + when it has syntax errors. :bug:`1123` :bug:`1128` +* :doc:`/plugins/lyrics`: Added a new ``force`` config option. :bug:`1150` + +As usual, there are loads of little fixes and improvements: + +* Fix a new crash with the latest version of Mutagen (1.26). +* :doc:`/plugins/lyrics`: Avoid fetching truncated lyrics from the Google + backend by merging text blocks separated by empty ``<div>`` tags before + scraping. +* We now print a better error message when the database file is corrupted. +* :doc:`/plugins/discogs`: Only prompt for authentication when running the + :ref:`import-cmd` command. :bug:`1123` +* When deleting fields with the :ref:`modify-cmd` command, do not crash when + the field cannot be removed (i.e., when it does not exist, when it is a + built-in field, or when it is a computed field). :bug:`1124` +* The deprecated ``echonest_tempo`` plugin has been removed. Please use the + ``echonest`` plugin instead. +* ``echonest`` plugin: Fingerprint-based lookup has been removed in + accordance with `API changes`_. :bug:`1121` +* ``echonest`` plugin: Avoid a crash when the song has no duration + information. :bug:`896` +* :doc:`/plugins/lyrics`: Avoid a crash when retrieving non-ASCII lyrics from + the Google backend. :bug:`1135` :bug:`1136` +* :doc:`/plugins/smartplaylist`: Sort specifiers are now respected in queries. + Thanks to :user:`djl`.
:bug:`1138` :bug:`1137` +* :doc:`/plugins/ftintitle` and :doc:`/plugins/lyrics`: Featuring artists can + now be detected when they use the Spanish word *con*. :bug:`1060` + :bug:`1143` +* :doc:`/plugins/mbcollection`: Fix an "HTTP 400" error caused by a change in + the MusicBrainz API. :bug:`1152` +* The ``%`` and ``_`` characters in path queries do not invoke their + special SQL meaning anymore. :bug:`1146` +* :doc:`/plugins/convert`: Command-line argument construction now works + on Windows. Thanks to :user:`mluds`. :bug:`1026` :bug:`1157` :bug:`1158` +* :doc:`/plugins/embedart`: Fix an erroneous missing-art error on Windows. + Thanks to :user:`mluds`. :bug:`1163` +* :doc:`/plugins/importadded`: Now works with in-place and symlinked imports. + :bug:`1170` +* :doc:`/plugins/ftintitle`: The plugin is now quiet when it runs as part of + the import process. Thanks to :user:`Freso`. :bug:`1176` :bug:`1172` +* :doc:`/plugins/ftintitle`: Fix weird behavior when the same artist appears + twice in the artist string. Thanks to Marc Addeo. :bug:`1179` :bug:`1181` +* :doc:`/plugins/lastgenre`: Match songs more robustly when they contain + dashes. Thanks to :user:`djl`. :bug:`1156` +* The :ref:`config-cmd` command can now use ``$EDITOR`` variables with + arguments. + +.. _API changes: http://developer.echonest.com/forums/thread/3650 +.. _Plex: https://plex.tv/ +.. _musixmatch: https://www.musixmatch.com/ + +1.3.9 (November 17, 2014) +------------------------- + +This release adds two new standard plugins to beets: one for synchronizing +Last.fm listening data and one for integrating with Linux desktops. And at +long last, imports can now create symbolic links to music files instead of +copying or moving them. We also gained the ability to search for album art on +the iTunes Store and a new way to compute ReplayGain levels. + +The major new features are: + +* A new :doc:`/plugins/lastimport` lets you download your play count data from + Last.fm into a flexible attribute. Thanks to Rafael Bodill. +* A new :doc:`/plugins/freedesktop` creates metadata files for + Freedesktop.org--compliant file managers. Thanks to :user:`kerobaros`. + :bug:`1056`, :bug:`707` +* A new :ref:`link` option in the ``import`` section creates symbolic links + during import instead of moving or copying. Thanks to Rovanion Luckey. + :bug:`710`, :bug:`114` +* :doc:`/plugins/fetchart`: You can now search for art on the iTunes Store. + There's also a new ``sources`` config option that lets you choose exactly + where to look for images and in which order. +* :doc:`/plugins/replaygain`: A new Python Audio Tools backend was added. + Thanks to Francesco Rubino. :bug:`1070` +* :doc:`/plugins/embedart`: You can now automatically check that new art looks + similar to existing art---ensuring that you only get a better "version" of + the art you already have. See :ref:`image-similarity-check`. +* :doc:`/plugins/ftintitle`: The plugin now runs automatically on import. To + disable this, unset the ``auto`` config flag. + +There are also core improvements and other substantial additions: + +* The ``media`` attribute is now a *track-level field* instead of an + album-level one. This field stores the delivery mechanism for the music, so + in its album-level incarnation, it could not represent heterogeneous + releases---for example, an album consisting of a CD and a DVD. Now, tracks + accurately indicate the media they appear on. Thanks to Heinz Wiesinger. 
+* Re-imports of your existing music (see :ref:`reimport`) now preserve its + added date and flexible attributes. Thanks to Stig Inge Lea Bjørnsen. +* Slow queries, such as those over flexible attributes, should now be much + faster when used with certain commands---notably, the :doc:`/plugins/play`. +* :doc:`/plugins/bpd`: Add a new configuration option for setting the default + volume. Thanks to IndiGit. +* :doc:`/plugins/embedart`: A new ``ifempty`` config option lets you only + embed album art when no album art is present. Thanks to kerobaros. +* :doc:`/plugins/discogs`: Authenticate with the Discogs server. The plugin + now requires a Discogs account due to new API restrictions. Thanks to + :user:`multikatt`. :bug:`1027`, :bug:`1040` + +And countless little improvements and fixes: + +* Standard cover art in APEv2 metadata is now supported. Thanks to Matthias + Kiefer. :bug:`1042` +* :doc:`/plugins/convert`: Avoid a crash when embedding cover art + fails. +* :doc:`/plugins/mpdstats`: Fix an error on start (introduced in the previous + version). Thanks to Zach Denton. +* :doc:`/plugins/convert`: The ``--yes`` command-line flag no longer expects + an argument. +* :doc:`/plugins/play`: Remove the temporary .m3u file after sending it to + the player. +* The importer no longer tries to highlight partial differences in numeric + quantities (track numbers and durations), which was often confusing. +* Date-based queries that are malformed (not parse-able) no longer crash + beets and instead fail silently. +* :doc:`/plugins/duplicates`: Emit an error when the ``checksum`` config + option is set incorrectly. +* The migration from pre-1.1, non-YAML configuration files has been removed. + If you need to upgrade an old config file, use an older version of beets + temporarily. +* :doc:`/plugins/discogs`: Recover from HTTP errors when communicating with + the Discogs servers. Thanks to Dustin Rodriguez. +* :doc:`/plugins/embedart`: Do not log "embedding album art into..." messages + during the import process. +* Fix a crash in the autotagger when files had only whitespace in their + metadata. +* :doc:`/plugins/play`: Fix a potential crash when the command outputs special + characters. :bug:`1041` +* :doc:`/plugins/web`: Queries typed into the search field are now treated as + separate query components. :bug:`1045` +* Date tags that use slashes instead of dashes as separators are now + interpreted correctly. And WMA (ASF) files now map the ``comments`` field to + the "Description" tag (in addition to "WM/Comments"). Thanks to Matthias + Kiefer. :bug:`1043` +* :doc:`/plugins/embedart`: Avoid resizing the image multiple times when + embedding into an album. Thanks to :user:`kerobaros`. :bug:`1028`, + :bug:`1036` +* :doc:`/plugins/discogs`: Avoid a situation where a trailing comma could be + appended to some artist names. :bug:`1049` +* The output of the :ref:`stats-cmd` command is slightly different: the + approximate size is now marked as such, and the total number of seconds only + appears in exact mode. +* :doc:`/plugins/convert`: A new ``copy_album_art`` option puts images + alongside converted files. Thanks to Ángel Alonso. :bug:`1050`, :bug:`1055` +* There is no longer a "conflict" between two plugins that declare the same + field with the same type. Thanks to Peter Schnebel. :bug:`1059` :bug:`1061` +* :doc:`/plugins/chroma`: Limit the number of releases and recordings fetched + as the result of an Acoustid match to avoid extremely long processing times + for very popular music. 
:bug:`1068` +* Fix an issue where modifying an album's field without actually changing it + would not update the corresponding tracks to bring differing tracks back in + line with the album. :bug:`856` +* ``echonest`` plugin: When communicating with the Echo Nest servers + fails repeatedly, log an error instead of exiting. :bug:`1096` +* :doc:`/plugins/lyrics`: Avoid an error when the Google source returns a + result without a title. Thanks to Alberto Leal. :bug:`1097` +* Importing an archive will no longer leave temporary files behind in + ``/tmp``. Thanks to :user:`multikatt`. :bug:`1067`, :bug:`1091` + + 1.3.8 (September 17, 2014) -------------------------- @@ -13,7 +968,7 @@ Flexible field types mean that some functionality that has previously only worked for built-in fields, like range queries, can now work with plugin- and -user-defined fields too. For starters, the :doc:`/plugins/echonest/` and +user-defined fields too. For starters, the ``echonest`` plugin and :doc:`/plugins/mpdstats` now mark the types of the fields they provide---so you can now say, for example, ``beet ls liveness:0.5..1.5`` for the Echo Nest "liveness" attribute. The :doc:`/plugins/types` makes it easy to specify field @@ -257,14 +1212,14 @@ * A new `initial_key` field is available in the database and files' tags. You can set the field manually using a command like ``beet modify initial_key=Am``. -* The :doc:`/plugins/echonest` sets the `initial_key` field if the data is +* The ``echonest`` plugin sets the `initial_key` field if the data is available. * A new :doc:`/plugins/keyfinder` runs a command-line tool to get the key from audio data and store it in the `initial_key` field. There are also many bug fixes and little enhancements: -* :doc:`/plugins/echonest`: Truncate files larger than 50MB before uploading for +* ``echonest`` plugin: Truncate files larger than 50MB before uploading for analysis. * :doc:`/plugins/fetchart`: Fix a crash when the server does not specify a content type. Thanks to Lee Reinhardt. @@ -321,7 +1276,7 @@ queries (combined with "or" logic). Album-level queries are also now possible and automatic playlist regeneration can now be disabled. Thanks to brilnius. -* :doc:`/plugins/echonest`: Echo Nest similarity now weights the tempo in +* ``echonest`` plugin: Echo Nest similarity now weights the tempo in better proportion to other metrics. Also, options were added to specify custom thresholds and output formats. Thanks to Adam M. * Added the :ref:`after_write <plugin_events>` plugin event. @@ -342,7 +1297,7 @@ legibility). * Fixed a regression that made it impossible to use the :ref:`modify-cmd` command to add new flexible fields. Thanks to brilnius. -* :doc:`/plugins/echonest`: Avoid crashing when the audio analysis fails. +* ``echonest`` plugin: Avoid crashing when the audio analysis fails. Thanks to Pedro Silva. * :doc:`/plugins/duplicates`: Fix checksumming command execution for files with quotation marks in their names. Thanks again to Pedro Silva. @@ -418,7 +1373,7 @@ Other little fixes: -* :doc:`/plugins/echonest`: Tempo (BPM) is now always stored as an integer. +* ``echonest`` plugin: Tempo (BPM) is now always stored as an integer. Thanks to Heinz Wiesinger. * Fix Python 2.6 compatibility in some logging statements in :doc:`/plugins/chroma` and :doc:`/plugins/lastgenre`. @@ -459,7 +1414,7 @@ no changes are necessary. Thanks to brilnius. * :doc:`/plugins/fetchart`: When using the ``remote_priority`` config option, local image files are no longer completely ignored. 
-* :doc:`/plugins/echonest`: Fix an issue causing the plugin to appear twice in +* ``echonest`` plugin: Fix an issue causing the plugin to appear twice in the output of the ``beet version`` command. * :doc:`/plugins/lastgenre`: Fix an occasional crash when no tag weight was returned by Last.fm. @@ -484,7 +1439,7 @@ We added some plugins and overhauled some existing ones: -* The new :doc:`/plugins/echonest` plugin can fetch a wide range of `acoustic +* The new ``echonest`` plugin can fetch a wide range of `acoustic attributes`_ from `The Echo Nest`_, including the "speechiness" and "liveness" of each track. The new plugin supersedes an older version (``echonest_tempo``) that only fetched the BPM field. Thanks to Pedro Silva @@ -522,7 +1477,7 @@ * :doc:`/plugins/missing`: Avoid a possible error when an album's ``tracktotal`` field is missing. * :doc:`/plugins/ftintitle`: Fix an error when the sort artist is missing. -* :doc:`/plugins/echonest_tempo`: The plugin should now match songs more +* ``echonest_tempo``: The plugin should now match songs more reliably (i.e., fewer "no tempo found" messages). Thanks to Peter Schnebel. * :doc:`/plugins/convert`: Fix an "Item has no library" error when using the ``auto`` config option. @@ -588,7 +1543,7 @@ (instead of failing with an API error). * :doc:`/plugins/info`: Fail gracefully when used erroneously with a directory. -* :doc:`/plugins/echonest_tempo`: Fix an issue where the plugin could use the +* ``echonest_tempo``: Fix an issue where the plugin could use the tempo from the wrong song when the API did not contain the requested song. * Fix a crash when a file's metadata included a very large number (one wider than 64 bits). These huge numbers are now replaced with zeroes in the @@ -654,7 +1609,7 @@ There's more detail than you could ever need `on the beets blog`_. -.. _on the beets blog: http://beets.radbox.org/blog/flexattr.html +.. _on the beets blog: http://beets.io/blog/flexattr.html 1.2.2 (August 27, 2013) @@ -722,12 +1677,12 @@ Thanks to John Hawthorn. * :doc:`/plugins/web`: Item and album counts are now exposed through the API for use with the Tomahawk resolver. Thanks to Uwe L. Korn. -* Python 2.6 compatibility for :doc:`/plugins/beatport`, +* Python 2.6 compatibility for ``beatport``, :doc:`/plugins/missing`, and :doc:`/plugins/duplicates`. Thanks to Wesley Bitter and Pedro Silva. * Don't move the config file during a null migration. Thanks to Theofilos Intzoglou. -* Fix an occasional crash in the :doc:`/plugins/beatport` when a length +* Fix an occasional crash in the ``beatport`` plugin when a length field was missing from the API response. Thanks to Timothy Appnel. * :doc:`/plugins/scrub`: Handle and log I/O errors. * :doc:`/plugins/lyrics`: The Google backend should now turn up more results. @@ -752,7 +1707,7 @@ * New :doc:`/plugins/discogs`: Get matches from the `Discogs`_ database. Thanks to Artem Ponomarenko and Tai Lee. -* New :doc:`/plugins/beatport`: Get matches from the `Beatport`_ database. +* New ``beatport`` plugin: Get matches from the `Beatport`_ database. Thanks to Johannes Baiter. We also have two other new plugins that can scan your library to check for @@ -860,8 +1815,7 @@ little love. If you're upgrading from 1.0.0 or earlier, this release (like the 1.1 betas) -will automatically migrate your configuration to the new system. See -:doc:`/guides/migration`. +will automatically migrate your configuration to the new system.
* :doc:`/plugins/embedart`: The ``embedart`` command now embeds each album's associated art by default. The ``--file`` option invokes the old behavior, @@ -944,7 +1898,7 @@ * :doc:`/plugins/chroma`: A new ``fingerprint`` command lets you generate and store fingerprints for items that don't yet have them. One more round of applause for Lucas Duailibe. -* :doc:`/plugins/echonest_tempo`: API errors now issue a warning instead of +* ``echonest_tempo``: API errors now issue a warning instead of exiting with an exception. We also avoid an error when track metadata contains newlines. * When the importer encounters an error (insufficient permissions, for @@ -968,7 +1922,7 @@ import command. Thanks to Lucas Duailibe. * :doc:`/plugins/mbcollection`: Show friendly, human-readable errors when MusicBrainz exceptions occur. -* :doc:`/plugins/echonest_tempo`: Catch socket errors that are not handled by +* ``echonest_tempo``: Catch socket errors that are not handled by the Echo Nest library. * :doc:`/plugins/chroma`: Catch Acoustid Web service errors when submitting fingerprints. @@ -1067,7 +2021,7 @@ Dang Mai Hai. * :doc:`/plugins/bpd`: Browse by album artist and album artist sort name. Thanks to Steinþór Pálsson. -* :doc:`/plugins/echonest_tempo`: Don't attempt a lookup when the artist or +* ``echonest_tempo``: Don't attempt a lookup when the artist or track title is missing. * Fix an error when migrating the ``.beetsstate`` file on Windows. * A nicer error message is now given when the configuration file contains tabs. @@ -1081,8 +2035,7 @@ This release entirely revamps beets' configuration system. The configuration file is now a `YAML`_ document and is located, along with other support files, -in a common directory (e.g., ``~/.config/beets`` on Unix-like systems). If -you're upgrading from an earlier version, please see :doc:`/guides/migration`. +in a common directory (e.g., ``~/.config/beets`` on Unix-like systems). .. _YAML: http://en.wikipedia.org/wiki/YAML @@ -1133,7 +2086,7 @@ bugs found since that release. There were a couple of regressions and some bugs in a newly added plugin. -* :doc:`/plugins/echonest_tempo`: If the Echo Nest API limit is exceeded or a +* ``echonest_tempo``: If the Echo Nest API limit is exceeded or a communication error occurs, the plugin now waits and tries again instead of crashing. Thanks to Zach Denton. * :doc:`/plugins/fetchart`: Fix a regression that caused crashes when art was @@ -1163,7 +2116,7 @@ * New plugin: :doc:`/plugins/fuzzy` lets you find albums and tracks using **fuzzy string matching** so you don't have to type (or even remember) their exact names. Thanks to Philippe Mongeau. -* New plugin: :doc:`/plugins/echonest_tempo` fetches **tempo** (BPM) information +* New plugin: ``echonest_tempo`` fetches **tempo** (BPM) information from `The Echo Nest`_. Thanks to David Brenner. * New plugin: :doc:`/plugins/the` adds a template function that helps format text for nicely-sorted directory listings. Thanks to Blemjhoo Tezoulbr. @@ -1244,7 +2197,7 @@ unintentionally loading the plugins they contain. .. _The Echo Nest: http://the.echonest.com/ -.. _Tomahawk resolver: http://beets.radbox.org/blog/tomahawk-resolver.html +.. _Tomahawk resolver: http://beets.io/blog/tomahawk-resolver.html .. _mp3gain: http://mp3gain.sourceforge.net/download.php .. _aacgain: http://aacgain.altosdesign.com @@ -1441,7 +2394,7 @@ * Significant internal restructuring to avoid SQLite locking errors. As part of these changes, the not-very-useful "save" plugin event has been removed. -.. 
_pyacoustid: https://github.com/sampsyo/pyacoustid +.. _pyacoustid: https://github.com/beetbox/pyacoustid 1.0b13 (March 16, 2012) diff -Nru beets-1.3.8+dfsg/docs/conf.py beets-1.3.19/docs/conf.py --- beets-1.3.8+dfsg/docs/conf.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/docs/conf.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,35 +1,41 @@ +# -*- coding: utf-8 -*- + +from __future__ import division, absolute_import, print_function + AUTHOR = u'Adrian Sampson' # General configuration -extensions = ['sphinx.ext.autodoc'] +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.extlinks'] exclude_patterns = ['_build'] source_suffix = '.rst' master_doc = 'index' project = u'beets' -copyright = u'2012, Adrian Sampson' +copyright = u'2016, Adrian Sampson' version = '1.3' -release = '1.3.8' +release = '1.3.19' pygments_style = 'sphinx' -# Options for HTML output +# External links to the bug tracker. +extlinks = { + 'bug': ('https://github.com/beetbox/beets/issues/%s', '#'), + 'user': ('https://github.com/%s', ''), +} -html_theme = 'default' +# Options for HTML output htmlhelp_basename = 'beetsdoc' # Options for LaTeX output - latex_documents = [ ('index', 'beets.tex', u'beets Documentation', AUTHOR, 'manual'), ] # Options for manual page output - man_pages = [ ('reference/cli', 'beet', u'music tagger and library organizer', [AUTHOR], 1), diff -Nru beets-1.3.8+dfsg/docs/dev/plugins.rst beets-1.3.19/docs/dev/plugins.rst --- beets-1.3.8+dfsg/docs/dev/plugins.rst 2014-09-18 02:01:05.000000000 +0000 +++ beets-1.3.19/docs/dev/plugins.rst 2016-06-20 01:53:12.000000000 +0000 @@ -87,8 +87,11 @@ Try running ``pydoc beets.ui`` to see what's available. You can add command-line options to your new command using the ``parser`` member -of the ``Subcommand`` class, which is an ``OptionParser`` instance. Just use it -like you would a normal ``OptionParser`` in an independent script. +of the ``Subcommand`` class, which is a ``CommonOptionsParser`` instance. Just +use it like you would a normal ``OptionParser`` in an independent script. Note +that it offers several methods to add common options: ``--album``, ``--path`` +and ``--format``. This feature is versatile and extensively documented, try +``pydoc beets.ui.CommonOptionsParser`` for more information. .. _plugin_events: @@ -112,38 +115,57 @@ def loaded(): print 'Plugin loaded!' -Pass the name of the event in question to the ``listen`` decorator. The events -currently available are: +Pass the name of the event in question to the ``listen`` decorator. -* *pluginload*: called after all the plugins have been loaded after the ``beet`` +Note that if you want to access an attribute of your plugin (e.g. ``config`` or +``log``) you'll have to define a method and not a function. 
Here is the usual +registration process in this case:: + + from beets.plugins import BeetsPlugin + + class SomePlugin(BeetsPlugin): + def __init__(self): + super(SomePlugin, self).__init__() + self.register_listener('pluginload', self.loaded) + + def loaded(self): + self._log.info('Plugin loaded!') + +The events currently available are: + +* `pluginload`: called after all the plugins have been loaded after the ``beet`` command starts -* *import*: called after a ``beet import`` command finishes (the ``lib`` keyword +* `import`: called after a ``beet import`` command finishes (the ``lib`` keyword argument is a Library object; ``paths`` is a list of paths (strings) that were imported) -* *album_imported*: called with an ``Album`` object every time the ``import`` +* `album_imported`: called with an ``Album`` object every time the ``import`` command finishes adding an album to the library. Parameters: ``lib``, ``album`` -* *item_copied*: called with an ``Item`` object whenever its file is copied. +* `item_copied`: called with an ``Item`` object whenever its file is copied. Parameters: ``item``, ``source`` path, ``destination`` path -* *item_imported*: called with an ``Item`` object every time the importer adds a +* `item_imported`: called with an ``Item`` object every time the importer adds a singleton to the library (not called for full-album imports). Parameters: ``lib``, ``item`` -* *before_item_moved*: called with an ``Item`` object immediately before its +* `before_item_moved`: called with an ``Item`` object immediately before its file is moved. Parameters: ``item``, ``source`` path, ``destination`` path -* *item_moved*: called with an ``Item`` object whenever its file is moved. +* `item_moved`: called with an ``Item`` object whenever its file is moved. Parameters: ``item``, ``source`` path, ``destination`` path -* *item_removed*: called with an ``Item`` object every time an item (singleton +* `item_linked`: called with an ``Item`` object whenever a symlink is created + for a file. + Parameters: ``item``, ``source`` path, ``destination`` path + +* `item_removed`: called with an ``Item`` object every time an item (singleton or album's part) is removed from the library (even when its file is not deleted from disk). -* *write*: called with an ``Item`` object, a ``path``, and a ``tags`` +* `write`: called with an ``Item`` object, a ``path``, and a ``tags`` dictionary just before a file's metadata is written to disk (i.e., just before the file on disk is opened). Event handlers may change the ``tags`` dictionary to customize the tags that are written to the @@ -152,39 +174,70 @@ operation. Beets will catch that exception, print an error message and continue. -* *after_write*: called with an ``Item`` object after a file's metadata is +* `after_write`: called with an ``Item`` object after a file's metadata is written to disk (i.e., just after the file on disk is closed). -* *import_task_start*: called when before an import task begins processing. +* `import_task_created`: called immediately after an import task is + initialized. Plugins can use this to, for example, change imported files of a + task before anything else happens. It's also possible to replace the task + with another task by returning a list of tasks. This list can contain zero + or more `ImportTask`s. Returning an empty list will stop the task. Parameters: ``task`` (an `ImportTask`) and ``session`` (an `ImportSession`). -* *import_task_apply*: called after metadata changes have been applied in an - import task. 
Parameters: ``task`` and ``session``. +* `import_task_start`: called when before an import task begins processing. + Parameters: ``task`` and ``session``. + +* `import_task_apply`: called after metadata changes have been applied in an + import task. This is called on the same thread as the UI, so use this + sparingly and only for tasks that can be done quickly. For most plugins, an + import pipeline stage is a better choice (see :ref:`plugin-stage`). + Parameters: ``task`` and ``session``. -* *import_task_choice*: called after a decision has been made about an import +* `import_task_choice`: called after a decision has been made about an import task. This event can be used to initiate further interaction with the user. Use ``task.choice_flag`` to determine or change the action to be taken. Parameters: ``task`` and ``session``. -* *import_task_files*: called after an import task finishes manipulating the +* `import_task_files`: called after an import task finishes manipulating the filesystem (copying and moving files, writing metadata tags). Parameters: ``task`` and ``session``. -* *library_opened*: called after beets starts up and initializes the main +* `library_opened`: called after beets starts up and initializes the main Library object. Parameter: ``lib``. -* *database_change*: a modification has been made to the library database. The - change might not be committed yet. Parameter: ``lib``. +* `database_change`: a modification has been made to the library database. The + change might not be committed yet. Parameters: ``lib`` and ``model``. -* *cli_exit*: called just before the ``beet`` command-line program exits. +* `cli_exit`: called just before the ``beet`` command-line program exits. Parameter: ``lib``. +* `import_begin`: called just before a ``beet import`` session starts up. + Parameter: ``session``. + +* `trackinfo_received`: called after metadata for a track item has been + fetched from a data source, such as MusicBrainz. You can modify the tags + that the rest of the pipeline sees on a ``beet import`` operation or during + later adjustments, such as ``mbsync``. Slow handlers of the event can impact + the operation, since the event is fired for any fetched possible match + `before` the user (or the autotagger machinery) gets to see the match. + Parameter: ``info``. + +* `albuminfo_received`: like `trackinfo_received`, the event indicates new + metadata for album items. The parameter is an ``AlbumInfo`` object instead + of a ``TrackInfo``. + Parameter: ``info``. + +* `before_choose_candidate`: called before the user is prompted for a decision + during a ``beet import`` interactive session. Plugins can use this event for + :ref:`appending choices to the prompt <append_prompt_choices>` by returning a + list of ``PromptChoices``. Parameters: ``task`` and ``session``. + The included ``mpdupdate`` plugin provides an example use case for event listeners. Extend the Autotagger ^^^^^^^^^^^^^^^^^^^^^ -Plugins in can also enhance the functionality of the autotagger. For a +Plugins can also enhance the functionality of the autotagger. For a comprehensive example, try looking at the ``chroma`` plugin, which is included with beets. @@ -251,6 +304,14 @@ import the `config` object from the `beets` module. That is, just put ``from beets import config`` at the top of your plugin and access values from there. 
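As an illustration, here is a minimal sketch of both styles of configuration access; the ``ExamplePlugin`` class, its ``auto`` option, and the ``handle_item`` method are hypothetical names used only for this example::

    from beets import config
    from beets.plugins import BeetsPlugin

    class ExamplePlugin(BeetsPlugin):
        def __init__(self):
            super(ExamplePlugin, self).__init__()
            # Defaults for this plugin's own options, which live in a
            # config section named after the plugin.
            self.config.add({
                'auto': True,
            })

        def handle_item(self, item):
            # Read one of the plugin's own options, coerced to a bool.
            auto = self.config['auto'].get(bool)
            # Read a global beets option via the shared config object.
            write_files = config['import']['write'].get(bool)
            self._log.debug(u'auto={0}, write={1}', auto, write_files)

Per-plugin settings are reached through ``self.config``, while the imported ``config`` object exposes the entire configuration tree.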
+If your plugin provides configuration values for sensitive data (e.g., +passwords, API keys, ...), you should add these to the config so they can be +redacted automatically when users dump their config. This can be done by +setting each value's `redact` flag, like so:: + + self.config['password'].redact = True + + Add Path Format Functions and Fields ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -288,7 +349,7 @@ def _tmpl_disc_and_track(item): """Expand to the disc number and track number if this is a - multi-disc release. Otherwise, just exapnds to the track + multi-disc release. Otherwise, just expands to the track number. """ if item.disctotal > 1: @@ -324,7 +385,7 @@ class FooPlugin(BeetsPlugin): def __init__(self): field = mediafile.MediaField( - mediafile.MP3DescStorageStyle(u'foo') + mediafile.MP3DescStorageStyle(u'foo'), mediafile.StorageStyle(u'foo') ) self.add_media_field('foo', field) @@ -338,6 +399,8 @@ # The "foo" tag of the file is now "ham" +.. _plugin-stage: + Add Import Pipeline Stages ^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -353,7 +416,7 @@ and each task is processed by only one stage at a time. Plugins provide stages as functions that take two arguments: ``config`` and -``task``, which are ``ImportConfig`` and ``ImportTask`` objects (both defined in +``task``, which are ``ImportSession`` and ``ImportTask`` objects (both defined in ``beets.importer``). Add such a function to the plugin's ``import_stages`` field to register it:: @@ -362,7 +425,7 @@ def __init__(self): super(ExamplePlugin, self).__init__() self.import_stages = [self.stage] - def stage(self, config, task): + def stage(self, session, task): print('Importing something!') .. _extend-query: @@ -435,3 +498,102 @@ from the command line. * User input for flexible fields may be validated and converted. + + +.. _plugin-logging: + +Logging +^^^^^^^ + +Each plugin object has a ``_log`` attribute, which is a ``Logger`` from the +`standard Python logging module`_. The logger is set up to `PEP 3101`_, +str.format-style string formatting. So you can write logging calls like this:: + + self._log.debug(u'Processing {0.title} by {0.artist}', item) + +.. _PEP 3101: https://www.python.org/dev/peps/pep-3101/ +.. _standard Python logging module: https://docs.python.org/2/library/logging.html + +When beets is in verbose mode, plugin messages are prefixed with the plugin +name to make them easier to see. + +Which messages will be logged depends on the logging level and the action +performed: + +* Inside import stages and event handlers, the default is ``WARNING`` messages + and above. +* Everywhere else, the default is ``INFO`` or above. + +The verbosity can be increased with ``--verbose`` (``-v``) flags: each flags +lowers the level by a notch. That means that, with a single ``-v`` flag, event +handlers won't have their ``DEBUG`` messages displayed, but command functions +(for example) will. With ``-vv`` on the command line, ``DEBUG`` messages will +be displayed everywhere. + +This addresses a common pattern where plugins need to use the same code for a +command and an import stage, but the command needs to print more messages than +the import stage. (For example, you'll want to log "found lyrics for this song" +when you're run explicitly as a command, but you don't want to noisily +interrupt the importer interface when running automatically.) + +.. _append_prompt_choices: + +Append Prompt Choices +^^^^^^^^^^^^^^^^^^^^^ + +Plugins can also append choices to the prompt presented to the user during +an import session. 
+ +To do so, add a listener for the ``before_choose_candidate`` event, and return +a list of ``PromptChoices`` that represent the additional choices that your +plugin shall expose to the user:: + + from beets.plugins import BeetsPlugin + from beets.ui.commands import PromptChoice + + class ExamplePlugin(BeetsPlugin): + def __init__(self): + super(ExamplePlugin, self).__init__() + self.register_listener('before_choose_candidate', + self.before_choose_candidate_event) + + def before_choose_candidate_event(self, session, task): + return [PromptChoice('p', 'Print foo', self.foo), + PromptChoice('d', 'Do bar', self.bar)] + + def foo(self, session, task): + print('User has chosen "Print foo"!') + + def bar(self, session, task): + print('User has chosen "Do bar"!') + +The previous example modifies the standard prompt:: + + # selection (default 1), Skip, Use as-is, as Tracks, Group albums, + Enter search, enter Id, aBort? + +by appending two additional options (``Print foo`` and ``Do bar``):: + + # selection (default 1), Skip, Use as-is, as Tracks, Group albums, + Enter search, enter Id, aBort, Print foo, Do bar? + +If the user selects a choice, the ``callback`` attribute of the corresponding +``PromptChoice`` will be called. It is the responsibility of the plugin to +check for the status of the import session and decide the choices to be +appended: for example, if a particular choice should only be presented if the +album has no candidates, the relevant checks against ``task.candidates`` should +be performed inside the plugin's ``before_choose_candidate_event`` accordingly. + +Please make sure that the short letter for each of the choices provided by the +plugin is not already in use: the importer will emit a warning and discard +all but one of the choices using the same letter, giving priority to the +core importer prompt choices. As a reference, the following characters are used +by the choices on the core importer prompt, and hence should not be used: +``a``, ``s``, ``u``, ``t``, ``g``, ``e``, ``i``, ``b``. + +Additionally, the callback function can optionally specify the next action to +be performed by returning one of the values from ``importer.action``, which +will be passed to the main loop upon the callback has been processed. Note that +``action.MANUAL`` and ``action.MANUAL_ID`` will have no effect even if +returned by the callback, due to the current architecture of the import +process. diff -Nru beets-1.3.8+dfsg/docs/faq.rst beets-1.3.19/docs/faq.rst --- beets-1.3.8+dfsg/docs/faq.rst 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/docs/faq.rst 2016-06-20 01:53:12.000000000 +0000 @@ -138,13 +138,13 @@ 2. Install from source. There are a few easy ways to do this: - Use ``pip`` to install the latest snapshot tarball: just type - ``pip install https://github.com/sampsyo/beets/tarball/master``. + ``pip install https://github.com/beetbox/beets/tarball/master``. - Grab the source using Git: - ``git clone https://github.com/sampsyo/beets.git``. Then + ``git clone https://github.com/beetbox/beets.git``. Then ``cd beets`` and type ``python setup.py install``. - Use ``pip`` to install an "editable" version of beets based on an automatic source checkout. For example, run - ``pip install -e git+https://github.com/sampsyo/beets#egg=beets`` + ``pip install -e git+https://github.com/beetbox/beets#egg=beets`` to clone beets and install it, allowing you to modify the source in-place to try out changes. @@ -157,8 +157,8 @@ …report a bug in beets? 
----------------------- -We use the `issue tracker <https://github.com/sampsyo/beets/issues>`__ -on GitHub. `Enter a new issue <https://github.com/sampsyo/beets/issues/new>`__ +We use the `issue tracker <https://github.com/beetbox/beets/issues>`__ +on GitHub. `Enter a new issue <https://github.com/beetbox/beets/issues/new>`__ there to report a bug. Please follow these guidelines when reporting an issue: - Most importantly: if beets is crashing, please `include the @@ -208,6 +208,23 @@ :ref:`template-functions`. +.. _move-dir: + +…point beets at a new music directory? +-------------------------------------- + +If you want to move your music from one directory to another, the best way is +to let beets do it for you. First, edit your configuration and set the +``directory`` setting to the new place. Then, type ``beet move`` to have beets +move all your files. + +If you've already moved your music *outside* of beets, you have a few options: + +- Move the music back (with an ordinary ``mv``) and then use the above steps. +- Delete your database and re-create it from the new paths using ``beet import -AWC``. +- Resort to manually modifying the SQLite database (not recommended). + + Why does beets… =============== @@ -250,11 +267,11 @@ check which version of beets you have installed.) If you want to live on the bleeding edge and use the latest source -version of beets, you can check out the source (see the next question). +version of beets, you can check out the source (see :ref:`the relevant +question <src>`). To see the beets documentation for your version (and avoid confusion -with new features in trunk), select your version from the -left-hand sidebar (or the buttons at the bottom of the window). +with new features in trunk), select your version from the menu in the sidebar. .. _kill: @@ -308,7 +325,7 @@ FLAC files. If beets still complains about a file that seems to be valid, `file a -bug <https://github.com/sampsyo/beets/issues/new>`__ and we'll look into +bug <https://github.com/beetbox/beets/issues/new>`__ and we'll look into it. There's always a possibility that there's a bug "upstream" in the `Mutagen <http://code.google.com/p/mutagen/>`__ library used by beets, in which case we'll forward the bug to that project's tracker. diff -Nru beets-1.3.8+dfsg/docs/guides/advanced.rst beets-1.3.19/docs/guides/advanced.rst --- beets-1.3.8+dfsg/docs/guides/advanced.rst 2014-04-02 20:04:22.000000000 +0000 +++ beets-1.3.19/docs/guides/advanced.rst 2016-06-20 01:53:12.000000000 +0000 @@ -78,7 +78,7 @@ connect it to beets. `A post on the beets blog`_ has a more detailed guide. .. _A post on the beets blog: - http://beets.radbox.org/blog/tomahawk-resolver.html + http://beets.io/blog/tomahawk-resolver.html .. _Tomahawk: http://www.tomahawk-player.org @@ -112,12 +112,17 @@ The beets database keeps track of a long list of :ref:`built-in fields <itemfields>`, but you're not limited to just that list. Say, for example, that you like to categorize your music by the setting where it should be -played. You can invent a new ``context`` attribute store this. Set the field +played. You can invent a new ``context`` attribute to store this. Set the field using the :ref:`modify-cmd` command:: beet modify context=party artist:'beastie boys' -And then :doc:`query </reference/query>` your music just as you would with any +By default beets will show you the changes that are about to be applied and ask +if you really want to apply them to all, some or none of the items or albums. 
+You can type y for "yes", n for "no", or s for "select". If you choose the latter, +the command will prompt you for each individual matching item or album. + +Then :doc:`query </reference/query>` your music just as you would with any other field:: beet ls context:mope @@ -132,7 +137,7 @@ Read more than you ever wanted to know about the *flexible attributes* feature `on the beets blog`_. -.. _on the beets blog: http://beets.radbox.org/blog/flexattr.html +.. _on the beets blog: http://beets.io/blog/flexattr.html Choose a path style manually for some music diff -Nru beets-1.3.8+dfsg/docs/guides/index.rst beets-1.3.19/docs/guides/index.rst --- beets-1.3.8+dfsg/docs/guides/index.rst 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/docs/guides/index.rst 2016-06-20 01:53:12.000000000 +0000 @@ -11,4 +11,3 @@ main tagger advanced - migration diff -Nru beets-1.3.8+dfsg/docs/guides/main.rst beets-1.3.19/docs/guides/main.rst --- beets-1.3.8+dfsg/docs/guides/main.rst 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/docs/guides/main.rst 2016-06-20 01:53:12.000000000 +0000 @@ -4,18 +4,17 @@ Welcome to `beets`_! This guide will help you begin using it to make your music collection better. -.. _beets: http://beets.radbox.org/ +.. _beets: http://beets.io/ Installing ---------- -You will need Python. (Beets is written for `Python 2.7`_, but it works with -2.6 as well. Python 3.x is not yet supported.) +You will need Python. (Beets is written for `Python 2.7`_. 2.6 support has been +dropped, and Python 3.x is not yet supported.) -.. _Python 2.7: http://www.python.org/download/releases/2.7.2/ +.. _Python 2.7: http://www.python.org/download/ -* **Mac OS X** v10.7 (Lion) and 10.8 (Mountain Lion) include Python 2.7 out of - the box; Snow Leopard ships with Python 2.6. +* **Mac OS X** v10.7 (Lion) and later include Python 2.7 out of the box. * On **Debian or Ubuntu**, depending on the version, beets is available as an official package (`Debian details`_, `Ubuntu details`_), so try typing: @@ -35,15 +34,20 @@ * On **FreeBSD**, there's a `beets port`_ at ``audio/beets``. -* On **OpenBSD-current**, beets is available in ports (at ``audio/beets``) and - as a package, which can be installed with ``pkg_add beets``. +* On **OpenBSD**, beets can be installed with ``pkg_add beets``. * For **Slackware**, there's a `SlackBuild`_ available. +* On **Fedora** 22 or later, there is a `DNF package`_ (or three):: + + $ sudo dnf install beets beets-plugins beets-doc + +.. _copr: https://copr.fedoraproject.org/coprs/afreof/beets/ +.. _dnf package: https://apps.fedoraproject.org/packages/beets .. _SlackBuild: http://slackbuilds.org/repository/14.1/multimedia/beets/ .. _beets port: http://portsmon.freebsd.org/portoverview.py?category=audio&portname=beets -.. _beets from AUR: http://aur.archlinux.org/packages.php?ID=39577 -.. _dev package: http://aur.archlinux.org/packages.php?ID=48617 +.. _beets from AUR: https://aur.archlinux.org/packages/beets-git/ +.. _dev package: https://aur.archlinux.org/packages/beets-git/ .. _Debian details: http://packages.qa.debian.org/b/beets.html .. _Ubuntu details: https://launchpad.net/ubuntu/+source/beets .. _beets is in [community]: https://www.archlinux.org/packages/community/any/beets/ @@ -97,7 +101,7 @@ `the mailing list`_. .. _install Python: http://python.org/download/ -.. _beets.reg: https://github.com/sampsyo/beets/blob/master/extra/beets.reg +.. _beets.reg: https://github.com/beetbox/beets/blob/master/extra/beets.reg .. 
_install pip: http://www.pip-installer.org/en/latest/installing.html#install-pip .. _get-pip.py: https://raw.github.com/pypa/pip/master/contrib/get-pip.py @@ -117,7 +121,9 @@ Change that first path to a directory where you'd like to keep your music. Then, for ``library``, choose a good place to keep a database file that keeps an index -of your music. +of your music. (The config's format is `YAML`_. You'll want to configure your +text editor to use spaces, not real tabs, for indentation.) + The default configuration assumes you want to start a new organized music folder (that ``directory`` above) and that you'll *copy* cleaned-up music into that @@ -151,6 +157,8 @@ here, including the directory and file naming scheme. See :doc:`/reference/config` for a full reference. +.. _YAML: http://yaml.org/ + Importing Your Library ---------------------- diff -Nru beets-1.3.8+dfsg/docs/guides/migration.rst beets-1.3.19/docs/guides/migration.rst --- beets-1.3.8+dfsg/docs/guides/migration.rst 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/docs/guides/migration.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,52 +0,0 @@ -Upgrading from 1.0 -================== - -Prior to version 1.1, beets used a completely different system for -configuration. The config file was in "INI" syntax instead of `YAML`_ and the -various files used by beets were (messily) stored in ``$HOME`` instead of a -centralized beets directory. If you're upgrading from version 1.0 or earlier, -your configuration syntax (and paths) need to be updated to work with the -latest version. - -Fortunately, this should require very little effort on your part. When you -first run beets 1.1, it will look for an old-style ``.beetsconfig`` to -migrate. If it finds one (and there is no new-style -``config.yaml`` yet), beets will warn you and then -transparently convert one to the other. At this point, you'll likely want to: - -* Look at your new configuration file (find out where in - :doc:`/reference/config`) to make sure everything was migrated correctly. -* Remove your old configuration file (``~/.beetsconfig`` on Unix; - ``%APPDATA%\beetsconfig.ini`` on Windows) to avoid confusion in the future. - -You might be interested in the :doc:`/changelog` to see which configuration -option names have changed. - -What's Migrated ---------------- - -Automatic migration is most important for the configuration file, since its -syntax is completely different, but two other files are also moved. This is to -consolidate everything beets needs in a single directory instead of leaving it -messily strewn about in your home directory. - -First, the library database file was at ``~/.beetsmusic.blb`` on Unix and -``%APPDATA%\beetsmusic.blb`` on Windows. This file will be copied to -``library.db`` in the same directory as your new configuration file. Finally, -the runtime state file, which keeps track of interrupted and incremental -imports, was previously known as ``~/.beetsstate``; it is copied to a file -called ``state.pickle``. - -Feel free to remove the old files once they've been copied to their new homes. - -Manual Migration ----------------- - -If you find you need to re-run the migration process, just type ``beet -migrate`` in your shell. This will migrate the configuration file, the -database, and the runtime state file all over again. Unlike automatic -migration, no step is suppressed if the file already exists. If you already -have a ``config.yaml``, for example, it will be renamed to make room for the -newly migrated configuration. - -.. 
_YAML: http://en.wikipedia.org/wiki/YAML diff -Nru beets-1.3.8+dfsg/docs/guides/tagger.rst beets-1.3.19/docs/guides/tagger.rst --- beets-1.3.8+dfsg/docs/guides/tagger.rst 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/docs/guides/tagger.rst 2016-06-20 01:53:12.000000000 +0000 @@ -60,8 +60,8 @@ because beets by default infers tags based on existing metadata. But this is not a hard and fast rule---there are a few ways to tag metadata-poor music: - * You can use the *E* option described below to search in MusicBrainz for - a specific album or song. + * You can use the *E* or *I* options described below to search in + MusicBrainz for a specific album or song. * The :doc:`Acoustid plugin </plugins/chroma>` extends the autotagger to use acoustic fingerprinting to find information for arbitrary audio. Install that plugin if you're willing to spend a little more CPU power @@ -76,7 +76,7 @@ Musepack, Windows Media, Opus, and AIFF files are supported. (Do you use some other format? Please `file a feature request`_!) -.. _file a feature request: https://github.com/sampsyo/beets/issues/new +.. _file a feature request: https://github.com/beetbox/beets/issues/new Now that that's out of the way, let's tag some music. @@ -160,10 +160,10 @@ Beirut - Lon Gisland (Similarity: 94.4%) * Scenic World (Second Version) -> Scenic World - [A]pply, More candidates, Skip, Use as-is, as Tracks, Enter search, or aBort? + [A]pply, More candidates, Skip, Use as-is, as Tracks, Enter search, enter Id, or aBort? When beets asks you this question, it wants you to enter one of the capital -letters: A, M, S, U, T, G, E, or B. That is, you can choose one of the +letters: A, M, S, U, T, G, E, I or B. That is, you can choose one of the following: * *A*: Apply the suggested changes shown and move on. @@ -190,6 +190,11 @@ option if beets hasn't found any good options because the album is mistagged or untagged. +* *I*: Enter a metadata backend ID to use as search in the database. Use this + option to specify a backend entity (for example, a MusicBrainz release or + recording) directly, by pasting its ID or the full URL. You can also specify + several IDs by separating them by a space. + * *B*: Cancel this import task altogether. No further albums will be tagged; beets shuts down immediately. The next time you attempt to import the same directory, though, beets will ask you if you want to resume tagging where you @@ -281,7 +286,7 @@ If you think beets is ignoring an album that's listed in MusicBrainz, please `file a bug report`_. -.. _file a bug report: https://github.com/sampsyo/beets/issues +.. _file a bug report: https://github.com/beetbox/beets/issues I Hope That Makes Sense ----------------------- diff -Nru beets-1.3.8+dfsg/docs/index.rst beets-1.3.19/docs/index.rst --- beets-1.3.8+dfsg/docs/index.rst 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/docs/index.rst 2016-06-20 01:53:12.000000000 +0000 @@ -17,9 +17,9 @@ tracker. Please let us know where you think this documentation can be improved. -.. _beets: http://beets.radbox.org/ +.. _beets: http://beets.io/ .. _the mailing list: http://groups.google.com/group/beets-users -.. _file a bug: https://github.com/sampsyo/beets/issues +.. 
_file a bug: https://github.com/beetbox/beets/issues Contents -------- diff -Nru beets-1.3.8+dfsg/docs/Makefile beets-1.3.19/docs/Makefile --- beets-1.3.8+dfsg/docs/Makefile 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/docs/Makefile 2016-06-20 01:53:12.000000000 +0000 @@ -17,7 +17,7 @@ PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest auto help: @echo "Please use \`make <target>' where <target> is one of" diff -Nru beets-1.3.8+dfsg/docs/plugins/acousticbrainz.rst beets-1.3.19/docs/plugins/acousticbrainz.rst --- beets-1.3.8+dfsg/docs/plugins/acousticbrainz.rst 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/docs/plugins/acousticbrainz.rst 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,53 @@ +AcousticBrainz Plugin +===================== + +The ``acousticbrainz`` plugin gets acoustic-analysis information from the +`AcousticBrainz`_ project. + +.. _AcousticBrainz: http://acousticbrainz.org/ + +Enable the ``acousticbrainz`` plugin in your configuration (see :ref:`using-plugins`) and run it by typing:: + + $ beet acousticbrainz [QUERY] + +For all tracks with a MusicBrainz recording ID, the plugin currently sets +these fields: + +* ``average_loudness`` +* ``chords_changes_rate`` +* ``chords_key`` +* ``chords_number_rate`` +* ``chords_scale`` +* ``danceable`` +* ``gender`` +* ``genre_rosamerica`` +* ``initial_key`` (This is a built-in beets field, which can also be provided + by :doc:`/plugins/keyfinder`.) +* ``key_strength`` +* ``mood_acoustic`` +* ``mood_aggressive`` +* ``mood_electronic`` +* ``mood_happy`` +* ``mood_party`` +* ``mood_relaxed`` +* ``mood_sad`` +* ``rhythm`` +* ``tonal`` +* ``voice_instrumental`` + +Automatic Tagging +----------------- + +To automatically tag files using AcousticBrainz data during import, just +enable the ``acousticbrainz`` plugin (see :ref:`using-plugins`). When importing +new files, beets will query the AcousticBrainz API using MBID and +set the appropriate metadata. + +Configuration +------------- + +To configure the plugin, make a ``acousticbrainz:`` section in your +configuration file. There is one option: + +- **auto**: Enable AcousticBrainz during ``beet import``. + Default: ``yes``. diff -Nru beets-1.3.8+dfsg/docs/plugins/badfiles.rst beets-1.3.19/docs/plugins/badfiles.rst --- beets-1.3.8+dfsg/docs/plugins/badfiles.rst 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/docs/plugins/badfiles.rst 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,54 @@ +Bad Files Plugin +================ + +The ``badfiles`` plugin adds a ``beet bad`` command to check for missing and +corrupt files. + +Configuring +----------- + +First, enable the ``badfiles`` plugin (see :ref:`using-plugins`). The default +configuration defines the following default checkers, which you may need to +install yourself: + +* `mp3val`_ for MP3 files +* `FLAC`_ command-line tools for FLAC files + +You can also add custom commands for a specific extension, like this:: + + badfiles: + commands: + ogg: myoggchecker --opt1 --opt2 + flac: flac --test --warnings-as-errors --silent + +Custom commands will be run once for each file of the specified type, with the +path to the file as the last argument. 
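To make that convention concrete, the plugin conceptually does something like the following sketch for every matching file (this is only an illustration of the calling convention, not the plugin's actual implementation)::

    import shlex
    import subprocess

    def run_checker(command, path):
        # Split the configured command into arguments and append the
        # file path as the last argument.
        args = shlex.split(command) + [path]
        proc = subprocess.Popen(args,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        output, _ = proc.communicate()
        return proc.returncode, output
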
Commands must return a status code +greater than zero for a file to be considered corrupt. + +.. _mp3val: http://mp3val.sourceforge.net/ +.. _flac: https://xiph.org/flac/ + +Using +----- + +Type ``beet bad`` with a query according to beets' usual query syntax. For +instance, this will run a check on all songs containing the word "wolf":: + + beet bad wolf + +This one will run checks on a specific album:: + + beet bad album_id:1234 + +Here is an example where the FLAC decoder signals a corrupt file:: + + beet bad title::^$ + /tank/Music/__/00.flac: command exited with status 1 + 00.flac: *** Got error code 2:FLAC__STREAM_DECODER_ERROR_STATUS_FRAME_CRC_MISMATCH + 00.flac: ERROR while decoding data + state = FLAC__STREAM_DECODER_READ_FRAME + +Note that the default `mp3val` checker is a bit verbose and can output a lot +of "stream error" messages, even for files that play perfectly well. +Generally, if more than one stream error happens, or if a stream error happens +in the middle of a file, this is a bad sign. diff -Nru beets-1.3.8+dfsg/docs/plugins/beatport.rst beets-1.3.19/docs/plugins/beatport.rst --- beets-1.3.8+dfsg/docs/plugins/beatport.rst 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/docs/plugins/beatport.rst 2016-06-26 00:42:09.000000000 +0000 @@ -1,33 +1,36 @@ Beatport Plugin =============== -.. warning:: - - As of October 2013, Beatport has `closed their API`_. We've contacted them - to attempt to gain access as a "partner." Until this happens, though, this - plugin won't work. - The ``beatport`` plugin adds support for querying the `Beatport`_ catalogue during the autotagging process. This can potentially be helpful for users whose collection includes a lot of diverse electronic music releases, for which both MusicBrainz and (to a lesser degree) Discogs show no matches. -.. _Beatport: http://beatport.com -.. _closed their API: http://api.beatport.com - Installation ------------ -To see matches from the ``beatport`` plugin, you first have to enable it in -your configuration (see :doc:`/plugins/index`). Then, install the `requests`_ -library (which we need for querying the Beatport API) by typing:: - - pip install requests +To use the ``beatport`` plugin, first enable it in your configuration (see +:ref:`using-plugins`). Then, install the `requests`_ and `requests_oauthlib`_ +libraries (which we need for querying and authorizing with the Beatport API) +by typing:: + + pip install requests requests_oauthlib + +You will also need to register for a `Beatport`_ account. The first time you +run the :ref:`import-cmd` command after enabling the plugin, it will ask you +to authorize with Beatport by visiting the site in a browser. On the site +you will be asked to enter your username and password to authorize beets +to query the Beatport API. You will then be displayed with a single line of +text that you should paste into your terminal. This will store the +authentication data for subsequent runs and you will not be required to +repeat the above steps. -And you're done. Matches from Beatport should now show up alongside matches +Matches from Beatport should now show up alongside matches from MusicBrainz and other sources. If you have a Beatport ID or a URL for a release or track you want to tag, you can just enter one of the two at the "enter Id" prompt in the importer. .. _requests: http://docs.python-requests.org/en/latest/ +.. _requests_oauthlib: https://github.com/requests/requests-oauthlib +.. 
_Beatport: http://beatport.com diff -Nru beets-1.3.8+dfsg/docs/plugins/bpd.rst beets-1.3.19/docs/plugins/bpd.rst --- beets-1.3.8+dfsg/docs/plugins/bpd.rst 2014-04-08 02:32:52.000000000 +0000 +++ beets-1.3.19/docs/plugins/bpd.rst 2016-06-20 17:32:29.000000000 +0000 @@ -16,35 +16,29 @@ Before you can use BPD, you'll need the media library called GStreamer (along with its Python bindings) on your system. -* On Mac OS X, you can use `MacPorts`_ or `Homebrew`_. For MacPorts, just run - ``port install py27-gst-python``. For Homebrew, the appropriate formulae are - in `homebrew-versions`_, so run ``brew tap homebrew/versions`` and then - ``brew install gst-python010``. (Note that you'll need the Mac OS X - Developer Tools in either case.) +* On Mac OS X, you can use `Homebrew`_. Run ``brew install gstreamer`` and then + ``brew install pygobject3``. .. _homebrew-versions: https://github.com/Homebrew/homebrew-versions -* On Linux, it's likely that you already have gst-python. (If not, your - distribution almost certainly has a package for it.) +* On Linux, you need to install GStreamer 1.0 and the GObject bindings for + python. Under Ubuntu, they are called `python-gi` and `gstreamer1.0`. -* On Windows, you may want to try `GStreamer WinBuilds`_ (cavet emptor: I +* On Windows, you may want to try `GStreamer WinBuilds`_ (caveat emptor: I haven't tried this). You will also need the various GStreamer plugin packages to make everything work. See the :doc:`/plugins/chroma` documentation for more information on installing GStreamer plugins. -.. _MacPorts: http://www.macports.org/ .. _GStreamer WinBuilds: http://www.gstreamer-winbuild.ylatuya.es/ .. _Homebrew: http://mxcl.github.com/homebrew/ -Using and Configuring ---------------------- - -BPD is a plugin for beets. It comes with beets, but it's disabled by default. -To enable it, you'll need to edit your :doc:`configuration file -</reference/config>` and add ``bpd`` to your ``plugins:`` line. +Usage +----- +To use the ``bpd`` plugin, first enable it in your configuration (see +:ref:`using-plugins`). Then, you can run BPD by invoking:: $ beet bpd @@ -69,25 +63,39 @@ be run locally. Control your music from your laptop (or phone!) while it plays on your headless server box. Rad! -To configure the BPD server, add a ``bpd:`` section to your ``config.yaml`` -file. The configuration values, which are pretty self-explanatory, are ``host``, -``port``, and ``password``. Here's an example:: +Configuration +------------- + +To configure the plugin, make a ``bpd:`` section in your configuration file. +The available options are: + +- **host**: + Default: Bind to all interfaces. +- **port**: + Default: 6600 +- **password**: + Default: No password. +- **volume**: Initial volume, as a percentage. + Default: 100 + +Here's an example:: bpd: host: 127.0.0.1 port: 6600 password: seekrit + volume: 100 Implementation Notes -------------------- -In the real MPD, the user can browse a music directory as it appears on disk. In -beets, we like to abstract away from the directory structure. Therefore, BPD +In the real MPD, the user can browse a music directory as it appears on disk. +In beets, we like to abstract away from the directory structure. Therefore, BPD creates a "virtual" directory structure (artist/album/track) to present to -clients. This is static for now and cannot be reconfigured like the real on-disk -directory structure can. 
(Note that an obvious solution to this is just +string matching on items' destination, but this requires examining the entire +library Python-side for every query.) We don't currently support versioned playlists. Many clients, however, use plchanges instead of playlistinfo to get the current playlist, so plchanges diff -Nru beets-1.3.8+dfsg/docs/plugins/bpm.rst beets-1.3.19/docs/plugins/bpm.rst --- beets-1.3.8+dfsg/docs/plugins/bpm.rst 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/docs/plugins/bpm.rst 2016-06-20 01:53:12.000000000 +0000 @@ -1,22 +1,29 @@ BPM Plugin ========== -This ``bpm`` plugin lets you to get the tempo (beats per minute) of a song by tapping out the beat on your keyboard. +This ``bpm`` plugin lets you get the tempo (beats per minute) of a song by +tapping out the beat on your keyboard. Usage ----- -First, enable the plugin ``bpm`` as described in :doc:`/plugins/index`. Then, play a song you want to measure in your favorite media player and type:: +To use the ``bpm`` plugin, first enable it in your configuration (see +:ref:`using-plugins`). - beet bpm <song> +Then, play a song you want to measure in your favorite media player and type:: -You'll be prompted to press Enter three times to the rhythm. This typically allows to determine the BPM within 5% accuracy. + beet bpm <song> -The plugin works best if you wrap it in a script that gets the playing song. for instance, with ``mpc`` you can do something like:: +You'll be prompted to press Enter three times to the rhythm. This typically +allows you to determine the BPM within 5% accuracy. + +The plugin works best if you wrap it in a script that gets the playing song. +For instance, with ``mpc`` you can do something like:: beet bpm $(mpc |head -1|tr -d "-") Credit ------ -This plugin is inspired by a similar feature present in the Banshee media player. +This plugin is inspired by a similar feature present in the Banshee media +player. diff -Nru beets-1.3.8+dfsg/docs/plugins/bucket.rst beets-1.3.19/docs/plugins/bucket.rst --- beets-1.3.8+dfsg/docs/plugins/bucket.rst 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/docs/plugins/bucket.rst 2016-06-20 01:53:12.000000000 +0000 @@ -1,14 +1,15 @@ Bucket Plugin -============== +============= The ``bucket`` plugin groups your files into buckets folders representing *ranges*. This kind of organization can classify your music by periods of time -(e.g,. *1960s*, *1970s*, etc.), or to divide overwhelmingly large folders into -smaller subfolders by grouping albums or artists alphabetically (e.g., *A-F*, +(e.g., *1960s*, *1970s*, etc.), or divide overwhelmingly large folders into +smaller subfolders by grouping albums or artists alphabetically (e.g., *A-F*, *G-M*, *N-Z*). -To use the plugin, enable ``bucket`` in your configuration file (see -:ref:`using-plugins`). The plugin provides a :ref:`template function +To use the ``bucket`` plugin, first enable it in your configuration (see +:ref:`using-plugins`).
+The plugin provides a :ref:`template function <template-functions>` called ``%bucket`` for use in path format expressions:: paths: @@ -25,25 +26,45 @@ The definition of a range is somewhat loose, and multiple formats are allowed: -- For alpha ranges: the range is defined by the lowest and highest (ASCII-wise) alphanumeric characters in the string you provide. For example, *ABCD*, *A-D*, *A->D*, and *[AD]* are all equivalent. -- For year ranges: digits characters are extracted and the two extreme years define the range. For example, *1975-77*, *1975,76,77* and *1975-1977* are equivalent. If no upper bound is given, the range is extended to current year (unless a later range is defined). For example, *1975* encompasses all years from 1975 until now. +- For alpha ranges: the range is defined by the lowest and highest (ASCII-wise) + alphanumeric characters in the string you provide. For example, *ABCD*, + *A-D*, *A->D*, and *[AD]* are all equivalent. +- For year ranges: digit characters are extracted and the two extreme years + define the range. For example, *1975-77*, *1975,76,77* and *1975-1977* are + equivalent. If no upper bound is given, the range is extended to the current year + (unless a later range is defined). For example, *1975* encompasses all years + from 1975 until now. + +Configuration +------------- + +To configure the plugin, make a ``bucket:`` section in your configuration file. +The available options are: + +- **bucket_alpha**: Ranges to use for all substitutions occurring on textual + fields. + Default: none. +- **bucket_alpha_regex**: A ``range: regex`` mapping (one per line) where + ``range`` is one of the `bucket_alpha` ranges and ``value`` is a regex that + overrides the original range definition. + Default: none. +- **bucket_year**: Ranges to use for all substitutions occurring on the + `$year` field. + Default: none. +- **extrapolate**: Enable this if you want to group your files into multiple + year ranges without enumerating them all. This option will generate year + bucket names by reproducing characteristics of declared buckets. + Default: ``no``. -If you want to group your files into multiple year ranges, you don't have to -enumerate them all in `bucket_year` parameter but can activate the ``extrapolate`` -option instead. This option will generate year bucket names by reproducing characteristics -of declared buckets:: - - bucket: - bucket_year: ['2000-05'] - extrapolate: true - -The above configuration creates five-year ranges for any input year. - -If the automatic range of an alpha bucket is not sufficient an overriding regular expression can be used:: +Here's an example:: bucket: + bucket_year: ['2000-05'] + extrapolate: true bucket_alpha: ['A - D', 'E - L', 'M - R', 'S - Z'] bucket_alpha_regex: 'A - D': ^[0-9a-dA-D…äÄ] -The *A - D* bucket now matches also all artists starting with ä or Ä and 0 to 9 and … (three dots). The other buckets work as ranges (see above). +The *A - D* bucket now also matches all artists starting with ä or Ä and 0 to 9 +and … (ellipsis). The other alpha buckets work as ranges. diff -Nru beets-1.3.8+dfsg/docs/plugins/chroma.rst beets-1.3.19/docs/plugins/chroma.rst --- beets-1.3.8+dfsg/docs/plugins/chroma.rst 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/docs/plugins/chroma.rst 2016-06-20 01:53:12.000000000 +0000 @@ -64,36 +64,40 @@ * OS X has a number of decoders already built into Core Audio, so there's no need to install anything.
-* On Linux, you can install `GStreamer for Python`_, `FFmpeg`_, or `MAD`_ and - `pymad`_. How you install these will depend on your distribution. For example, - on Ubuntu, run ``apt-get install python-gst0.10-dev``. On Arch Linux, you want - ``pacman -S gstreamer0.10-python``. If you use GStreamer, be sure to install - its codec plugins also. +* On Linux, you can install `GStreamer`_ with `PyGObject`_, `FFmpeg`_, or + `MAD`_ with `pymad`_. How you install these will depend on your + distribution. + For example, on Ubuntu, run ``apt-get install gstreamer1.0 python-gi``. On + Arch Linux, you want ``pacman -S gstreamer python2-gobject``. If you use + GStreamer, be sure to install its codec plugins also (``gst-plugins-good``, + etc.). + + Note that if you install beets in a virtualenv, you'll need it to have + ``--system-site-packages`` enabled for Python to see the GStreamer bindings. * On Windows, try the Gstreamer "WinBuilds" from the `OSSBuild`_ project. -.. _audioread: https://github.com/sampsyo/audioread -.. _pyacoustid: http://github.com/sampsyo/pyacoustid -.. _GStreamer for Python: - http://gstreamer.freedesktop.org/modules/gst-python.html +.. _audioread: https://github.com/beetbox/audioread +.. _pyacoustid: http://github.com/beetbox/pyacoustid .. _FFmpeg: http://ffmpeg.org/ .. _MAD: http://spacepants.org/src/pymad/ .. _pymad: http://www.underbit.com/products/mad/ .. _Core Audio: http://developer.apple.com/technologies/mac/audio-and-video.html .. _OSSBuild: http://code.google.com/p/ossbuild/ +.. _Gstreamer: http://gstreamer.freedesktop.org/ +.. _PyGObject: https://wiki.gnome.org/Projects/PyGObject To decode audio formats (MP3, FLAC, etc.) with GStreamer, you'll need the standard set of Gstreamer plugins. For example, on Ubuntu, install the packages ``gstreamer0.10-plugins-good``, ``gstreamer0.10-plugins-bad``, and ``gstreamer0.10-plugins-ugly``. -Using +Usage ----- -Once you have all the dependencies sorted out, you can enable fingerprinting by -editing your :doc:`configuration file </reference/config>`. Put ``chroma`` on -your ``plugins:`` line. With that, beets will use fingerprinting the next time -you run ``beet import``. +Once you have all the dependencies sorted out, enable the ``chroma`` plugin in +your configuration (see :ref:`using-plugins`) to benefit from fingerprinting +the next time you run ``beet import``. You can also use the ``beet fingerprint`` command to generate fingerprints for items already in your library. (Provide a query to fingerprint a subset of your @@ -101,16 +105,20 @@ If you have the ``import.write`` config option enabled, they will also be written to files' metadata. -You can disable fingerprinting on import by setting the ``auto`` option to -false, like so:: +.. _submitfp: + +Configuration +------------- + +There is one configuration option in the ``chroma:`` section, ``auto``, which +controls whether to fingerprint files during the import process. To disable +fingerprint-based autotagging, set it to ``no``, like so:: chroma: auto: no -.. _submitfp: - Submitting Fingerprints -''''''''''''''''''''''' +----------------------- You can help expand the `Acoustid`_ database by submitting fingerprints for the music in your collection. 
To do this, first `get an API key`_ from the Acoustid diff -Nru beets-1.3.8+dfsg/docs/plugins/convert.rst beets-1.3.19/docs/plugins/convert.rst --- beets-1.3.8+dfsg/docs/plugins/convert.rst 2014-09-14 18:35:06.000000000 +0000 +++ beets-1.3.19/docs/plugins/convert.rst 2016-06-20 01:53:12.000000000 +0000 @@ -10,8 +10,8 @@ Installation ------------ -Enable the ``convert`` plugin in your configuration (see -:doc:`/plugins/index`). By default, the plugin depends on `FFmpeg`_ to +To use the ``convert`` plugin, first enable it in your configuration (see +:ref:`using-plugins`). By default, the plugin depends on `FFmpeg`_ to transcode the audio, so you might want to install it. .. _FFmpeg: http://ffmpeg.org @@ -50,35 +50,44 @@ Configuration ------------- -The plugin offers several configuration options, all of which live under the -``convert:`` section: +To configure the plugin, make a ``convert:`` section in your configuration +file. The available options are: -* ``dest`` sets the directory the files will be converted (or copied) to. - A destination is required---you either have to provide it in the config file - or on the command-line using the ``-d`` flag. -* ``embed`` indicates whether or not to embed album art in converted items. - Default: true. -* If you set ``max_bitrate``, all lossy files with a higher bitrate will be +- **auto**: Import transcoded versions of your files automatically during + imports. With this option enabled, the importer will transcode all (in the + default configuration) non-MP3 files over the maximum bitrate before adding + them to your library. + Default: ``no``. +- **tmpdir**: The directory where temporary files will be stored during import. + Default: none (system default), +- **copy_album_art**: Copy album art when copying or transcoding albums matched + using the ``-a`` option. Default: ``no``. +- **album_art_maxwidth**: Downscale album art if it's too big. The resize + operation reduces image width to at most ``maxwidth`` pixels while + preserving the aspect ratio. +- **dest**: The directory where the files will be converted (or copied) to. + Default: none. +- **embed**: Embed album art in converted items. Default: ``yes``. +- **max_bitrate**: All lossy files with a higher bitrate will be transcoded and those with a lower bitrate will simply be copied. Note that this does not guarantee that all converted files will have a lower bitrate---that depends on the encoder and its configuration. -* ``auto`` gives you the option to import transcoded versions of your files - automatically during the ``import`` command. With this option enabled, the - importer will transcode all non-MP3 files over the maximum bitrate before - adding them to your library. -* ``quiet`` mode prevents the plugin from announcing every file it processes. - Default: false. -* ``never_convert_lossy_files`` means that lossy codecs, such as mp3, ogg vorbis, - etc, are never converted, as converting lossy files to other lossy codecs will - decrease quality further. If set to true, lossy files are always copied. - Default: false -* ``paths`` lets you specify the directory structure and naming scheme for the - converted files. Use the same format as the top-level ``paths`` section (see - :ref:`path-format-config`). By default, the plugin reuses your top-level - path format settings. -* Finally, ``threads`` determines the number of threads to use for parallel - encoding. By default, the plugin will detect the number of processors - available and use them all. + Default: none. 
+- **never_convert_lossy_files**: Cross-conversions between lossy codecs---such + as mp3, ogg vorbis, etc.---makes little sense as they will decrease quality + even further. If set to ``yes``, lossy files are always copied. + Default: ``no``. +- **paths**: The directory structure and naming scheme for the converted + files. Uses the same format as the top-level ``paths`` section (see + :ref:`path-format-config`). + Default: Reuse your top-level path format settings. +- **quiet**: Prevent the plugin from announcing every file it processes. + Default: ``false``. +- **threads**: The number of threads to use for parallel encoding. + By default, the plugin will detect the number of processors available and use + them all. + +You can also configure the format to use for transcoding. .. _convert-format-config: @@ -87,7 +96,9 @@ You can customize the transcoding command through the ``formats`` map and select a command with the ``--format`` command-line option or the -``format`` configuration.:: +``format`` configuration. + +:: convert: format: speex @@ -102,20 +113,22 @@ This will also use the format key (`wav`) as the file extension. Each entry in the ``formats`` map consists of a key (the name of the -format) as well as the command and the possibly the file extension. +format) as well as the command and optionally the file extension. ``extension`` is the filename extension to be used for newly transcoded -files. If only the command is given as a string, the file extension -defaults to the format’s name. ``command`` is the command-line to use -to transcode audio. The tokens ``$source`` and ``$dest`` in the command -are replaced with the paths to the existing and new file. +files. If only the command is given as a string or the extension is not +provided, the file extension defaults to the format's name. ``command`` is the +command to use to transcode audio. The tokens ``$source`` and ``$dest`` in the +command are replaced with the paths to the existing and new file. The plugin in comes with default commands for the most common audio formats: `mp3`, `alac`, `flac`, `aac`, `opus`, `ogg`, `wmv`. For details have a look at the output of ``beet config -d``. For a one-command-fits-all solution use the ``convert.command`` and -``convert.extension`` options. If these are set the formats are ignored -and the given command is used for all conversions.:: +``convert.extension`` options. If these are set, the formats are ignored +and the given command is used for all conversions. + +:: convert: command: ffmpeg -i $source -y -vn -aq 2 $dest diff -Nru beets-1.3.8+dfsg/docs/plugins/discogs.rst beets-1.3.19/docs/plugins/discogs.rst --- beets-1.3.8+dfsg/docs/plugins/discogs.rst 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/docs/plugins/discogs.rst 2016-06-20 01:53:12.000000000 +0000 @@ -9,15 +9,34 @@ Installation ------------ -First, enable the ``discogs`` plugin (see :doc:`/plugins/index`). Then, -install the `discogs-client`_ library by typing:: +To use the ``discogs`` plugin, first enable it in your configuration (see +:ref:`using-plugins`). Then, install the `discogs-client`_ library by typing:: pip install discogs-client -That's it! Matches from Discogs will now show up during import alongside -matches from MusicBrainz. +You will also need to register for a `Discogs`_ account. The first time you +run the :ref:`import-cmd` command after enabling the plugin, it will ask you +to authorize with Discogs by visiting the site in a browser. Subsequent runs +will not require re-authorization. 
+ +Matches from Discogs will now show up during import alongside matches from +MusicBrainz. If you have a Discogs ID for an album you want to tag, you can also enter it at the "enter Id" prompt in the importer. +Troubleshooting +--------------- + +Several issues have been encountered with the Discogs API. If you have one, +please start by searching for `a similar issue on the repo +<https://github.com/beetbox/beets/issues?utf8=%E2%9C%93&q=is%3Aissue+discogs>`_. + +Here are two things you can try: + +* Try deleting the token file (``~/.config/beets/discogs_token.json`` by + default) to force re-authorization. +* Make sure that your system clock is accurate. The Discogs servers can reject + your request if your clock is too out of sync. + .. _discogs-client: https://github.com/discogs/discogs_client diff -Nru beets-1.3.8+dfsg/docs/plugins/duplicates.rst beets-1.3.19/docs/plugins/duplicates.rst --- beets-1.3.8+dfsg/docs/plugins/duplicates.rst 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/docs/plugins/duplicates.rst 2016-06-20 01:53:12.000000000 +0000 @@ -4,43 +4,20 @@ This plugin adds a new command, ``duplicates`` or ``dup``, which finds and lists duplicate tracks or albums in your collection. -Installation ------------- +Usage +----- -Enable the plugin by putting ``duplicates`` on your ``plugins`` line in -your :doc:`config file </reference/config>`:: - - plugins: duplicates - -Configuration -------------- +To use the ``duplicates`` plugin, first enable it in your configuration (see +:ref:`using-plugins`). By default, the ``beet duplicates`` command lists the names of tracks in your library that are duplicates. It assumes that Musicbrainz track and album ids are unique to each track or album. That is, it lists every track or album with an ID that has been seen before in the library. - You can customize the output format, count the number of duplicate tracks or albums, and list all tracks that have duplicates or just the -duplicates themselves. These options can either be specified in the -config file:: - - duplicates: - checksum: no - copy: no - keys: mb_trackid album - album: no - count: no - delete: no - format: "$albumartist - $album - $title" - full: no - move: no - path: no - tag: no - - -or on the command-line:: +duplicates themselves via command-line switches :: -h, --help show this help message and exit -f FMT, --format=FMT print with custom format @@ -50,89 +27,71 @@ report duplicates based on arbitrary command -d, --delete delete items from library and disk -F, --full show all versions of duplicate tracks or albums + -s, --strict report duplicates only if all attributes are set -k, --keys report duplicates based on keys + -M, --merge merge duplicate items -m DEST, --move=DEST move items to dest -o DEST, --copy=DEST copy items to dest -p, --path print paths for matched items or albums -t TAG, --tag=TAG tag matched items with 'k=v' attribute +Configuration +------------- -format -~~~~~~ - -The ``format`` option (default: :ref:`list_format_item`) lets you -specify a specific format with which to print every track or -album. This uses the same template syntax as beets’ :doc:`path formats -</reference/pathformat>`. The usage is inspired by, and therefore -similar to, the :ref:`list <list-cmd>` command. - -path -~~~~ - -Convenience wrapper for ``-f \$path``. 
- -count -~~~~~ - -The ``count`` option (default: false) prints a count of duplicate -tracks or albums, with ``format`` hard-coded to ``$albumartist - -$album - $title: $count`` or ``$albumartist - $album: $count`` (for -the ``-a`` option). - -album -~~~~~ - -The ``album`` option (default: false) lists duplicate albums instead -of tracks. - -full -~~~~ - -The ``full`` option (default: false) lists every track or album that -has duplicates, not just the duplicates themselves. - -keys -~~~~ - -The ``keys`` option (default: ``[mb_trackid, mb_albumid]``) defines in which track -or album fields duplicates are to be searched. By default, the plugin -uses the musicbrainz track and album IDs for this purpose. Using the -``keys`` option (as a YAML list in the configuration file, or as -space-delimited strings in the command-line), you can extend this behavior -to consider other attributes. - -checksum -~~~~~~~~ - -The ``checksum`` option (default: ``ffmpeg -i {file} -f crc -``) enables the use of -any arbitrary command to compute a checksum of items. It overrides the ``keys`` -option the first time it is run; however, because it caches the resulting checksums -as ``flexattrs`` in the database, you can use -``--keys=name_of_the_checksumming_program any_other_keys`` the second time around. - -copy -~~~~ - -The ``copy`` option (default: ``no``) takes a destination base directory into which -it will copy matched items. - -move -~~~~ - -The ``move`` option (default: ``no``) takes a destination base directory into which -it will move matched items. - -delete -~~~~~~ - -The ``delete`` option (default: ``no``) removes matched items from the library -and from the disk. - -tag -~~~ +To configure the plugin, make a ``duplicates:`` section in your configuration +file. The available options mirror the command-line options: -The ``tag`` option (default: ``no``) takes a ``key=value`` string, and adds a new -``key`` attribute with ``value`` value as a flexattr to the database. +- **album**: List duplicate albums instead of tracks. + Default: ``no``. +- **checksum**: Use an arbitrary command to compute a checksum + of items. This overrides the ``keys`` option the first time it is run; + however, because it caches the resulting checksum as ``flexattrs`` in the + database, you can use ``--keys=name_of_the_checksumming_program + any_other_keys`` (or set configuration ``keys`` option) the second time + around. + Default: ``ffmpeg -i {file} -f crc -``. +- **copy**: A destination base directory into which to copy matched + items. + Default: none (disabled). +- **count**: Print a count of duplicate tracks or albums in the format + ``$albumartist - $album - $title: $count`` (for tracks) or ``$albumartist - + $album: $count`` (for albums). + Default: ``no``. +- **delete**: Removes matched items from the library and from the disk. + Default: ``no`` +- **format**: A specific format with which to print every track + or album. This uses the same template syntax as beets' + :doc:`path formats</reference/pathformat>`. The usage is inspired by, and + therefore similar to, the :ref:`list <list-cmd>` command. + Default: :ref:`format_item` +- **full**: List every track or album that has duplicates, not just the + duplicates themselves. + Default: ``no`` +- **keys**: Define in which track or album fields duplicates are to be + searched. By default, the plugin uses the musicbrainz track and album IDs for + this purpose. 
Using the ``keys`` option (as a YAML list in the configuration + file, or as space-delimited strings in the command-line), you can extend this + behavior to consider other attributes. + Default: ``[mb_trackid, mb_albumid]`` +- **merge**: Merge duplicate items by consolidating tracks and-or + metadata where possible. +- **move**: A destination base directory into which it will move matched + items. + Default: none (disabled). +- **path**: Output the path instead of metadata when listing duplicates. + Default: ``no``. +- **strict**: Do not report duplicate matches if some of the + attributes are not defined (ie. null or empty). + Default: ``no`` +- **tag**: A ``key=value`` pair. The plugin will add a new ``key`` attribute + with ``value`` value as a flexattr to the database for duplicate items. + Default: ``no``. +- **tiebreak**: Dictionary of lists of attributes keyed by ``items`` + or ``albums`` to use when choosing duplicates. By default, the + tie-breaking procedure favors the most complete metadata attribute + set. If you would like to consider the lower bitrates as duplicates, + for example, set ``tiebreak: items: [bitrate]``. + Default: ``{}``. Examples -------- @@ -150,7 +109,8 @@ beet duplicates -f '$year' | spark ▆▁▆█▄▇▇▄▇▇▁█▇▆▇▂▄█▁██▂█▁▁██▁█▂▇▆▂▇█▇▇█▆▆▇█▇█▇▆██▂▇ -Print out a listing of all albums with duplicate tracks, and respective counts:: +Print out a listing of all albums with duplicate tracks, and respective +counts:: beet duplicates -ac @@ -162,8 +122,8 @@ beet duplicates -k title albumartist album -Compute Adler CRC32 or MD5 checksums, storing them as flexattrs, and report back -duplicates based on those values:: +Compute Adler CRC32 or MD5 checksums, storing them as flexattrs, and report +back duplicates based on those values:: beet dup -C 'ffmpeg -i {file} -f crc -' beet dup -C 'md5sum {file}' @@ -181,12 +141,15 @@ beet duplicates --delete --keys albumartist albumartist:nickelback Tag duplicate items with some flag:: - + beet duplicates --tag dup=1 -TODO ----- +Ignore items with undefined keys:: + + beet duplicates --strict + +Merge and delete duplicate albums with different missing tracks:: -- better duplicate disambiaguation strategies (eg, based on bitrate, etc) + beet duplicates --album --merge --delete .. _spark: https://github.com/holman/spark diff -Nru beets-1.3.8+dfsg/docs/plugins/echonest.rst beets-1.3.19/docs/plugins/echonest.rst --- beets-1.3.8+dfsg/docs/plugins/echonest.rst 2014-04-14 17:34:01.000000000 +0000 +++ beets-1.3.19/docs/plugins/echonest.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,103 +0,0 @@ -Echo Nest Plugin -================ - -The ``echonest`` plugin fetches `acoustic attributes`_ from `the Echo Nest`_. -It automatically fills in the following attributes: - -- danceability -- energy -- liveness -- loudness -- speechiness -- bpm - -All attributes except ``bpm`` are stored in flexible attributes (i.e., not -in files' metadata). -See the Echo Nest's page on `acoustic attributes`_ for a detailed description. -(Their name for ``bpm`` is ``tempo``.) - -.. _the Echo Nest: http://the.echonest.com/ -.. _acoustic attributes: http://developer.echonest.com/acoustic-attributes.html - - -Installing Dependencies ------------------------ - -This plugin requires the pyechonest library in order to talk to the Echo Nest -API. At least version 8.0.1 is required. 
- -There are packages for most major linux distributions, you can download the -library from the Echo Nest, or you can install the library from `pip`_, -like so:: - - $ pip install pyechonest - -To transcode music for server-side analysis (optional, of course), install -the `ffmpeg`_ command-line tool. - -To get fingerprinting working, you'll need to install the command-line -codegen tool for `ENMFP`_ or `Echoprint`_, the two fingerprinting -algorithms supported by the Echo Nest. Please note that fingerprinting is not -required if ``upload`` and ``convert`` is enabled, which is the default (but -it can be faster than uploading). - -.. _pip: http://pip.openplans.org/ -.. _FFmpeg: http://ffmpeg.org -.. _ENMFP: http://static.echonest.com/ENMFP_codegen.zip -.. _Echoprint: http://echoprint.me - - -Configuring ------------ - -Beets includes its own Echo Nest API key, but you can `apply for your own`_ for -free from the Echo Nest. To specify your own API key, add the key to your -:doc:`configuration file </reference/config>` as the value for ``apikey`` under -the key ``echonest_tempo`` like so:: - - echonest: - apikey: YOUR_API_KEY - -In addition, the ``auto`` config option lets you disable automatic metadata -fetching during import. To do so, add this to your ``config.yaml``:: - - echonest: - auto: no - -The ``echonest`` plugin tries to upload files to the Echo Nest server if it -can not be identified by other means. If you don't want that, disable the -``upload`` config option like so:: - - echonest: - upload: no - -The Echo Nest server only supports a limited range of file formats. The plugin -automatically converts unsupported files to ``ogg``. If you don't want that, -disable the ``convert`` config option like so:: - - echonest: - convert: no - -The Echo Nest server does not allow uploading of files with sizes greater than -50MB. The plugin automatically truncates large files to their first 5 -minutes. If you don't want that, disable the ``truncate`` config option like -so:: - - echonest: - truncate: no - -To enable fingerprinting, you'll need to tell the plugin where to find the -Echoprint or ENMFP codegen binary. Use the ``codegen`` key under the -``echonest`` section like so:: - - echonest: - codegen: /usr/bin/echoprint-codegen - -.. _apply for your own: http://developer.echonest.com/account/register - -Running Manually ----------------- - -In addition to running automatically on import, the plugin can also be run manually -from the command line. Use the command ``beet echonest [QUERY]`` to fetch -acoustic attributes for albums matching a certain query. diff -Nru beets-1.3.8+dfsg/docs/plugins/echonest_tempo.rst beets-1.3.19/docs/plugins/echonest_tempo.rst --- beets-1.3.8+dfsg/docs/plugins/echonest_tempo.rst 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/docs/plugins/echonest_tempo.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,73 +0,0 @@ -EchoNest Tempo Plugin -===================== - -.. note:: - - A newer :doc:`echonest` is available that supersedes this plugin. In - addition to the tempo, the new plugin can fetch the Echo Nest's full - complement of acoustic attributes. This older tempo-specific plugin is - **deprecated**. - -The ``echonest_tempo`` plugin fetches and stores a track's tempo (the "bpm" -field) from the `Echo Nest API`_. - -.. _Echo Nest API: http://developer.echonest.com/ - -Installing Dependencies ------------------------ - -This plugin requires the pyechonest library in order to talk to the EchoNest -API. 
- -There are packages for most major linux distributions, you can download the -library from the Echo Nest, or you can install the library from `pip`_, -like so:: - - $ pip install pyechonest - -.. _pip: http://pip.openplans.org/ - -Configuring ------------ - -Beets includes its own Echo Nest API key, but you can `apply for your own`_ for -free from the EchoNest. To specify your own API key, add the key to your -:doc:`configuration file </reference/config>` as the value for ``apikey`` under -the key ``echonest_tempo`` like so:: - - echonest_tempo: - apikey: YOUR_API_KEY - -In addition, the ``autofetch`` config option lets you disable automatic tempo -fetching during import. To do so, add this to your ``config.yaml``:: - - echonest_tempo: - auto: no - -.. _apply for your own: http://developer.echonest.com/account/register - -Fetch Tempo During Import -------------------------- - -To automatically fetch the tempo for songs you import, just enable the plugin -by putting ``echonest_tempo`` on your config file's ``plugins`` line (see -:doc:`/plugins/index`). When importing new files, beets will now fetch the -tempo for files that don't already have them. The bpm field will be stored in -the beets database. If the ``import.write`` config option is on, then the tempo -will also be written to the files' tags. - -This behavior can be disabled with the ``autofetch`` config option (see below). - -Fetching Tempo Manually ------------------------ - -The ``tempo`` command provided by this plugin fetches tempos for -items that match a query (see :doc:`/reference/query`). For example, -``beet tempo magnetic fields absolutely cuckoo`` will get the tempo for the -appropriate Magnetic Fields song, ``beet tempo magnetic fields`` will get -tempos for all my tracks by that band, and ``beet tempo`` will get tempos for -my entire library. The tempos will be added to the beets database and, if -``import.write`` is on, embedded into files' metadata. - -The ``-p`` option to the ``tempo`` command makes it print tempos out to the -console so you can view the fetched (or previously-stored) tempos. diff -Nru beets-1.3.8+dfsg/docs/plugins/edit.rst beets-1.3.19/docs/plugins/edit.rst --- beets-1.3.8+dfsg/docs/plugins/edit.rst 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/docs/plugins/edit.rst 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,60 @@ +Edit Plugin +=========== + +The ``edit`` plugin lets you modify music metadata using your favorite text +editor. + +Enable the ``edit`` plugin in your configuration (see :ref:`using-plugins`) and +then type:: + + beet edit QUERY + +Your text editor (i.e., the command in your ``$EDITOR`` environment variable) +will open with a list of tracks to edit. Make your changes and exit your text +editor to apply them to your music. + +Command-Line Options +-------------------- + +The ``edit`` command has these command-line options: + +- ``-a`` or ``--album``: Edit albums instead of individual items. +- ``-f FIELD`` or ``--field FIELD``: Specify an additional field to edit + (in addition to the defaults set in the configuration). +- ``--all``: Edit *all* available fields. + +Interactive Usage +----------------- + +The ``edit`` plugin can also be invoked during an import session. If enabled, it +adds two new options to the user prompt:: + + [A]pply, More candidates, Skip, Use as-is, as Tracks, Group albums, Enter search, enter Id, aBort, eDit, edit Candidates? + +- ``eDit``: use this option for using the original items' metadata as the + starting point for your edits. 
+- ``edit Candidates``: use this option for using a candidate's metadata as the + starting point for your edits. + +Please note that currently the interactive usage of the plugin will only allow +you to change the item-level fields. In case you need to edit the album-level +fields, the recommended approach is to invoke the plugin via the command line +in album mode (``beet edit -a QUERY``) after the import. + +Also, please be aware that the ``edit Candidates`` choice can only be used with +the matches found during the initial search (and currently not supporting the +candidates found via the ``Enter search`` or ``enter Id`` choices). You might +find the ``--search-id SEARCH_ID`` :ref:`import-cmd` option useful for those +cases where you already have a specific candidate ID that you want to edit. + +Configuration +------------- + +To configure the plugin, make an ``edit:`` section in your configuration +file. The available options are: + +- **itemfields**: A space-separated list of item fields to include in the + editor by default. + Default: ``track title artist album`` +- **albumfields**: The same when editing albums (with the ``-a`` option). + Default: ``album albumartist`` diff -Nru beets-1.3.8+dfsg/docs/plugins/embedart.rst beets-1.3.19/docs/plugins/embedart.rst --- beets-1.3.8+dfsg/docs/plugins/embedart.rst 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/docs/plugins/embedart.rst 2016-06-20 01:53:12.000000000 +0000 @@ -7,18 +7,71 @@ this will take more space than the external-file approach, it is necessary for displaying album art in some media players (iPods, for example). -This plugin was added in beets 1.0b8. - Embedding Art Automatically --------------------------- -To automatically embed discovered album art into imported files, just enable the -plugin (see :doc:`/plugins/index`). You'll also want to enable the +To automatically embed discovered album art into imported files, just enable +the ``embedart`` plugin (see :doc:`/plugins/index`). You'll also want to enable the :doc:`/plugins/fetchart` to obtain the images to be embedded. Art will be -embedded after each album is added to the library. +embedded after each album has its cover art set. This behavior can be disabled with the ``auto`` config option (see below). +.. _image-similarity-check: + +Image Similarity +'''''''''''''''' + +When importing a lot of files with the ``auto`` option, one may be reluctant to +overwrite existing embedded art for all of them. + +You can tell beets to avoid embedding images that are too different from the +existing ones. +This works by computing the perceptual hashes (`PHASH`_) of the two images and +checking that the difference between the two does not exceed a +threshold. You can set the threshold with the ``compare_threshold`` option. + +A threshold of 0 (the default) disables similarity checking and always embeds +new images. Set the threshold to another number---we recommend between 10 and +100---to adjust the sensitivity of the comparison. The smaller the threshold +number, the more similar the images must be. + +This feature requires `ImageMagick`_. + +Configuration +------------- + +To configure the plugin, make an ``embedart:`` section in your configuration +file. The available options are: + +- **auto**: Enable automatic album art embedding. + Default: ``yes``. +- **compare_threshold**: How similar candidate art must be to + existing art to be written to the file (see :ref:`image-similarity-check`). + Default: 0 (disabled). 
+- **ifempty**: Avoid embedding album art for files that already have art + embedded. + Default: ``no``. +- **maxwidth**: A maximum width to downscale images before embedding + them (the original image file is not altered). The resize operation reduces + image width to at most ``maxwidth`` pixels. The height is recomputed so that + the aspect ratio is preserved. See also :ref:`image-resizing` for further + caveats about image resizing. + Default: 0 (disabled). +- **remove_art_file**: Automatically remove the album art file for the album + after it has been embedded. This option is best used alongside the + :doc:`FetchArt </plugins/fetchart>` plugin to download art with the purpose of + directly embedding it into the file's metadata without an "intermediate" + album art file. + Default: ``no``. + +Note: ``compare_threshold`` option requires `ImageMagick`_, and ``maxwidth`` +requires either `ImageMagick`_ or `Pillow`_. + +.. _Pillow: https://github.com/python-pillow/Pillow +.. _ImageMagick: http://www.imagemagick.org/ +.. _PHASH: http://www.fmwconcepts.com/misc_tests/perceptual_hash_test_results_510/ + Manually Embedding and Extracting Art ------------------------------------- @@ -30,30 +83,19 @@ use a specific image file from the filesystem; otherwise, each album embeds its own currently associated album art. -* ``beet extractart [-o FILE] QUERY``: extracts the image from an item matching - the query and stores it in a file. You can specify the destination file using - the ``-o`` option, but leave off the extension: it will be chosen - automatically. The destination filename defaults to ``cover`` if it's not - specified. +* ``beet extractart [-a] [-n FILE] QUERY``: extracts the images for all albums + matching the query. The images are placed inside the album folder. You can + specify the destination file name using the ``-n`` option, but leave off the + extension: it will be chosen automatically. The destination filename is + specified using the ``art_filename`` configuration option. It defaults to + ``cover`` if it's not specified via ``-o`` nor the config. + Using ``-a``, the extracted image files are automatically associated with the + corresponding album. + +* ``beet extractart -o FILE QUERY``: extracts the image from an item matching + the query and stores it in a file. You have to specify the destination file + using the ``-o`` option, but leave off the extension: it will be chosen + automatically. * ``beet clearart QUERY``: removes all embedded images from all items matching the query. (Use with caution!) - -Configuring ------------ - -The ``auto`` option lets you disable automatic album art embedding. -To do so, add this to your ``config.yaml``:: - - embedart: - auto: no - -A maximum image width can be configured as ``maxwidth`` to downscale images -before embedding them (the original image file is not altered). The resize -operation reduces image width to ``maxwidth`` pixels. The height is recomputed -so that the aspect ratio is preserved. `PIL`_ or `ImageMagick`_ is required to -use the ``maxwidth`` config option. See also :ref:`image-resizing` for further -caveats about image resizing. - -.. _PIL: http://www.pythonware.com/products/pil/ -.. 
_ImageMagick: http://www.imagemagick.org/ diff -Nru beets-1.3.8+dfsg/docs/plugins/embyupdate.rst beets-1.3.19/docs/plugins/embyupdate.rst --- beets-1.3.8+dfsg/docs/plugins/embyupdate.rst 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/docs/plugins/embyupdate.rst 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,33 @@ +EmbyUpdate Plugin +================= + +``embyupdate`` is a plugin that lets you automatically update `Emby`_'s library whenever you change your beets library. + +To use ``embyupdate`` plugin, enable it in your configuration (see :ref:`using-plugins`). Then, you'll probably want to configure the specifics of your Emby server. You can do that using an ``emby:`` section in your ``config.yaml``, which looks like this:: + + emby: + host: localhost + port: 8096 + username: user + password: password + +To use the ``embyupdate`` plugin you need to install the `requests`_ library with:: + + pip install requests + +With that all in place, you'll see beets send the "update" command to your Emby server every time you change your beets library. + +.. _Emby: http://emby.media/ +.. _requests: http://docs.python-requests.org/en/latest/ + +Configuration +------------- + +The available options under the ``emby:`` section are: + +- **host**: The Emby server name. + Default: ``localhost`` +- **port**: The Emby server port. + Default: 8096 +- **username**: A username of a Emby user that is allowed to refresh the library. +- **password**: That user's password. diff -Nru beets-1.3.8+dfsg/docs/plugins/export.rst beets-1.3.19/docs/plugins/export.rst --- beets-1.3.8+dfsg/docs/plugins/export.rst 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/docs/plugins/export.rst 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,65 @@ +Export Plugin +============= + +The ``export`` plugin lets you get data from the items and export the content +as `JSON`_. + +.. _JSON: http://www.json.org + +Enable the ``export`` plugin (see :ref:`using-plugins` for help). Then, type ``beet export`` followed by a :doc:`query </reference/query>` to get the data from +your library. For example, run this:: + + $ beet export beatles + +to print a JSON file containing information about your Beatles tracks. + +Command-Line Options +-------------------- + +The ``export`` command has these command-line options: + +* ``--include-keys`` or ``-i``: Choose the properties to include in the output + data. The argument is a comma-separated list of simple glob patterns where + ``*`` matches any string. For example:: + + $ beet export -i 'title,mb*' beatles + + will include the ``title`` property and all properties starting with + ``mb``. You can add the ``-i`` option multiple times to the command + line. + +* ``--library`` or ``-l``: Show data from the library database instead of the + files' tags. + +* ``--output`` or ``-o``: Path for an output file. If not informed, will print + the data in the console. + +* ``--append``: Appends the data to the file instead of writing. + +Configuration +------------- + +To configure the plugin, make a ``export:`` section in your configuration +file. Under the ``json`` key, these options are available: + +- **ensure_ascii**: Escape non-ASCII characters with `\uXXXX` entities. + +- **indent**: The number of spaces for indentation. + +- **separators**: A ``[item_separator, dict_separator]`` tuple. + +- **sort_keys**: Sorts the keys in JSON dictionaries. + +These options match the options from the `Python json module`_. + +.. 
_Python json module: https://docs.python.org/2/library/json.html#basic-usage + +The default options look like this:: + + export: + json: + formatting: + ensure_ascii: False + indent: 4 + separators: [',' , ': '] + sort_keys: true diff -Nru beets-1.3.8+dfsg/docs/plugins/fetchart.rst beets-1.3.19/docs/plugins/fetchart.rst --- beets-1.3.8+dfsg/docs/plugins/fetchart.rst 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/docs/plugins/fetchart.rst 2016-06-20 01:53:12.000000000 +0000 @@ -4,7 +4,7 @@ The ``fetchart`` plugin retrieves album art images from various sources on the Web and stores them as image files. -To use the plugin, first enable it in your configuration (see +To use the ``fetchart`` plugin, first enable it in your configuration (see :ref:`using-plugins`). Then, install the `requests`_ library by typing:: pip install requests @@ -23,11 +23,65 @@ album in a file called ``cover.jpg``. To customize the name of this file, use the :ref:`art-filename` config option. -To disable automatic art downloading, just put this in your configuration -file:: +Configuration +------------- + +To configure the plugin, make a ``fetchart:`` section in your configuration +file. The available options are: + +- **auto**: Enable automatic album art fetching during import. + Default: ``yes``. +- **cautious**: Pick only trusted album art by ignoring filenames that do not + contain one of the keywords in ``cover_names``. + Default: ``no``. +- **cover_names**: Prioritize images containing words in this list. + Default: ``cover front art album folder``. +- **minwidth**: Only images with a width bigger or equal to ``minwidth`` are + considered as valid album art candidates. Default: 0. +- **maxwidth**: A maximum image width to downscale fetched images if they are + too big. The resize operation reduces image width to at most ``maxwidth`` + pixels. The height is recomputed so that the aspect ratio is preserved. +- **enforce_ratio**: Only images with a width:height ratio of 1:1 are + considered as valid album art candidates if set to ``yes``. + It is also possible to specify a certain deviation to the exact ratio to + still be considered valid. This can be done either in pixels + (``enforce_ratio: 10px``) or as a percentage of the longer edge + (``enforce_ratio: 0.5%``). Default: ``no``. +- **sources**: List of sources to search for images. An asterisk `*` expands + to all available sources. + Default: ``filesystem coverart itunes amazon albumart``, i.e., everything but + ``wikipedia``, ``google`` and ``fanarttv``. Enable those sources for more + matches at the cost of some speed. They are searched in the given order, + thus in the default config, no remote (Web) art source are queried if + local art is found in the filesystem. To use a local image as fallback, + move it to the end of the list. +- **google_key**: Your Google API key (to enable the Google Custom Search + backend). + Default: None. +- **google_engine**: The custom search engine to use. + Default: The `beets custom search engine`_, which searches the entire web. + **fanarttv_key**: The personal API key for requesting art from + fanart.tv. See below. +- **store_source**: If enabled, fetchart stores the artwork's source in a + flexible tag named ``art_source``. See below for the rationale behind this. + Default: ``no``. + +Note: ``minwidth`` and ``enforce_ratio`` options require either `ImageMagick`_ +or `Pillow`_. + +.. _beets custom search engine: https://cse.google.com.au:443/cse/publicurl?cx=001442825323518660753:hrh5ch1gjzm +.. 
_Pillow: https://github.com/python-pillow/Pillow +.. _ImageMagick: http://www.imagemagick.org/ + +Here's an example that makes plugin select only images that contain *front* or +*back* keywords in their filenames and prioritizes the iTunes source over +others:: fetchart: - auto: no + cautious: true + cover_names: front back + sources: itunes * + Manually Fetching Album Art --------------------------- @@ -47,31 +101,29 @@ Image Resizing -------------- -A maximum image width can be configured as ``maxwidth`` to downscale fetched -images if they are too big. The resize operation reduces image width to -``maxwidth`` pixels. The height is recomputed so that the aspect ratio is -preserved. - -Beets can resize images using `PIL`_, `ImageMagick`_, or a server-side resizing -proxy. If either PIL or ImageMagick is installed, beets will use those; +Beets can resize images using `Pillow`_, `ImageMagick`_, or a server-side resizing +proxy. If either Pillow or ImageMagick is installed, beets will use those; otherwise, it falls back to the resizing proxy. If the resizing proxy is used, no resizing is performed for album art found on the filesystem---only downloaded art is resized. Server-side resizing can also be slower than local resizing, so consider installing one of the two backends for better performance. -When using ImageMagic, beets looks for the ``convert`` executable in your path. +When using ImageMagick, beets looks for the ``convert`` executable in your path. On some versions of Windows, the program can be shadowed by a system-provided ``convert.exe``. On these systems, you may need to modify your ``%PATH%`` -environment variable so that ImageMagick comes first or use PIL instead. +environment variable so that ImageMagick comes first or use Pillow instead. -.. _PIL: http://www.pythonware.com/products/pil/ +.. _Pillow: https://github.com/python-pillow/Pillow .. _ImageMagick: http://www.imagemagick.org/ Album Art Sources ----------------- -Currently, this plugin searches for art in the local filesystem as well as on -the Cover Art Archive, Amazon, and AlbumArt.org (in that order). +By default, this plugin searches for art in the local filesystem as well as on +the Cover Art Archive, the iTunes Store, Amazon, and AlbumArt.org, in that +order. +You can reorder the sources or remove +some to speed up the process using the ``sources`` configuration option. When looking for local album art, beets checks for image files located in the same folder as the music files you're importing. Beets prefers to use an image @@ -79,39 +131,72 @@ the absence of well-known names, it will use any image file in the same folder as your music files. -You can change the list of filename keywords using the ``cover_names`` config -option. Or, to use *only* filenames containing the keywords and not fall back -to any image, set ``cautious`` to true. For example:: - - fetchart: - cautious: true - cover_names: front back - -By default, remote (Web) art sources are only queried if no local art is found -in the filesystem. To query remote sources every time, set the -``remote_priority`` configuration option to true, which will cause beets to -prefer remote cover art over any local image files. - When you choose to apply changes during an import, beets will search for art as described above. For "as-is" imports (and non-autotagged imports using the ``-A`` flag), beets only looks for art on the local filesystem. 
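+
+For example, to treat local files as a fallback rather than the first choice,
+you could move ``filesystem`` to the end of the ``sources`` list (a sketch of
+the option described above)::
+
+    fetchart:
+        sources: coverart itunes amazon albumart filesystem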
-Google Image Search -''''''''''''''''''' +iTunes Store +'''''''''''' -You can optionally search for cover art on `Google Images`_. This option uses -the first hit for a search query consisting of the artist and album name. It -is therefore approximate: "incorrect" image matches are possible (although -unlikely). +To use the iTunes Store as an art source, install the `python-itunes`_ +library. You can do this using `pip`_, like so:: -.. _Google Images: http://images.google.com/ + $ pip install https://github.com/ocelma/python-itunes/archive/master.zip -To enable gathering art from Google, enable the ``google_search`` option in -your config file:: +(There's currently `a problem`_ that prevents a plain ``pip install +python-itunes`` from working.) +Once the library is installed, the plugin will use it to search automatically. - fetchart: - google_search: true - +.. _a problem: https://github.com/ocelma/python-itunes/issues/9 +.. _python-itunes: https://github.com/ocelma/python-itunes +.. _pip: http://pip.openplans.org/ + +Google custom search +'''''''''''''''''''' + +To use the google image search backend you need to +`register for a Google API key`_. Set the ``google_key`` configuration +option to your key, then add ``google`` to the list of sources in your +configuration. + +.. _register for a Google API key: https://code.google.com/apis/console. + +Optionally, you can `define a custom search engine`_. Get your search engine's +token and use it for your ``google_engine`` configuration option. The +default engine searches the entire web for cover art. + +.. _define a custom search engine: http://www.google.com/cse/all + +Note that the Google custom search API is limited to 100 queries per day. +After that, the fetchart plugin will fall back on other declared data sources. + +Fanart.tv +''''''''' + +Although not strictly necessary right now, you might think about +`registering a personal fanart.tv API key`_. Set the ``fanarttv_key`` +configuration option to your key, then add ``fanarttv`` to the list of sources +in your configuration. + +.. _registering a personal fanart.tv API key: https://fanart.tv/get-an-api-key/ + +More detailed information can be found `on their blog`_. Specifically, the +personal key will give you earlier access to new art. + +.. _on their blog: https://fanart.tv/2015/01/personal-api-keys/ + +Storing the Artwork's Source +---------------------------- + +Storing the current artwork's source might be used to narrow down +``fetchart`` commands. For example, if some albums have artwork placed +manually in their directories that should not be replaced by a forced +album art fetch, you could do + +``beet fetchart -f ^art_source:filesystem`` + +The values written to ``art_source`` are the same names used in the ``sources`` +configuration value. Embedding Album Art ------------------- diff -Nru beets-1.3.8+dfsg/docs/plugins/filefilter.rst beets-1.3.19/docs/plugins/filefilter.rst --- beets-1.3.8+dfsg/docs/plugins/filefilter.rst 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/docs/plugins/filefilter.rst 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,30 @@ +FileFilter Plugin +================= + +The ``filefilter`` plugin allows you to skip files during import using +regular expressions. + +To use the ``filefilter`` plugin, enable it in your configuration (see +:ref:`using-plugins`). + +Configuration +------------- + +To configure the plugin, make a ``filefilter:`` section in your +configuration file. 
The available options are:
+
+- **path**: A regular expression to filter files based on their path and name.
+  Default: ``.*`` (import everything)
+- **album_path** and **singleton_path**: You may specify different regular
+  expressions used for imports of albums and singletons. This way, you can
+  automatically skip singletons when importing albums if the names (and paths)
+  of the files are distinguishable via a regex. The regexes defined here
+  take precedence over the global ``path`` option.
+
+Here's an example::
+
+    filefilter:
+        path: .*\d\d[^/]+$
+        # will only import files whose names start with two digits
+        album_path: .*\d\d[^/]+$
+        singleton_path: .*/(?!\d\d)[^/]+$
diff -Nru beets-1.3.8+dfsg/docs/plugins/freedesktop.rst beets-1.3.19/docs/plugins/freedesktop.rst
--- beets-1.3.8+dfsg/docs/plugins/freedesktop.rst 1970-01-01 00:00:00.000000000 +0000
+++ beets-1.3.19/docs/plugins/freedesktop.rst 2016-06-20 01:53:12.000000000 +0000
@@ -0,0 +1,6 @@
+Freedesktop Plugin
+==================
+
+The ``freedesktop`` plugin creates .directory files in your album folders.
+This plugin is now deprecated and replaced by the :doc:`/plugins/thumbnails`
+with the `dolphin` option enabled.
diff -Nru beets-1.3.8+dfsg/docs/plugins/fromfilename.rst beets-1.3.19/docs/plugins/fromfilename.rst
--- beets-1.3.8+dfsg/docs/plugins/fromfilename.rst 2014-03-22 17:57:21.000000000 +0000
+++ beets-1.3.19/docs/plugins/fromfilename.rst 2016-06-20 01:53:12.000000000 +0000
@@ -9,6 +9,5 @@
 look at the track's filename and guess its track number, title, and
 artist. These will be used to search in MusicBrainz and match track
 ordering.
 
-To use the plugin, just enable it by putting ``fromfilename`` on the
-``plugins:`` line in your config file. There are currently no configuration
-options.
+To use the ``fromfilename`` plugin, enable it in your configuration
+(see :ref:`using-plugins`).
diff -Nru beets-1.3.8+dfsg/docs/plugins/ftintitle.rst beets-1.3.19/docs/plugins/ftintitle.rst
--- beets-1.3.8+dfsg/docs/plugins/ftintitle.rst 2014-04-27 03:49:51.000000000 +0000
+++ beets-1.3.19/docs/plugins/ftintitle.rst 2016-06-20 01:53:12.000000000 +0000
@@ -1,7 +1,7 @@
 FtInTitle Plugin
 ================
 
-The ``ftintitle`` plugin automatically moved "featured" artists from the
+The ``ftintitle`` plugin automatically moves "featured" artists from the
 ``artist`` field to the ``title`` field.
 
 According to `MusicBrainz style`_, featured artists are part of the artist
@@ -10,15 +10,35 @@
 RZA". If you prefer to tag this as "Tellin' Me Things feat. RZA" by
 "Blakroc", then this plugin is for you.
 
-To use the plugin, just enable it and run the command::
+To use the ``ftintitle`` plugin, enable it in your configuration
+(see :ref:`using-plugins`).
+
+Configuration
+-------------
+
+To configure the plugin, make a ``ftintitle:`` section in your configuration
+file. The available options are:
+
+- **auto**: Enable metadata rewriting during import.
+  Default: ``yes``.
+- **drop**: Remove featured artists entirely instead of adding them to the
+  title field.
+  Default: ``no``.
+- **format**: The format for the "featuring X" part of the new title field,
+  where ``{0}`` marks the position of the featured artists.
+  Default: ``feat. {0}``
+
+Running Manually
+----------------
+
+From the command line, type::
+
+    $ beet ftintitle [QUERY]
+
+The query is optional; if it's left off, the transformation will be applied
+to your entire collection.
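+
+For instance, to restrict the rewrite to a single artist, you could pass a
+query (a hypothetical example using the artist mentioned above)::
+
+    $ beet ftintitle artist:Blakroc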
-If you prefer to remove featured artists entirely instead of adding them to -the title field, either use the ``-d`` flag to the command or set the -``ftintitle.drop`` config option. +Use the ``-d`` flag to remove featured artists (equivalent of the ``drop`` +config option). .. _MusicBrainz style: http://musicbrainz.org/doc/Style diff -Nru beets-1.3.8+dfsg/docs/plugins/fuzzy.rst beets-1.3.19/docs/plugins/fuzzy.rst --- beets-1.3.8+dfsg/docs/plugins/fuzzy.rst 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/docs/plugins/fuzzy.rst 2016-06-20 01:53:12.000000000 +0000 @@ -1,25 +1,24 @@ Fuzzy Search Plugin =================== -The ``fuzzy`` plugin provides a prefixed query that search you library using +The ``fuzzy`` plugin provides a prefixed query that searches your library using fuzzy pattern matching. This can be useful if you want to find a track with complicated characters in the title. -First, enable the plugin named ``fuzzy`` (see :doc:`/plugins/index`). +First, enable the plugin named ``fuzzy`` (see :ref:`using-plugins`). You'll then be able to use the ``~`` prefix to use fuzzy matching:: $ beet ls '~Vareoldur' Sigur Rós - Valtari - Varðeldur -The plugin provides config options that let you choose the prefix and the -threshold.:: +Configuration +------------- - fuzzy: - threshold: 0.8 - prefix: '@' +To configure the plugin, make a ``fuzzy:`` section in your configuration +file. The available options are: -A threshold value of 1.0 will show only perfect matches and a value of 0.0 -will match everything. - -The default prefix ``~`` needs to be escaped or quoted in most shells. If this -bothers you, you can change the prefix in your config file. +- **threshold**: The "sensitivity" of the fuzzy match. A value of 1.0 will + show only perfect matches and a value of 0.0 will match everything. + Default: 0.7. +- **prefix**: The character used to designate fuzzy queries. + Default: ``~``, which may need to be escaped in some shells. diff -Nru beets-1.3.8+dfsg/docs/plugins/hook.rst beets-1.3.19/docs/plugins/hook.rst --- beets-1.3.8+dfsg/docs/plugins/hook.rst 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/docs/plugins/hook.rst 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,72 @@ +Hook Plugin +=========== + +Internally, beets uses *events* to tell plugins when something happens. For +example, one event fires when the importer finishes processes a song, and +another triggers just before the ``beet`` command exits. +The ``hook`` plugin lets you run commands in response to these events. + +.. _hook-configuration: + +Configuration +------------- + +To configure the plugin, make a ``hook`` section in your configuration +file. The available options are: + +- **hooks**: A list of events and the commands to run + (see :ref:`individual-hook-configuration`). Default: Empty. + +.. _individual-hook-configuration: + +Configuring Each Hook +''''''''''''''''''''' + +Each element under ``hooks`` should have these keys: + +- **event**: The name of the event that will trigger this hook. + See the :ref:`plugin events <plugin_events>` documentation for a list + of possible values. +- **command**: The command to run when this hook executes. + +.. _command-substitution: + +Command Substitution +'''''''''''''''''''' + +Commands can access the parameters of events using `Python string +formatting`_. Use ``{name}`` in your command and the plugin will substitute it +with the named value. The name can also refer to a field, as in +``{album.path}``. + +.. 
_Python string formatting: https://www.python.org/dev/peps/pep-3101/ + +You can find a list of all available events and their arguments in the +:ref:`plugin events <plugin_events>` documentation. + +Example Configuration +--------------------- + +.. code-block:: yaml + + hook: + hooks: + # Output on exit: + # beets just exited! + # have a nice day! + - event: cli_exit + command: echo "beets just exited!" + - event: cli_exit + command: echo "have a nice day!" + + # Output on item import: + # importing "<file_name_here>" + # Where <file_name_here> is the item being imported + - event: item_imported + command: echo "importing \"{item.path}\"" + + # Output on write: + # writing to "<file_name_here>" + # Where <file_name_here> is the file being written to + - event: write + command: echo "writing to {path}" diff -Nru beets-1.3.8+dfsg/docs/plugins/ihate.rst beets-1.3.19/docs/plugins/ihate.rst --- beets-1.3.8+dfsg/docs/plugins/ihate.rst 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/docs/plugins/ihate.rst 2016-06-20 01:53:12.000000000 +0000 @@ -6,18 +6,28 @@ :doc:`/reference/query`) and the plugin skips (or warns about) albums or items that match any query. -To use the plugin, first enable it in your configuration (see -:ref:`using-plugins`). Then, add an ``ihate:`` section to your configuration -file:: +To use the ``ihate`` plugin, enable it in your configuration (see +:ref:`using-plugins`). + +Configuration +------------- + +To configure the plugin, make an ``ihate:`` section in your configuration +file. The available options are: + +- **skip**: Never import items and albums that match a query in this list. + Default: ``[]`` (empty list). +- **warn**: Print a warning message for matches in this list of queries. + Default: ``[]``. + +Here's an example:: ihate: - # Print a warning message for these. warn: - artist:rnb - - genre: soul + - genre:soul # Only warn about tribute albums in rock genre. - genre:rock album:tribute - # Never import any of this. skip: - genre::russian\srock - genre:polka diff -Nru beets-1.3.8+dfsg/docs/plugins/importadded.rst beets-1.3.19/docs/plugins/importadded.rst --- beets-1.3.8+dfsg/docs/plugins/importadded.rst 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/docs/plugins/importadded.rst 2016-06-20 01:53:12.000000000 +0000 @@ -4,6 +4,12 @@ The ``importadded`` plugin is useful when an existing collection is imported and the time when albums and items were added should be preserved. +To use the ``importadded`` plugin, enable it in your configuration (see +:ref:`using-plugins`). + +Usage +----- + The :abbr:`mtime (modification time)` of files that are imported into the library are assumed to represent the time when the items were originally added. @@ -12,14 +18,15 @@ * For singleton items with no album, ``item.added`` is set to the item's file mtime before it was imported. -* For items that are part of an album, ``album.added`` and ``item.added`` is +* For items that are part of an album, ``album.added`` and ``item.added`` are set to the oldest mtime of the files in the album before they were imported. - The mtime of album directories are ignored. + The mtime of album directories is ignored. -This plugin can optionally be configured to also preserve mtimes:: +This plugin can optionally be configured to also preserve mtimes at +import using the ``preserve_mtimes`` option. - importadded: - preserve_mtimes: yes # default: no +When ``preserve_write_mtimes`` option is set, this plugin preserves +mtimes after each write to files using the ``item.added`` attribute. 
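+
+As a sketch, enabling both options in ``config.yaml`` would look like this::
+
+    importadded:
+        preserve_mtimes: yes
+        preserve_write_mtimes: yes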
File modification times are preserved as follows: @@ -31,3 +38,23 @@ Note that there is no ``album.mtime`` field in the database and that the mtime of album directories on disk aren't preserved. + +Configuration +------------- + +To configure the plugin, make an ``importadded:`` section in your +configuration file. There are two options available: + +- **preserve_mtimes**: After importing files, re-set their mtimes to their + original value. + Default: ``no``. + +- **preserve_write_mtimes**: After writing files, re-set their mtimes to their + original value. + Default: ``no``. + +Reimport +-------- + +This plugin will skip reimported singleton items and reimported albums and all +of their items. diff -Nru beets-1.3.8+dfsg/docs/plugins/importfeeds.rst beets-1.3.19/docs/plugins/importfeeds.rst --- beets-1.3.8+dfsg/docs/plugins/importfeeds.rst 2014-09-14 20:06:13.000000000 +0000 +++ beets-1.3.19/docs/plugins/importfeeds.rst 2016-06-20 01:53:12.000000000 +0000 @@ -1,30 +1,41 @@ ImportFeeds Plugin ================== -The ``importfeeds`` plugin helps you keep track of newly imported music in your library. +This plugin helps you keep track of newly imported music in your library. -To use the plugin, just put ``importfeeds`` on the ``plugins`` line in your -:doc:`configuration file </reference/config>`. Then set a few options under the -``importfeeds:`` section in the config file. - -The ``dir`` configuration option can be set to specify another folder -than the default library directory. This is where your playlist will be created. - -The ``relative_to`` configuration option can be set to make the m3u paths -relative to another folder than where the playlist is being written. If you're -using importfeeds to generate a playlist for MPD, you should set this to the -root of your music library. - -The ``absolute_path`` configuration option can be set to use absolute paths -instead of relative paths. Some applications may need this to work properly. - -Four different types of outputs are available. Specify the ones you want to -use by setting the ``formats`` parameter: - -- ``m3u``: catalog the imports in a centralized playlist. By default, the playlist is named ``imported.m3u``. To use a different file, just set the ``m3u_name`` parameter inside the ``importfeeds`` config section. -- ``m3u_multi``: create a new playlist for each import (uniquely named by appending the date and track/album name). -- ``link``: create a symlink for each imported item. This is the recommended setting to propagate beets imports to your iTunes library: just drag and drop the ``dir`` folder on the iTunes dock icon. -- ``echo``: do not write a playlist file at all, but echo a list of new file paths to the terminal. +To use the ``importfeeds`` plugin, enable it in your configuration +(see :ref:`using-plugins`). + +Configuration +------------- + +To configure the plugin, make an ``importfeeds:`` section in your +configuration file. The available options are: + +- **absolute_path**: Use absolute paths instead of relative paths. Some + applications may need this to work properly. + Default: ``no``. +- **dir**: The output directory. + Default: Your beets library directory. +- **formats**: Select the kind of output. Use one or more of: + + - **m3u**: Catalog the imports in a centralized playlist. + - **m3u_multi**: Create a new playlist for each import (uniquely named by + appending the date and track/album name). + - **link**: Create a symlink for each imported item. 
This is the + recommended setting to propagate beets imports to your iTunes library: + just drag and drop the ``dir`` folder on the iTunes dock icon. + - **echo**: Do not write a playlist file at all, but echo a list of new + file paths to the terminal. + + Default: None. +- **m3u_name**: Playlist name used by the ``m3u`` format. + Default: ``imported.m3u``. +- **relative_to**: Make the m3u paths relative to another + folder than where the playlist is being written. If you're using importfeeds + to generate a playlist for MPD, you should set this to the root of your music + library. + Default: None. Here's an example configuration for this plugin:: diff -Nru beets-1.3.8+dfsg/docs/plugins/index.rst beets-1.3.19/docs/plugins/index.rst --- beets-1.3.8+dfsg/docs/plugins/index.rst 2014-09-14 18:35:06.000000000 +0000 +++ beets-1.3.19/docs/plugins/index.rst 2016-06-26 00:42:09.000000000 +0000 @@ -13,56 +13,76 @@ ------------- To use one of the plugins included with beets (see the rest of this page for a -list), just use the `plugins` option in your :doc:`config.yaml </reference/config>`: file, like so:: +list), just use the `plugins` option in your :doc:`config.yaml </reference/config>` file, like so:: - plugins: inline discogs web + plugins: inline convert web The value for `plugins` can be a space-separated list of plugin names or a YAML list like ``[foo, bar]``. You can see which plugins are currently enabled by typing ``beet version``. +Each plugin has its own set of options that can be defined in a section bearing its name:: + + plugins: inline convert web + + convert: + auto: true + .. toctree:: :hidden: - chroma - lyrics - echonest_tempo - echonest + acousticbrainz + badfiles + beatport bpd - mpdupdate - mpdstats - fetchart + bpm + bucket + chroma + convert + discogs + duplicates + edit embedart - web - lastgenre - replaygain - inline - scrub - rewrite - random - mbcollection - importfeeds - the + embyupdate + export + fetchart + fromfilename + ftintitle fuzzy - zero + freedesktop + hook ihate - convert + importadded + importfeeds info - play - smartplaylist + inline + ipfs + keyfinder + lastgenre + lastimport + lyrics + mbcollection + mbsubmit mbsync + metasync missing - duplicates - discogs - beatport - fromfilename - ftintitle - keyfinder - bucket - importadded - bpm + mpdstats + mpdupdate + permissions + play + plexupdate + random + filefilter + replaygain + rewrite + scrub + smartplaylist spotify + the + thumbnails types + web + zero Autotagger Extensions --------------------- @@ -73,72 +93,86 @@ * :doc:`fromfilename`: Guess metadata for untagged tracks from their filenames. -.. _Beatport: http://www.beatport.com/ .. _Discogs: http://www.discogs.com/ Metadata -------- -* :doc:`lyrics`: Automatically fetch song lyrics. -* :doc:`echonest`: Automatically fetch `acoustic attributes`_ from - `the Echo Nest`_ (tempo, energy, danceability, ...). -* :doc:`lastgenre`: Fetch genres based on Last.fm tags. -* :doc:`mbsync`: Fetch updated metadata from MusicBrainz -* :doc:`fetchart`: Fetch album cover art from various sources. +* :doc:`acousticbrainz`: Fetch various AcousticBrainz metadata +* :doc:`bpm`: Measure tempo using keystrokes. +* :doc:`edit`: Edit metadata from a text editor. * :doc:`embedart`: Embed album art images into files' metadata. -* :doc:`replaygain`: Calculate volume normalization for players that support it. -* :doc:`scrub`: Clean extraneous metadata from music files. -* :doc:`zero`: Nullify fields by pattern or unconditionally. 
+* :doc:`fetchart`: Fetch album cover art from various sources. * :doc:`ftintitle`: Move "featured" artists from the artist field to the title field. -* :doc:`mpdstats`: Connect to `MPD`_ and update the beets library with play - statistics (last_played, play_count, skip_count, rating). * :doc:`keyfinder`: Use the `KeyFinder`_ program to detect the musical key from the audio. * :doc:`importadded`: Use file modification times for guessing the value for the `added` field in the database. -* :doc:`bpm`: Determine bpm from keystrokes +* :doc:`lastgenre`: Fetch genres based on Last.fm tags. +* :doc:`lastimport`: Collect play counts from Last.fm. +* :doc:`lyrics`: Automatically fetch song lyrics. +* :doc:`mbsync`: Fetch updated metadata from MusicBrainz +* :doc:`metasync`: Fetch metadata from local or remote sources +* :doc:`mpdstats`: Connect to `MPD`_ and update the beets library with play + statistics (last_played, play_count, skip_count, rating). +* :doc:`replaygain`: Calculate volume normalization for players that support it. +* :doc:`scrub`: Clean extraneous metadata from music files. +* :doc:`zero`: Nullify fields by pattern or unconditionally. -.. _Acoustic Attributes: http://developer.echonest.com/acoustic-attributes.html -.. _the Echo Nest: http://www.echonest.com .. _KeyFinder: http://www.ibrahimshaath.co.uk/keyfinder/ Path Formats ------------ +* :doc:`bucket`: Group your files into bucket directories that cover different + field values ranges. * :doc:`inline`: Use Python snippets to customize path format strings. * :doc:`rewrite`: Substitute values in path formats. * :doc:`the`: Move patterns in path formats (i.e., move "a" and "the" to the end). -* :doc:`bucket`: Group your files into bucket directories that cover different - field values ranges. Interoperability ---------------- +* :doc:`embyupdate`: Automatically notifies `Emby`_ whenever the beets library changes. +* :doc:`importfeeds`: Keep track of imported files via ``.m3u`` playlist file(s) or symlinks. +* :doc:`ipfs`: Import libraries from friends and get albums from them via ipfs. * :doc:`mpdupdate`: Automatically notifies `MPD`_ whenever the beets library changes. -* :doc:`importfeeds`: Keep track of imported files via ``.m3u`` playlist file(s) or symlinks. -* :doc:`smartplaylist`: Generate smart playlists based on beets queries. * :doc:`play`: Play beets queries in your music player. +* :doc:`plexupdate`: Automatically notifies `Plex`_ whenever the beets library + changes. +* :doc:`smartplaylist`: Generate smart playlists based on beets queries. +* :doc:`thumbnails`: Get thumbnails with the cover art on your album folders. +* :doc:`badfiles`: Check audio file integrity. + + +.. _Emby: http://emby.media +.. _Plex: http://plex.tv Miscellaneous ------------- -* :doc:`web`: An experimental Web-based GUI for beets. -* :doc:`random`: Randomly choose albums and tracks from your library. -* :doc:`fuzzy`: Search albums and tracks with fuzzy string matching. -* :doc:`mbcollection`: Maintain your MusicBrainz collection list. -* :doc:`ihate`: Automatically skip albums and tracks during the import process. * :doc:`bpd`: A music player for your beets library that emulates `MPD`_ and is compatible with `MPD clients`_. * :doc:`convert`: Transcode music and embed album art while exporting to a different directory. +* :doc:`duplicates`: List duplicate tracks or albums. +* :doc:`export`: Export data from queries to a format. +* :doc:`fuzzy`: Search albums and tracks with fuzzy string matching. 
+* :doc:`hook`: Run a command when an event is emitted by beets. +* :doc:`ihate`: Automatically skip albums and tracks during the import process. * :doc:`info`: Print music files' tags to the console. +* :doc:`mbcollection`: Maintain your MusicBrainz collection list. +* :doc:`mbsubmit`: Print an album's tracks in a MusicBrainz-friendly format. * :doc:`missing`: List missing tracks. -* :doc:`duplicates`: List duplicate tracks or albums. +* :doc:`random`: Randomly choose albums and tracks from your library. +* :doc:`filefilter`: Automatically skip files during the import process based + on regular expressions. * :doc:`spotify`: Create Spotify playlists from the Beets library. * :doc:`types`: Declare types for flexible attributes. +* :doc:`web`: An experimental Web-based GUI for beets. .. _MPD: http://www.musicpd.org/ .. _MPD clients: http://mpd.wikia.com/wiki/Clients @@ -180,14 +214,32 @@ * `beets-check`_ automatically checksums your files to detect corruption. +* `beets-alternatives`_ manages external files. + +* `beets-follow`_ lets you check for new albums from artists you like. + +* `beets-setlister`_ generate playlists from the setlists of a given artist. + +* `beets-noimport`_ adds and removes directories from the incremental import skip list. + +* `whatlastgenre`_ fetches genres from various music sites. + +* `beets-usertag`_ lets you use keywords to tag and organize your music. + .. _beets-check: https://github.com/geigerzaehler/beets-check .. _copyartifacts: https://github.com/sbarakat/beets-copyartifacts .. _dsedivec: https://github.com/dsedivec/beets-plugins .. _beets-artistcountry: https://github.com/agrausem/beets-artistcountry -.. _beetFs: http://code.google.com/p/beetfs/ +.. _beetFs: https://code.google.com/p/beetfs/ .. _Beet-MusicBrainz-Collection: https://github.com/jeffayle/Beet-MusicBrainz-Collection/ .. _A cmus plugin: https://github.com/coolkehon/beets/blob/master/beetsplug/cmus.py .. _cmus: http://cmus.sourceforge.net/ .. _beet-amazon: https://github.com/jmwatte/beet-amazon +.. _beets-alternatives: https://github.com/geigerzaehler/beets-alternatives +.. _beets-follow: https://github.com/nolsto/beets-follow +.. _beets-setlister: https://github.com/tomjaspers/beets-setlister +.. _beets-noimport: https://gitlab.com/tiago.dias/beets-noimport +.. _whatlastgenre: https://github.com/YetAnotherNerd/whatlastgenre/tree/master/plugin/beets +.. _beets-usertag: https://github.com/igordertigor/beets-usertag diff -Nru beets-1.3.8+dfsg/docs/plugins/info.rst beets-1.3.19/docs/plugins/info.rst --- beets-1.3.8+dfsg/docs/plugins/info.rst 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/docs/plugins/info.rst 2016-06-20 01:53:12.000000000 +0000 @@ -5,7 +5,8 @@ any file format supported by beets. It works like a supercharged version of `mp3info`_ or `id3v2`_. -Enable the plugin and then type:: +Enable the ``info`` plugin in your configuration (see :ref:`using-plugins`) and +then type:: $ beet info /path/to/music.flac @@ -17,13 +18,28 @@ $ beet info beatles -Command-line options include: +If you just want to see specific properties you can use the +``--include-keys`` option to filter them. The argument is a +comma-separated list of simple glob patterns where ``*`` matches any +string. For example:: + + $ beet info -i 'title,mb*' beatles + +Will only show the ``title`` property and all properties starting with +``mb``. You can add the ``-i`` option multiple times to the command +line. 
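+
+Because the option can be repeated, the same patterns can also be split
+across two ``-i`` flags (a hypothetical invocation)::
+
+    $ beet info -i title -i 'mb*' beatles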
+ +Additional command-line options include: * ``--library`` or ``-l``: Show data from the library database instead of the files' tags. * ``--summarize`` or ``-s``: Merge all the information from multiple files into a single list of values. If the tags differ across the files, print ``[various]``. +* ``--format`` or ``-f``: Specify a specific format with which to print every + item. This uses the same template syntax as beets’ :doc:`path formats + </reference/pathformat>`. +* ``--keys-only`` or ``-k``: Show the name of the tags without the values. .. _id3v2: http://id3v2.sourceforge.net .. _mp3info: http://www.ibiblio.org/mp3info/ diff -Nru beets-1.3.8+dfsg/docs/plugins/inline.rst beets-1.3.19/docs/plugins/inline.rst --- beets-1.3.8+dfsg/docs/plugins/inline.rst 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/docs/plugins/inline.rst 2016-06-20 01:53:12.000000000 +0000 @@ -6,9 +6,9 @@ to them from your template strings in the ``paths:`` section (see :doc:`/reference/config/`). -To use inline field definitions, first enable the plugin by putting ``inline`` -on your ``plugins`` line in your configuration file. Then, make a -``item_fields:`` block in your config file. Under this key, every line defines a +To use the ``inline`` plugin, enable it in your configuration +(see :ref:`using-plugins`). +Then, make a ``item_fields:`` block in your config file. Under this key, every line defines a new template field; the key is the name of the field (you'll use the name to refer to the field in your templates) and the value is a Python expression or function body. The Python code has all of a track's fields in scope, so you can @@ -42,7 +42,7 @@ item_fields: filename: | import os - from beets.util import bytestring_path + from beets.util import bytestring_path return bytestring_path(os.path.basename(path)) You might want to use the YAML syntax for "block literals," in which a leading diff -Nru beets-1.3.8+dfsg/docs/plugins/ipfs.rst beets-1.3.19/docs/plugins/ipfs.rst --- beets-1.3.8+dfsg/docs/plugins/ipfs.rst 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/docs/plugins/ipfs.rst 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,72 @@ +IPFS Plugin +=========== + +The ``ipfs`` plugin makes it easy to share your library and music with friends. +The plugin uses `ipfs`_ for storing the library and file content. + +.. _ipfs: http://ipfs.io/ + +Installation +------------ + +This plugin requires `go-ipfs`_ to be running as a daemon and that the +associated ``ipfs`` command is on the user's ``$PATH``. + +.. _go-ipfs: https://github.com/ipfs/go-ipfs + +Once you have the client installed, enable the ``ipfs`` plugin in your +configuration (see :ref:`using-plugins`). + +Usage +----- + +This plugin can store and retrieve music individually, or it can share entire +library databases. + +Adding +'''''' + +To add albums to ipfs, making them shareable, use the ``-a`` or ``--add`` +flag. If used without arguments it will add all albums in the local library. +When added, all items and albums will get an "ipfs" field in the database +containing the hash of that specific file/folder. Newly imported albums will +be added automatically to ipfs by default (see below). + +Retrieving +'''''''''' + +You can give the ipfs hash for some music to a friend. They can get that album +from ipfs, and import it into beets, using the ``-g`` or ``--get`` flag. If +the argument passed to the ``-g`` flag isn't an ipfs hash, it will be used as +a query instead, getting all albums matching the query. 
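A minimal session might look like the following sketch (this assumes the plugin exposes its flags through a ``beet ipfs`` command, and the hash is a made-up placeholder)::

    $ beet ipfs -a artist:Beatles
    $ beet ipfs -g QmYourFriendsAlbumHashGoesHere

The first command publishes the matching albums to ipfs; the second fetches and imports an album from a hash a friend gave you.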
+ +Sharing Libraries ''''''''''''''''' + +Using the ``-p`` or ``--publish`` flag, a copy of the local library will be +published to ipfs. Only albums/items with ipfs records in the database will be +published, and local paths will be stripped from the library. A hash of the +library will be returned to the user. + +A friend can then import this remote library by using the ``-i`` or +``--import`` flag. To tag an imported library with a specific name, pass a name +as the second argument to ``-i``, after the hash. The content of all +remote libraries will be combined into an additional library as long as the +content doesn't already exist in the joined library. + +When remote libraries have been imported, you can search them by using the +``-l`` or ``--list`` flag. The hash of albums matching the query will be +returned, and this can then be used with ``-g`` to fetch and import the album +to the local library. + +Ipfs can be mounted as a FUSE file system. This means that music in a remote +library can be streamed directly, without importing it into the local library +first. If the ``/ipfs`` folder is mounted, then matching queries will be sent +to the :doc:`/plugins/play` plugin using the ``-m`` or ``--play`` flag. + +Configuration +------------- + +The ipfs plugin will automatically add imported albums to ipfs and add those +hashes to the database. This can be turned off by setting the ``auto`` option +in the ``ipfs:`` section of the config to ``no``. diff -Nru beets-1.3.8+dfsg/docs/plugins/keyfinder.rst beets-1.3.19/docs/plugins/keyfinder.rst --- beets-1.3.8+dfsg/docs/plugins/keyfinder.rst 2014-04-12 20:40:45.000000000 +0000 +++ beets-1.3.19/docs/plugins/keyfinder.rst 2016-06-20 01:53:12.000000000 +0000 @@ -3,26 +3,30 @@ The `keyfinder` plugin uses the `KeyFinder`_ program to detect the musical key of a track from its audio data and store it in the -`initial_key` field of you database. If enabled, it does so +`initial_key` field of your database. It does so automatically when importing music or through the ``beet keyfinder [QUERY]`` command. -There are a couple of configuration options to customize the behavior of -the plugin. By default they are:: +To use the ``keyfinder`` plugin, enable it in your configuration (see +:ref:`using-plugins`). - keyfinder: - bin: KeyFinder - auto: yes - overwrite: no +Configuration +------------- -* ``bin``: The name of the `KeyFinder` program on your system or - a path to the binary. If you installed the `KeyFinder`_ GUI on a Mac, for - example, you want something like - ``/Applications/KeyFinder.app/Contents/MacOS/KeyFinder``. -* ``auto``: If set to `yes`, the plugin will analyze every file on +To configure the plugin, make a ``keyfinder:`` section in your +configuration file. The available options are: + +- **auto**: Analyze every file on import. Otherwise, you need to use the ``beet keyfinder`` command explicitly. -* ``overwrite``: If set to `no`, the import hook and the command will skip - any file that already has an `initial_key` in the database. + Default: ``yes``. +- **bin**: The name of the `KeyFinder`_ program on your system or + a path to the binary. If you installed the KeyFinder GUI on a Mac, for + example, you want something like + ``/Applications/KeyFinder.app/Contents/MacOS/KeyFinder``. + Default: ``KeyFinder`` (i.e., search for the program in your ``$PATH``). +- **overwrite**: Calculate a key even for files that already have an + `initial_key` value. + Default: ``no``. ..
_KeyFinder: http://www.ibrahimshaath.co.uk/keyfinder/ diff -Nru beets-1.3.8+dfsg/docs/plugins/lastgenre.rst beets-1.3.19/docs/plugins/lastgenre.rst --- beets-1.3.8+dfsg/docs/plugins/lastgenre.rst 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/docs/plugins/lastgenre.rst 2016-06-20 01:53:12.000000000 +0000 @@ -4,30 +4,31 @@ The MusicBrainz database `does not contain genre information`_. Therefore, when importing and autotagging music, beets does not assign a genre. The ``lastgenre`` plugin fetches *tags* from `Last.fm`_ and assigns them as genres -to your albums and items. The plugin is included with beets as of version -1.0b11. +to your albums and items. .. _does not contain genre information: http://musicbrainz.org/doc/General_FAQ#Why_does_MusicBrainz_not_support_genre_information.3F .. _Last.fm: http://last.fm/ +Installation +------------ + The plugin requires `pylast`_, which you can install using `pip`_ by typing:: pip install pylast -After you have pylast installed, enable the plugin by putting ``lastgenre`` on -your ``plugins`` line in :doc:`config file </reference/config>`. +After you have pylast installed, enable the ``lastgenre`` plugin in your +configuration (see :ref:`using-plugins`). + +Usage +----- The plugin chooses genres based on a *whitelist*, meaning that only certain tags can be considered genres. This way, tags like "my favorite music" or "seen live" won't be considered genres. The plugin ships with a fairly extensive -`internal whitelist`_, but you can set your own in the config file using the -``whitelist`` configuration value:: - - lastgenre: - whitelist: /path/to/genres.txt - -…or go for no whitelist altogether by setting the option to `false`. +`internal whitelist`_, but you can set your own in the config file using the +``whitelist`` configuration value or forgo a whitelist altogether by setting +the option to `false`. The genre list file should contain one genre per line. Blank lines are ignored. For the curious, the default genre list is generated by a `script that scrapes @@ -36,81 +37,103 @@ .. _pip: http://www.pip-installer.org/ .. _pylast: http://code.google.com/p/pylast/ .. _script that scrapes Wikipedia: https://gist.github.com/1241307 -.. _internal whitelist: https://raw.githubusercontent.com/sampsyo/beets/master/beetsplug/lastgenre/genres.txt - -By default, beets will always fetch new genres, even if the files already have -once. To instead leave genres in place in when they pass the whitelist, set -the ``force`` option to `no`. - -If no genre is found, the file will be left unchanged. To instead specify a -fallback genre, use the ``fallback`` configuration option. You can, of -course, use the empty string as a fallback, like so:: - - lastgenre: - fallback: '' - +.. _internal whitelist: https://raw.githubusercontent.com/beetbox/beets/master/beetsplug/lastgenre/genres.txt Canonicalization ----------------- +^^^^^^^^^^^^^^^^ The plugin can also *canonicalize* genres, meaning that more obscure genres can be turned into coarser-grained ones that are present in the whitelist. This -works using a tree of nested genre names, represented using `YAML`_, where the +works using a `tree of nested genre names`_, represented using `YAML`_, where the leaves of the tree represent the most specific genres. -To enable canonicalization, set the ``canonical`` configuration value:: +The most common way to use this would be with a custom whitelist containing only +a desired subset of genres. 
Consider for example this minimal whitelist:: - lastgenre: - canonical: true + rock + heavy metal + pop + +together with the default genre tree. Then an item that has its genre specified +as *viking metal* would actually be tagged as *heavy metal* because neither +*viking metal* nor its parent *black metal* is in the whitelist. It always +tries to use the most specific genre that's available in the whitelist. + +The relevant subtree path in the default tree looks like this:: + + - rock: + - heavy metal: + - black metal: + - viking metal + +Considering that, it's not very useful to use the default whitelist (which +contains nearly every genre in the tree) with canonicalization because +nothing would ever be matched to a more generic node since all the specific +subgenres are in the whitelist to begin with. -Setting this value to `true` will use a built-in canonicalization -tree. You can also set it to a path, just like the ``whitelist`` config value, -to use your own tree. .. _YAML: http://www.yaml.org/ +.. _tree of nested genre names: https://raw.githubusercontent.com/beetbox/beets/master/beetsplug/lastgenre/genres-tree.yaml Genre Source ------------- +^^^^^^^^^^^^ When looking up genres for albums or individual tracks, you can choose whether to use Last.fm tags on the album, the artist, or the track. For example, you -might want all the albums for a certain artist to carry the same genre. Set the -``source`` configuration value to "album", "track", or "artist", like so:: - - lastgenre: - source: artist - +might want all the albums for a certain artist to carry the same genre. The default is "album". When set to "track", the plugin will fetch *both* album-level and track-level genres for your music when importing albums. Multiple Genres ---------------- +^^^^^^^^^^^^^^^ By default, the plugin chooses the most popular tag on Last.fm as a genre. If you prefer to use a *list* of popular genre tags, you can increase the number -of the ``count`` config option:: - - lastgenre: - count: 3 +of the ``count`` config option. Lists of up to *count* genres will then be used instead of single genres. The genres are separated by commas by default, but you can change this with the -``separator`` config option:: - - lastgenre: - separator: ' / ' +``separator`` config option. `Last.fm`_ provides a popularity factor, a.k.a. *weight*, for each tag ranging from 100 for the most popular tag down to 0 for the least popular. The plugin uses this weight to discard unpopular tags. The default is to ignore tags with a weight less than 10. You can change this by setting -the ``min_weight`` config option:: +the ``min_weight`` config option. + +Configuration +------------- - lastgenre: - min_weight: 15 +To configure the plugin, make a ``lastgenre:`` section in your +configuration file. The available options are: +- **auto**: Fetch genres automatically during import. + Default: ``yes``. +- **canonical**: Use a canonicalization tree. Setting this to ``yes`` will use + a built-in tree. You can also set it to a path, like the ``whitelist`` + config value, to use your own tree. + Default: ``no`` (disabled). +- **count**: Number of genres to fetch. + Default: 1. +- **fallback**: A string to use as a fallback genre when no genre is found. + You can use the empty string ``''`` to reset the genre. + Default: None. +- **force**: By default, beets will always fetch new genres, even if the files + already have one. To instead leave genres in place when they pass the + whitelist, set the ``force`` option to ``no``.
+ Default: ``yes``. +- **min_weight**: Minimum popularity factor below which genres are discarded. + Default: 10. +- **source**: Which entity to look up in Last.fm. Can be + either ``artist``, ``album`` or ``track``. + Default: ``album``. +- **separator**: A separator for multiple genres. + Default: ``', '``. +- **whitelist**: The filename of a custom genre list, ``yes`` to use + the internal whitelist, or ``no`` to consider all genres valid. + Default: ``yes``. Running Manually ---------------- diff -Nru beets-1.3.8+dfsg/docs/plugins/lastimport.rst beets-1.3.19/docs/plugins/lastimport.rst --- beets-1.3.8+dfsg/docs/plugins/lastimport.rst 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/docs/plugins/lastimport.rst 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,61 @@ +LastImport Plugin +================= + +The ``lastimport`` plugin downloads play-count data from your `Last.fm`_ +library into beets' database. You can later create :doc:`smart playlists +</plugins/smartplaylist>` by querying ``play_count`` and do other fun stuff +with this field. + +.. _Last.fm: http://last.fm + +Installation +------------ + +The plugin requires `pylast`_, which you can install using `pip`_ by typing:: + + pip install pylast + +After you have pylast installed, enable the ``lastimport`` plugin in your +configuration (see :ref:`using-plugins`). + +Next, add your Last.fm username to your beets configuration file:: + + lastfm: + user: beetsfanatic + +.. _pip: http://www.pip-installer.org/ +.. _pylast: http://code.google.com/p/pylast/ + +Importing Play Counts +--------------------- + +Simply run ``beet lastimport`` and wait for the plugin to request tracks from +Last.fm and match them to your beets library. (You will be notified of tracks +in your Last.fm profile that do not match any songs in your library.) + +Then, your matched tracks will be populated with the ``play_count`` field, +which you can use in any query or template. For example:: + + $ beet ls -f '$title: $play_count' play_count:5.. + Eple (Melody A.M.): 60 + +To see more information (namely, the specific play counts for matched tracks), +use the ``-v`` option. + +Configuration +------------- + +Aside from the required ``lastfm.user`` field, this plugin has some specific +options under the ``lastimport:`` section: + +* **per_page**: The number of tracks to request from the API at once. + Default: 500. +* **retry_limit**: How many times should we re-send requests to Last.fm on + failure? + Default: 3. + +By default, the plugin will use beets's own Last.fm API key. You can also +override it with your own API key:: + + lastfm: + api_key: your_api_key diff -Nru beets-1.3.8+dfsg/docs/plugins/lyrics.rst beets-1.3.19/docs/plugins/lyrics.rst --- beets-1.3.8+dfsg/docs/plugins/lyrics.rst 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/docs/plugins/lyrics.rst 2016-06-20 01:53:12.000000000 +0000 @@ -3,24 +3,75 @@ The ``lyrics`` plugin fetches and stores song lyrics from databases on the Web. Namely, the current version of the plugin uses `Lyric Wiki`_, `Lyrics.com`_, -and, optionally, the Google custom search API. +`Musixmatch`_, `Genius.com`_, and, optionally, the Google custom search API. .. _Lyric Wiki: http://lyrics.wikia.com/ .. _Lyrics.com: http://www.lyrics.com/ +.. _Musixmatch: https://www.musixmatch.com/ +.. 
_Genius.com: http://genius.com/ Fetch Lyrics During Import -------------------------- -To automatically fetch lyrics for songs you import, just enable the plugin by -putting ``lyrics`` on your config file's ``plugins`` line (see -:doc:`/plugins/index`). When importing new files, beets will now fetch lyrics -for files that don't already have them. The lyrics will be stored in the beets -database. If the ``import.write`` config option is on, then the lyrics will also -be written to the files' tags. +To automatically fetch lyrics for songs you import, enable the ``lyrics`` +plugin in your configuration (see :ref:`using-plugins`). +Then, install the `requests`_ library by typing:: + + pip install requests + +The plugin uses `requests`_ to download lyrics. + +When importing new files, beets will now fetch lyrics for files that don't +already have them. The lyrics will be stored in the beets database. If the +``import.write`` config option is on, then the lyrics will also be written to +the files' tags. + +.. _requests: http://docs.python-requests.org/en/latest/ + + +Configuration +------------- + +To configure the plugin, make a ``lyrics:`` section in your +configuration file. The available options are: + +- **auto**: Fetch lyrics automatically during import. + Default: ``yes``. +- **bing_client_secret**: Your Bing Translation application password + (to :ref:`lyrics-translation`) +- **bing_lang_from**: By default all lyrics with a language other than + ``bing_lang_to`` are translated. Use a list of lang codes to restrict the set + of source languages to translate. + Default: ``[]`` +- **bing_lang_to**: Language to translate lyrics into. + Default: None. +- **fallback**: By default, the file will be left unchanged when no lyrics are + found. Use the empty string ``''`` to reset the lyrics in such a case. + Default: None. +- **force**: By default, beets won't fetch lyrics if the files already have + ones. To instead always fetch lyrics, set the ``force`` option to ``yes``. + Default: ``no``. +- **google_API_key**: Your Google API key (to enable the Google Custom Search + backend). + Default: None. +- **google_engine_ID**: The custom search engine to use. + Default: The `beets custom search engine`_, which gathers an updated list of + sources known to be scrapeable. +- **sources**: List of sources to search for lyrics. An asterisk ``*`` expands + to all available sources. + Default: ``google lyricwiki lyrics.com musixmatch``, i.e., all the + sources except for `genius`. The `google` source will be automatically + deactivated if no ``google_API_key`` is setup. -This behavior can be disabled with the ``auto`` config option (see below). +Here's an example of ``config.yaml``:: + lyrics: + fallback: '' + google_API_key: AZERTYUIOPQSDFGHJKLMWXCVBN1234567890_ab + google_engine_ID: 009217259823014548361:lndtuqkycfu + +.. _beets custom search engine: https://www.google.com:443/cse/publicurl?cx=009217259823014548361:lndtuqkycfu Fetching Lyrics Manually ------------------------ @@ -39,26 +90,8 @@ The ``-f`` option forces the command to fetch lyrics, even for tracks that already have lyrics. - -Configuring ------------ - -To disable automatic lyric fetching during import, set the ``auto`` option to -false, like so:: - - lyrics: - auto: no - -By default, if no lyrics are found, the file will be left unchanged. To -specify a placeholder for the lyrics tag when none are found, use the -``fallback`` configuration option:: - - lyrics: - fallback: 'No lyrics found' - .. 
_activate-google-custom-search: - Activate Google custom search ------------------------------ @@ -68,9 +101,14 @@ pip install beautifulsoup4 You also need to `register for a Google API key`_. Set the ``google_API_key`` -configuration option to your key. This enables the Google backend. +configuration option to your key. +Then add ``google`` to the list of sources in your configuration (or use the +default list, which includes it as long as you have an API key). +If you use the default ``google_engine_ID``, we recommend limiting the sources to +``musixmatch google``, as the other sources are already included in the Google +results. -.. _register for a Google API key: https://code.google.com/apis/console. +.. _register for a Google API key: https://code.google.com/apis/console Optionally, you can `define a custom search engine`_. Get your search engine's token and use it for your ``google_engine_ID`` configuration option. By @@ -78,14 +116,28 @@ .. _define a custom search engine: http://www.google.com/cse/all -Here's an example of ``config.yaml``:: - - lyrics: - google_API_key: AZERTYUIOPQSDFGHJKLMWXCVBN1234567890_ab - google_engine_ID: 009217259823014548361:lndtuqkycfu - Note that the Google custom search API is limited to 100 queries per day. -After that, the lyrics plugin will fall back on its other data sources. +After that, the lyrics plugin will fall back on other declared data sources. .. _pip: http://www.pip-installer.org/ .. _BeautifulSoup: http://www.crummy.com/software/BeautifulSoup/bs4/doc/ + +.. _lyrics-translation: + +Activate On-the-Fly Translation +------------------------------- + +Using the Bing Translation API requires `langdetect`_, which you can install +using `pip`_ by typing:: + + pip install langdetect + +You also need to register for a free Microsoft Azure Marketplace account and +subscribe to the `Microsoft Translator API`_. Follow the four-step process: at +step 3, enter ``beets`` as the *Client ID* and copy the generated +*Client secret* into your ``bing_client_secret`` configuration option, alongside +the ``bing_lang_to`` target `language code`_. + +.. _langdetect: https://pypi.python.org/pypi/langdetect +.. _Microsoft Translator API: https://www.microsoft.com/en-us/translator/getstarted.aspx +.. _language code: https://msdn.microsoft.com/en-us/library/hh456380.aspx diff -Nru beets-1.3.8+dfsg/docs/plugins/mbcollection.rst beets-1.3.19/docs/plugins/mbcollection.rst --- beets-1.3.8+dfsg/docs/plugins/mbcollection.rst 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/docs/plugins/mbcollection.rst 2016-06-20 01:53:12.000000000 +0000 @@ -6,7 +6,8 @@ .. _music collection: http://musicbrainz.org/doc/Collections -To begin, just enable the ``mbcollection`` plugin (see :doc:`/plugins/index`). +To begin, just enable the ``mbcollection`` plugin in your +configuration (see :ref:`using-plugins`). Then, add your MusicBrainz username and password to your :doc:`configuration file </reference/config>` under a ``musicbrainz`` section:: @@ -19,16 +20,12 @@ If you don't have a MusicBrainz collection yet, you may need to add one to your profile first. -Automatically Update on Import ------------------------------- +Configuration +------------- -You can also configure the plugin to automatically amend your MusicBrainz -collection whenever you import a new album. To do this, first enable the -plugin and add your MusicBrainz account as above.
Then, add ``mbcollection`` -section and enable the enable ``auto`` flag therein:: +To configure the plugin, make a ``mbcollection:`` section in your +configuration file. There is one option available: - mbcollection: - auto: yes - -During future imports, your default collection will be updated with each -imported album. +- **auto**: Automatically amend your MusicBrainz collection whenever you + import a new album. + Default: ``no``. diff -Nru beets-1.3.8+dfsg/docs/plugins/mbsubmit.rst beets-1.3.19/docs/plugins/mbsubmit.rst --- beets-1.3.8+dfsg/docs/plugins/mbsubmit.rst 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/docs/plugins/mbsubmit.rst 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,54 @@ +MusicBrainz Submit Plugin +========================= + +The ``mbsubmit`` plugin provides an extra prompt choice during an import +session that prints the tracks of the current album in a format that is +parseable by MusicBrainz's `track parser`_. + +.. _track parser: http://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings + +Usage +----- + +Enable the ``mbsubmit`` plugin in your configuration (see :ref:`using-plugins`) +and select the ``Print tracks`` choice which is by default displayed when no +strong recommendations are found for the album:: + + No matching release found for 3 tracks. + For help, see: http://beets.readthedocs.org/en/latest/faq.html#nomatch + [U]se as-is, as Tracks, Group albums, Skip, Enter search, enter Id, aBort, + Print tracks? p + 01. An Obscure Track - An Obscure Artist (3:37) + 02. Another Obscure Track - An Obscure Artist (2:05) + 03. The Third Track - Another Obscure Artist (3:02) + + No matching release found for 3 tracks. + For help, see: http://beets.readthedocs.org/en/latest/faq.html#nomatch + [U]se as-is, as Tracks, Group albums, Skip, Enter search, enter Id, aBort, + Print tracks? + +As MusicBrainz currently does not support submitting albums programmatically, +the recommended workflow is to copy the output of the ``Print tracks`` choice +and paste it into the parser that can be found by clicking on the +"Track Parser" button on MusicBrainz "Tracklist" tab. + +Configuration +------------- + +To configure the plugin, make a ``mbsubmit:`` section in your configuration +file. The following options are available: + +- **format**: The format used for printing the tracks, defined using the + same template syntax as beets’ :doc:`path formats </reference/pathformat>`. + Default: ``$track. $title - $artist ($length)``. +- **threshold**: The minimum strength of the autotagger recommendation that + will cause the ``Print tracks`` choice to be displayed on the prompt. + Default: ``medium`` (causing the choice to be displayed for all albums that + have a recommendation of medium strength or lower). Valid values: ``none``, + ``low``, ``medium``, ``strong``. + +Please note that some values of the ``threshold`` configuration option might +require other ``beets`` command line switches to be enabled in order to work as +intended. In particular, setting a threshold of ``strong`` will only display +the prompt if ``timid`` mode is enabled. You can find more information about +how the recommendation system works at :ref:`match-config`. 
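For illustration only (both values below are just examples of the documented options), a configuration that prints a shorter listing and only offers the choice when the tagger has no recommendation at all might look like::

    mbsubmit:
        format: $track. $title ($length)
        threshold: none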
diff -Nru beets-1.3.8+dfsg/docs/plugins/mbsync.rst beets-1.3.19/docs/plugins/mbsync.rst --- beets-1.3.8+dfsg/docs/plugins/mbsync.rst 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/docs/plugins/mbsync.rst 2016-06-20 01:53:12.000000000 +0000 @@ -15,9 +15,9 @@ Usage ----- -Enable the plugin and then run ``beet mbsync QUERY`` to fetch updated metadata -for a part of your collection (or omit the query to run over your whole -library). +Enable the ``mbsync`` plugin in your configuration (see :ref:`using-plugins`) +and then run ``beet mbsync QUERY`` to fetch updated metadata for a part of your +collection (or omit the query to run over your whole library). This plugin treats albums and singletons (non-album tracks) separately. It first processes all matching singletons and then proceeds on to full albums. @@ -33,3 +33,6 @@ * If you have the `import.write` configuration option enabled, then this plugin will write new metadata to files' tags. To disable this, use the ``-W`` (``--nowrite``) option. +* To customize the output of unrecognized items, use the ``-f`` + (``--format``) option. The default output is ``format_item`` or + ``format_album`` for items and albums, respectively. diff -Nru beets-1.3.8+dfsg/docs/plugins/metasync.rst beets-1.3.19/docs/plugins/metasync.rst --- beets-1.3.8+dfsg/docs/plugins/metasync.rst 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/docs/plugins/metasync.rst 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,63 @@ +MetaSync Plugin +=============== + +This plugin provides the ``metasync`` command, which lets you fetch certain +metadata from other sources: for example, your favorite audio player. + +Currently, the plugin supports synchronizing with the `Amarok`_ music player, +and with `iTunes`_. +It can fetch the rating, score, first-played date, last-played date, play +count, and track uid from Amarok. + +.. _Amarok: https://amarok.kde.org/ +.. _iTunes: https://www.apple.com/itunes/ + + +Installation +------------ + +Enable the ``metasync`` plugin in your configuration (see +:ref:`using-plugins`). + +To synchronize with Amarok, you'll need the `dbus-python`_ library. There are +packages for most major Linux distributions. + +.. _dbus-python: http://dbus.freedesktop.org/releases/dbus-python/ + + +Configuration +------------- + +To configure the plugin, make a ``metasync:`` section in your configuration +file. The available options are: + +- **source**: A list of comma-separated sources to fetch metadata from. + Set this to "amarok" or "itunes" to enable synchronization with that player. + Default: empty. + +The following subsections describe the additional configuration required for some players. + +itunes +'''''' + +The path to your iTunes library **XML** file has to be configured, e.g.:: + + metasync: + source: itunes + itunes: + library: ~/Music/iTunes Library.xml + +Please note the indentation. + +Usage +----- + +Run ``beet metasync QUERY`` to fetch metadata from the configured list of +sources. + +The command has a few command-line options: + +* To preview the changes that would be made without applying them, use the + ``-p`` (``--pretend``) flag. +* To specify temporary sources to fetch metadata from, use the ``-s`` + (``--source``) flag with a comma-separated list of sources. diff -Nru beets-1.3.8+dfsg/docs/plugins/missing.rst beets-1.3.19/docs/plugins/missing.rst --- beets-1.3.8+dfsg/docs/plugins/missing.rst 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/docs/plugins/missing.rst 2016-06-20 01:53:12.000000000 +0000 @@ -6,59 +6,46 @@ tracks are missing. Listing missing files requires one network call to MusicBrainz. Merely counting missing files avoids any network calls.
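For instance, a count-only run avoids the network call entirely (a sketch; the options are described under Usage below, and the output line is purely illustrative)::

    $ beet missing -c
    An Obscure Artist - An Obscure Album: 2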
Listing missing files requires one network call to MusicBrainz. Merely counting missing files avoids any network calls. +Usage +----- -Installation ------------- - -Enable the plugin by putting ``missing`` on your ``plugins`` line in -:doc:`config file </reference/config>`:: - - plugins: - missing - ... - -Configuration -------------- - +Add the ``missing`` plugin to your configuration (see :ref:`using-plugins`). By default, the ``beet missing`` command lists the names of tracks that your -library is missing from each album. You can customize the output format, count +library is missing from each album. +You can customize the output format, count the number of missing tracks per album, or total up the number of missing -tracks over your whole library. These options can either be specified in the -config file:: - - missing: - format: $albumartist - $album - $title - count: no - total: no - -or on the command-line:: +tracks over your whole library, using command-line switches:: -f FORMAT, --format=FORMAT print with custom FORMAT -c, --count count missing tracks per album -t, --total count total of missing tracks +…or by editing corresponding options. -format -~~~~~~ +Configuration +------------- -The ``format`` option (default: :ref:`list_format_item`) lets you specify a -specific format with which to print every track. This uses the same template -syntax as beets’ :doc:`path formats </reference/pathformat>`. The usage is -inspired by, and therefore similar to, the :ref:`list <list-cmd>` command. - -count -~~~~~ - -The ``count`` option (default: false) prints a count of missing tracks -per album, with ``format`` defaulting to ``$albumartist - $album: -$missing``. +To configure the plugin, make a ``missing:`` section in your +configuration file. The available options are: -total -~~~~~ +- **count**: Print a count of missing tracks per album, with ``format`` + defaulting to ``$albumartist - $album: $missing``. + Default: ``no``. +- **format**: A specific format with which to print every + track. This uses the same template syntax as beets' + :doc:`path formats </reference/pathformat>`. The usage is inspired by, and + therefore similar to, the :ref:`list <list-cmd>` command. + Default: :ref:`format_item`. +- **total**: Print a single count of missing tracks in all albums. + Default: ``no``. -The ``total`` option (default: false) prints a single -count of missing tracks in all albums +Here's an example :: + + missing: + format: $albumartist - $album - $title + count: no + total: no Template Fields --------------- @@ -94,9 +81,4 @@ beet ls -a -f '$albumartist - $album: $missing' -TODO ----- - -- Add caching. - .. _spark: https://github.com/holman/spark diff -Nru beets-1.3.8+dfsg/docs/plugins/mpdstats.rst beets-1.3.19/docs/plugins/mpdstats.rst --- beets-1.3.8+dfsg/docs/plugins/mpdstats.rst 2014-04-08 02:32:52.000000000 +0000 +++ beets-1.3.19/docs/plugins/mpdstats.rst 2016-06-20 01:53:12.000000000 +0000 @@ -14,56 +14,44 @@ Installing Dependencies ----------------------- -This plugin requires the python-mpd library in order to talk to the MPD +This plugin requires the python-mpd2 library in order to talk to the MPD server. Install the library from `pip`_, like so:: - $ pip install python-mpd + $ pip install python-mpd2 -.. _pip: http://www.pip-installer.org/ - -Configuring ------------ - -To use it, enable it in your ``config.yaml`` by putting ``mpdstats`` on your -``plugins`` line. Then, you'll probably want to configure the specifics of -your MPD server. 
You can do that using an ``mpd:`` section in your -``config.yaml``, which looks like this:: - - mpd: - host: localhost - port: 6600 - password: seekrit - -If your MPD library is at another location then the beets library (e.g., -because one is mounted on a NFS share), you can specify the -``music_directory`` in the config like this:: - - mpdstats: - music_directory: /PATH/TO/YOUR/FILES - -If you don't want the plugin to update the rating, you can disable it with:: - - mpdstats: - rating: False - -If you want to change the way the rating is calculated, you can set the -``rating_mix`` option like this:: - - mpdstats: - rating_mix: 1.0 - -For details, see below. +Add the ``mpdstats`` plugin to your configuration (see :ref:`using-plugins`). +.. _pip: http://www.pip-installer.org/ Usage ----- -Now use the ``mpdstats`` command to fire it up:: +Use the ``mpdstats`` command to fire it up:: $ beet mpdstats +Configuration +------------- + +To configure the plugin, make an ``mpd:`` section in your +configuration file. The available options are: + +- **host**: The MPD server hostname. + Default: ``localhost``. +- **port**: The MPD server port. + Default: 6600. +- **password**: The MPD server password. + Default: None. +- **music_directory**: If your MPD library is at a different location from the + beets library (e.g., because one is mounted on a NFS share), specify the path + here. + Default: The beets library directory. +- **rating**: Enable rating updates. + Default: ``yes``. +- **rating_mix**: Tune the way rating is calculated (see below). + Default: 0.75. A Word on Ratings ----------------- @@ -107,4 +95,4 @@ This has only been tested with MPD versions >= 0.16. It may not work on older versions. If that is the case, please report an `issue`_. -.. _issue: https://github.com/sampsyo/beets/issues +.. _issue: https://github.com/beetbox/beets/issues diff -Nru beets-1.3.8+dfsg/docs/plugins/mpdupdate.rst beets-1.3.19/docs/plugins/mpdupdate.rst --- beets-1.3.8+dfsg/docs/plugins/mpdupdate.rst 2014-04-08 02:32:52.000000000 +0000 +++ beets-1.3.19/docs/plugins/mpdupdate.rst 2016-06-20 01:53:12.000000000 +0000 @@ -6,9 +6,10 @@ .. _MPD: http://www.musicpd.org/ -To use it, enable it in your ``config.yaml`` by putting ``mpdupdate`` on your -``plugins`` line. Then, you'll probably want to configure the specifics of your -MPD server. You can do that using an ``mpd:`` section in your ``config.yaml``, +To use ``mpdupdate`` plugin, enable it in your configuration +(see :ref:`using-plugins`). +Then, you'll probably want to configure the specifics of your MPD server. +You can do that using an ``mpd:`` section in your ``config.yaml``, which looks like this:: mpd: @@ -16,9 +17,22 @@ port: 6600 password: seekrit -With that all in place, you'll see beets send the "update" command to your MPD server every time you change your beets library. +With that all in place, you'll see beets send the "update" command to your MPD +server every time you change your beets library. If you want to communicate with MPD over a Unix domain socket instead over TCP, just give the path to the socket in the filesystem for the ``host`` setting. (Any ``host`` value starting with a slash or a tilde is interpreted as a domain socket.) + +Configuration +------------- + +The available options under the ``mpd:`` section are: + +- **host**: The MPD server name. + Default: ``localhost``. +- **port**: The MPD server port. + Default: 6600. +- **password**: The MPD server password. + Default: None. 
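As a sketch of the socket variant described above (the socket path is just a placeholder; point it at wherever your MPD instance actually listens)::

    mpd:
        host: ~/.config/mpd/socket

Because the ``host`` value starts with a tilde, beets treats it as a Unix domain socket rather than a TCP hostname.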
diff -Nru beets-1.3.8+dfsg/docs/plugins/permissions.rst beets-1.3.19/docs/plugins/permissions.rst --- beets-1.3.8+dfsg/docs/plugins/permissions.rst 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/docs/plugins/permissions.rst 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,21 @@ +Permissions Plugin +================== + +The ``permissions`` plugin allows you to set file permissions for imported +music files and their directories. + +To use the ``permissions`` plugin, enable it in your configuration (see +:ref:`using-plugins`). Permissions will be adjusted automatically on import. + +Configuration +------------- + +To configure the plugin, make a ``permissions:`` section in your configuration +file. The ``file`` config value therein uses **octal modes** to specify the +desired permissions. The default permissions are octal 644 for files and 755 for directories. + +Here's an example:: + + permissions: + file: 644 + dir: 755 diff -Nru beets-1.3.8+dfsg/docs/plugins/play.rst beets-1.3.19/docs/plugins/play.rst --- beets-1.3.8+dfsg/docs/plugins/play.rst 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/docs/plugins/play.rst 2016-06-20 01:53:12.000000000 +0000 @@ -4,7 +4,10 @@ The ``play`` plugin allows you to pass the results of a query to a music player in the form of an m3u playlist. -To use the plugin, enable it in your configuration (see +Usage +----- + +To use the ``play`` plugin, enable it in your configuration (see :ref:`using-plugins`). Then use it by invoking the ``beet play`` command with a query. The command will create a temporary m3u file and open it using an appropriate application. You can query albums instead of tracks using the @@ -23,15 +26,73 @@ play: command: /usr/bin/command --option1 --option2 some_other_option -You can configure the plugin to emit relative paths. Use the ``relative_to`` -configuration option:: +While playing, you'll be able to interact with the player if it is +command-line oriented, and you'll get its output in real time. + +Configuration +------------- + +To configure the plugin, make a ``play:`` section in your +configuration file. The available options are: + +- **command**: The command used to open the playlist. + Default: ``open`` on OS X, ``xdg-open`` on other Unixes, and ``start`` on + Windows. Insert ``{}`` to make use of the ``--args`` feature. +- **relative_to**: If set, emit paths relative to this directory. + Default: None. +- **use_folders**: When using the ``-a`` option, the m3u will contain the + paths to each track on the matched albums. Enable this option to + store paths to folders instead. + Default: ``no``. +- **raw**: Instead of creating a temporary m3u playlist and then opening it, + simply call the command with the paths returned by the query as arguments. + Default: ``no``. +- **warning_threshold**: The minimum number of files to play that will + trigger a warning to be emitted. If set to ``no``, warnings are never issued. + Default: 100. + +Optional Arguments +------------------ + +The ``--args`` (or ``-A``) flag to the ``play`` command lets you specify +additional arguments for your player command. Options are inserted after the +configured ``command`` string and before the playlist filename. + +For example, if you have the plugin configured like this:: + + play: + command: mplayer -quiet + +and you occasionally want to shuffle the songs you play, you can type:: + + $ beet play --args -shuffle + +to get beets to execute this command:: + + mplayer -quiet -shuffle /path/to/playlist.m3u + +instead of the default.
+ +If you need to insert arguments somewhere other than the end of the +``command`` string, use ``$args`` to indicate where to insert them. For +example:: + + play: + command: mpv $args --playlist + +indicates that you need to insert extra arguments before specifying the +playlist. - play: - relative_to: /my/music/folder +Note on the Leakage of the Generated Playlists +_______________________________________________ -When using the ``-a`` option, the m3u will have the paths to each track on -the matched albums. If you wish to have folders instead, you can change that -by setting ``use_files: False`` in your configuration file. +Because the command that will open the generated ``.m3u`` files can be +arbitrarily configured by the user, beets won't try to delete those files. For +this reason, using this plugin will leave one or several playlist(s) in the +directory selected to create temporary files (Most likely ``/tmp/`` on Unix-like +systems. See `tempfile.tempdir`_.). Leaking those playlists until they are +externally wiped could be an issue for privacy or storage reasons. If this is +the case for you, you might want to use the ``raw`` config option described +above. -Enable beets' verbose logging to see the command's output if you need to -debug. +.. _tempfile.tempdir: https://docs.python.org/2/library/tempfile.html#tempfile.tempdir diff -Nru beets-1.3.8+dfsg/docs/plugins/plexupdate.rst beets-1.3.19/docs/plugins/plexupdate.rst --- beets-1.3.8+dfsg/docs/plugins/plexupdate.rst 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/docs/plugins/plexupdate.rst 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,43 @@ +PlexUpdate Plugin +================= + +``plexupdate`` is a very simple plugin for beets that lets you automatically +update `Plex`_'s music library whenever you change your beets library. + +To use ``plexupdate`` plugin, enable it in your configuration +(see :ref:`using-plugins`). +Then, you'll probably want to configure the specifics of your Plex server. +You can do that using an ``plex:`` section in your ``config.yaml``, +which looks like this:: + + plex: + host: localhost + port: 32400 + token: token + +The ``token`` key is optional: you'll need to use it when in a Plex Home (see Plex's own `documentation about tokens`_). + +To use the ``plexupdate`` plugin you need to install the `requests`_ library with: + + pip install requests + +With that all in place, you'll see beets send the "update" command to your Plex +server every time you change your beets library. + +.. _Plex: http://plex.tv/ +.. _requests: http://docs.python-requests.org/en/latest/ +.. _documentation about tokens: https://support.plex.tv/hc/en-us/articles/204059436-Finding-your-account-token-X-Plex-Token + +Configuration +------------- + +The available options under the ``plex:`` section are: + +- **host**: The Plex server name. + Default: ``localhost``. +- **port**: The Plex server port. + Default: 32400. +- **token**: The Plex Home token. + Default: Empty. +- **library_name**: The name of the Plex library to update. + Default: ``Music`` diff -Nru beets-1.3.8+dfsg/docs/plugins/random.rst beets-1.3.19/docs/plugins/random.rst --- beets-1.3.8+dfsg/docs/plugins/random.rst 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/docs/plugins/random.rst 2016-06-20 01:53:12.000000000 +0000 @@ -5,8 +5,8 @@ from your library. This can be helpful if you need some help deciding what to listen to. -First, enable the plugin named ``random`` (see :doc:`/plugins/index`). 
You'll then -be able to use the ``beet random`` command:: +First, enable the plugin named ``random`` (see :ref:`using-plugins`). You'll +then be able to use the ``beet random`` command:: $ beet random Aesop Rock - None Shall Pass - The Harbor Is Yours diff -Nru beets-1.3.8+dfsg/docs/plugins/replaygain.rst beets-1.3.19/docs/plugins/replaygain.rst --- beets-1.3.8+dfsg/docs/plugins/replaygain.rst 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/docs/plugins/replaygain.rst 2016-06-20 01:53:12.000000000 +0000 @@ -10,9 +10,10 @@ Installation ------------ -This plugin can use one of two backends to compute the ReplayGain values: -GStreamer and mp3gain (and its cousin, aacgain). mp3gain can be easier to -install but GStreamer support more audio formats. +This plugin can use one of four backends to compute the ReplayGain values: +GStreamer, mp3gain (and its cousin, aacgain), Python Audio Tools and bs1770gain. mp3gain +can be easier to install but GStreamer, Audio Tools and bs1770gain support more audio +formats. Once installed, this plugin analyzes all files during the import process. This can be a slow process; to instead analyze after the fact, disable automatic @@ -28,8 +29,8 @@ .. _PyGObject 3.x: https://wiki.gnome.org/action/show/Projects/PyGObject .. _GStreamer: http://gstreamer.freedesktop.org/ -Then, enable the plugin (see :ref:`using-plugins`) and specify the GStreamer -backend by adding this to your configuration file:: +Then, enable the ``replaygain`` plugin (see :ref:`using-plugins`) and specify +the GStreamer backend by adding this to your configuration file:: replaygain: backend: gstreamer @@ -61,35 +62,72 @@ replaygain: command: /Applications/MacMP3Gain.app/Contents/Resources/aacgain +Python Audio Tools +`````````````````` -Configuration -------------- +This backend uses the `Python Audio Tools`_ package to compute ReplayGain for +a range of different file formats. The package is not available via PyPI; it +must be installed manually (only versions preceding 3.x are compatible). -Available configuration options for the ``replaygain`` section in your -configuration file include: +On OS X, most of the dependencies can be installed with `Homebrew`_:: -* **overwrite**: By default, files that already have ReplayGain tags will not - be re-analyzed. If you want to analyze *every* file on import, you can set - the ``overwrite`` option for the plugin in your :doc:`configuration file - </reference/config>`, like so:: + brew install mpg123 mp3gain vorbisgain faad2 libvorbis - replaygain: - overwrite: yes +.. _Python Audio Tools: http://audiotools.sourceforge.net -* **targetlevel**: The target loudness level can be modified to any number of - decibels with the ``targetlevel`` option (default: 89 dB). +bs1770gain +`````````` -These options only work with the "command" backend: +To use this backend, you will need to install the `bs1770gain`_ command-line +tool. Follow the instructions at the `bs1770gain`_ Web site and ensure that +the tool is on your ``$PATH``. + +.. _bs1770gain: http://bs1770gain.sourceforge.net/ + +Then, enable the plugin (see :ref:`using-plugins`) and specify the +backend in your configuration file:: + + replaygain: + backend: bs1770gain -* **apply**: If you use a player that does not support ReplayGain - specifications, you can force the volume normalization by applying the gain - to the file via the ``apply`` option. This is a lossless and reversible - operation with no transcoding involved. 
-* **noclip**: The use of ReplayGain can cause clipping if the average volume - of a song is below the target level. By default, a "prevent clipping" option - named ``noclip`` is enabled to reduce the amount of ReplayGain adjustment to - whatever amount would keep clipping from occurring. +For Windows users: the tool currently has issues with long and non-ASCII path +names. You may want to use the :ref:`asciify-paths` configuration option until +this is resolved. + +Configuration +------------- + +To configure the plugin, make a ``replaygain:`` section in your +configuration file. The available options are: + +- **auto**: Enable ReplayGain analysis during import. + Default: ``yes``. +- **backend**: The analysis backend; either ``gstreamer``, ``command``, ``audiotools``, or ``bs1770gain``. + Default: ``command``. +- **overwrite**: Re-analyze files that already have ReplayGain tags. + Default: ``no``. +- **targetlevel**: A number of decibels for the target loudness level. + Default: 89. + +These options only work with the "command" backend: +- **command**: The path to the ``mp3gain`` or ``aacgain`` executable (if beets + cannot find it by itself). + For example: ``/Applications/MacMP3Gain.app/Contents/Resources/aacgain``. + Default: Search in your ``$PATH``. +- **noclip**: Reduce the amount of ReplayGain adjustment to whatever amount + would keep clipping from occurring. + Default: ``yes``. + +These options only work with the "bs1770gain" backend: + +- **method**: The loudness scanning standard: either `replaygain` for + ReplayGain 2.0, `ebu` for EBU R128, or `atsc` for ATSC A/85. This dictates + the reference level: -18, -23, or -24 LUFS respectively. Default: + `replaygain`. +- **chunk_at**: Split an album into groups of this many tracks. + Useful when running into memory problems when analyzing albums with + an exceptionally large number of tracks. Default: 5000. Manual Analysis --------------- diff -Nru beets-1.3.8+dfsg/docs/plugins/rewrite.rst beets-1.3.19/docs/plugins/rewrite.rst --- beets-1.3.8+dfsg/docs/plugins/rewrite.rst 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/docs/plugins/rewrite.rst 2016-06-20 01:53:12.000000000 +0000 @@ -6,12 +6,12 @@ such as artists: for example, perhaps you want albums from The Jimi Hendrix Experience to be sorted into the same folder as solo Hendrix albums. -To use field rewriting, first enable the plugin by putting ``rewrite`` on your -``plugins`` line. Then, make a ``rewrite:`` section in your config file to -contain your rewrite rules. Each rule consists of a field name, a regular -expression pattern, and a replacement value. Rules are written ``fieldname -regex: replacement``. For example, this line implements the Jimi Hendrix -example above:: +To use field rewriting, first enable the ``rewrite`` plugin +(see :ref:`using-plugins`). +Then, make a ``rewrite:`` section in your config file to contain your rewrite +rules. Each rule consists of a field name, a regular expression pattern, and a +replacement value. Rules are written ``fieldname regex: replacement``.
+For example, this line implements the Jimi Hendrix example above:: rewrite: artist The Jimi Hendrix Experience: Jimi Hendrix diff -Nru beets-1.3.8+dfsg/docs/plugins/scrub.rst beets-1.3.19/docs/plugins/scrub.rst --- beets-1.3.8+dfsg/docs/plugins/scrub.rst 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/docs/plugins/scrub.rst 2016-06-20 01:53:12.000000000 +0000 @@ -11,7 +11,7 @@ ------------------- To automatically remove files' tags before writing new ones, just -enable the plugin (see :doc:`/plugins/index`). When importing new files (with +enable the ``scrub`` plugin (see :ref:`using-plugins`). When importing new files (with ``import.write`` turned on) or modifying files' tags with the ``beet modify`` command, beets will first strip all types of tags entirely and then write the database-tracked metadata to the file. @@ -31,11 +31,11 @@ not restore any information. This will leave the files with no metadata whatsoever. -Configuring ------------ +Configuration +------------- -The plugin has one configuration option, ``auto``, which lets you disable -automatic metadata stripping. To do so, add this to your ``config.yaml``:: +To configure the plugin, make a ``scrub:`` section in your +configuration file. There is one option: - scrub: - auto: no +- **auto**: Enable metadata stripping during import. + Default: ``yes``. diff -Nru beets-1.3.8+dfsg/docs/plugins/smartplaylist.rst beets-1.3.19/docs/plugins/smartplaylist.rst --- beets-1.3.8+dfsg/docs/plugins/smartplaylist.rst 2014-04-08 02:32:52.000000000 +0000 +++ beets-1.3.19/docs/plugins/smartplaylist.rst 2016-06-20 01:53:12.000000000 +0000 @@ -7,9 +7,9 @@ .. _MPD's: http://www.musicpd.org/ -To use it, enable the plugin by putting ``smartplaylist`` in the ``plugins`` -section in your ``config.yaml``. Then configure your smart playlists like the -following example:: +To use it, enable the ``smartplaylist`` plugin in your configuration +(see :ref:`using-plugins`). +Then configure your smart playlists like the following example:: smartplaylist: relative_to: ~/Music @@ -21,12 +21,6 @@ - name: beatles.m3u query: 'artist:Beatles' -If you intend to use this plugin to generate playlists for MPD, you should set -``relative_to`` to your MPD music directory (by default, ``relative_to`` is -``None``, and the absolute paths to your music files will be generated). - -``playlist_dir`` is where the generated playlist files will be put. - You can generate as many playlists as you want by adding them to the ``playlists`` section, using beets query syntax (see :doc:`/reference/query`) for ``query`` and the file name to be generated for @@ -50,6 +44,18 @@ - name: 'BeatlesUniverse.m3u' query: ['artist:beatles', 'genre:"beatles cover"'] +Note that since beets query syntax is in effect, you can also use sorting +directives:: + + - name: 'Chronological Beatles' + query: 'artist:Beatles year+' + - name: 'Mixed Rock' + query: ['artist:Beatles year+', 'artist:"Led Zeppelin" bitrate+'] + +The former case behaves as expected, however please note that in the latter the +sorts will be merged: ``year+ bitrate+`` will apply to both the Beatles and Led +Zeppelin. If that bothers you, please get in touch. + For querying albums instead of items (mainly useful with extensible fields), use the ``album_query`` field. ``query`` and ``album_query`` can be used at the same time. 
The following example gathers single items but also items belonging @@ -59,16 +65,34 @@ album_query: 'for_travel:1' query: 'for_travel:1' -By default, all playlists are automatically regenerated after every beets -command that changes the library database. This can be disabled by specifying -``auto: no``. To force regeneration, you can invoke it manually from the -command line:: +By default, each playlist is automatically regenerated at the end of the +session if an item or album it matches changed in the library database. To +force regeneration, you can invoke it manually from the command line:: $ beet splupdate -which will generate your new smart playlists. +This will regenerate all smart playlists. You can also specify which ones you +want to regenerate:: + + $ beet splupdate BeatlesUniverse.m3u MyTravelPlaylist You can also use this plugin together with the :doc:`mpdupdate`, in order to automatically notify MPD of the playlist change, by adding ``mpdupdate`` to the ``plugins`` line in your config file *after* the ``smartplaylist`` plugin. + +Configuration +------------- + +To configure the plugin, make a ``smartplaylist:`` section in your +configuration file. In addition to the ``playlists`` described above, the +other configuration options are: + +- **auto**: Regenerate the playlist after every database change. + Default: ``yes``. +- **playlist_dir**: Where to put the generated playlist files. + Default: The current working directory (i.e., ``'.'``). +- **relative_to**: Generate paths in the playlist files relative to a base + directory. If you intend to use this plugin to generate playlists for MPD, + point this to your MPD music directory. + Default: Use absolute paths. diff -Nru beets-1.3.8+dfsg/docs/plugins/spotify.rst beets-1.3.19/docs/plugins/spotify.rst --- beets-1.3.8+dfsg/docs/plugins/spotify.rst 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/docs/plugins/spotify.rst 2016-06-20 01:53:12.000000000 +0000 @@ -16,8 +16,8 @@ Basic Usage ----------- -First, enable the plugin (see :ref:`using-plugins`). Then, use the ``spotify`` -command with a beets query:: +First, enable the ``spotify`` plugin (see :ref:`using-plugins`). +Then, use the ``spotify`` command with a beets query:: beet spotify [OPTIONS...] QUERY @@ -37,51 +37,44 @@ * ``--show-failures`` or ``-f``: List the tracks that did not match a Spotify ID. -Configuring ------------ +Configuration +------------- + +The default options should work as-is, but there are some options you can put +in config.yaml under the ``spotify:`` section: + +- **mode**: One of the following: -The default options should work as-is, but there are some options you can put in config.yaml: + - ``list``: Print out the playlist as a list of links. This list can then + be pasted in to a new or existing Spotify playlist. + - ``open``: This mode actually sends a link to your default browser with + instructions to open Spotify with the playlist you created. Until this + has been tested on all platforms, it will remain optional. -* ``mode``: See the section below on modes. -* ``region_filter``: Use the 2-character country abbreviation to limit results + Default: ``list``. +- **region_filter**: A two-character country abbreviation, to limit results to that market. -* ``show_failures``: Show the artist/album/track for each lookup that does not - return a Spotify ID (and therefore cannot be added to a playlist). -* ``tiebreak``: How to choose the track if there is more than one identical - result. 
For example, there might be multiple releases of the same album. - Currently, this defaults to "popularity", "first" simply chooses the first - in the list returned by Spotify. -* ``regex``: An array of regex transformations to perform on the + Default: None. +- **show_failures**: List each lookup that does not return a Spotify ID (and + therefore cannot be added to a playlist). + Default: ``no``. +- **tiebreak**: How to choose the track if there is more than one identical + result. For example, there might be multiple releases of the same album. + The options are ``popularity`` and ``first`` (to just choose the first match + returned). + Default: ``popularity``. +- **regex**: An array of regex transformations to perform on the track/album/artist fields before sending them to Spotify. Can be useful for changing certain abbreviations, like ft. -> feat. See the examples below. -* ``artist_field`` / ``album_field`` / ``track_field``: These allow the user - to choose a different field to send to Spotify when looking up the track, - album and artist. Most users will not want to change this. + Default: None. -Example Configuration ---------------------- - -:: +Here's an example:: spotify: - # Default is list, shows the plugin output. Open attempts to open - # directly in Spotify (only tested on Mac). - mode: "open" - - # Filter tracks by only that market (2-letter code) - region_filter: "US" - - # Display the tracks that did not match a Spotify ID. + mode: open + region_filter: US show_faiulres: on - - # Need to break ties when then are multiple tracks. Default is - popularity. - tiebreak: "first" - - # Which beets fields to use for lookups. - artist_field: "albumartist" - album_field: "album" - track_field: "title" + tiebreak: first regex: [ { @@ -96,12 +89,3 @@ } ] -Spotify Plugin Modes ---------------------- - -* ``list``: The default mode is to print out the playlist as a list of links. - This list can then be pasted in to a new or existing Spotify playlist. -* ``open``: This mode actually sends a link to your default browser with - instructions to open Spotify with the playlist you created. Until this has - been tested on all platforms, it will remain optional. - diff -Nru beets-1.3.8+dfsg/docs/plugins/the.rst beets-1.3.19/docs/plugins/the.rst --- beets-1.3.8+dfsg/docs/plugins/the.rst 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/docs/plugins/the.rst 2016-06-20 01:53:12.000000000 +0000 @@ -2,39 +2,42 @@ ========== The ``the`` plugin allows you to move patterns in path formats. It's suitable, -for example, for moving articles from string start to the end. This is useful -for quick search on filesystems and generally looks good. Plugin DOES NOT -change tags. By default plugin supports English "the, a, an", but custom +for example, for moving articles from string start to the end. This is useful +for quick search on filesystems and generally looks good. Plugin does not +change tags. By default plugin supports English "the, a, an", but custom regexp patterns can be added by user. How it works:: The Something -> Something, The A Band -> Band, A An Orchestra -> Orchestra, An -To use plugin, enable it by including ``the`` into ``plugins`` line of your -beets config. 
The plugin provides a template function called ``%the`` for use -in path format expressions:: +To use the ``the`` plugin, enable it (see :doc:`/plugins/index`) and then use +a template function called ``%the`` in path format expressions:: paths: default: %the{$albumartist}/($year) $album/$track $title The default configuration moves all English articles to the end of the string, -but you can override these defaults to make more complex changes:: +but you can override these defaults to make more complex changes. - the: - # handle "The" (on by default) - the: yes - # handle "A/An" (on by default) - a: yes - # format string, {0} - part w/o article, {1} - article - # spaces already trimmed from ends of both parts - # default is '{0}, {1}' - format: '{0}, {1}' - # strip instead of moving to the end, default is off - strip: no - # custom regexp patterns, space-separated - patterns: ... - -Custom patterns are case-insensitive regular expressions. Patterns can be -matched anywhere in the string (not just the beginning), so use ``^`` if you -intend to match leading words. +Configuration +------------- + +To configure the plugin, make a ``the:`` section in your +configuration file. The available options are: + +- **a**: Handle "A/An" moves. + Default: ``yes``. +- **the**: Handle "The" moves. + Default: ``yes``. +- **patterns**: Custom regexp patterns, space-separated. Custom patterns are + case-insensitive regular expressions. Patterns can be matched anywhere in the + string (not just the beginning), so use ``^`` if you intend to match leading + words. + Default: ``[]``. +- **strip**: Remove the article altogether instead of moving it to the end. + Default: ``no``. +- **format**: A Python format string for the output. Use ``{0}`` to indicate + the part without the article and ``{1}`` for the article. + Spaces are already trimmed from ends of both parts. + Default: ``'{0}, {1}'``. diff -Nru beets-1.3.8+dfsg/docs/plugins/thumbnails.rst beets-1.3.19/docs/plugins/thumbnails.rst --- beets-1.3.8+dfsg/docs/plugins/thumbnails.rst 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/docs/plugins/thumbnails.rst 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,38 @@ +Thumbnails Plugin +================== + +The ``thumbnails`` plugin creates thumbnails for your album folders with the +album cover. This works on freedesktop.org-compliant file managers such as +Nautilus or Thunar, and is therefore POSIX-only. + +To use the ``thumbnails`` plugin, enable it (see :doc:`/plugins/index`) as well +as the :doc:`/plugins/fetchart`. You'll need two additional Python packages: +`pyxdg` and `pathlib`. + +``thumbnails`` needs to resize the covers, and therefore requires either +`ImageMagick`_ or `Pillow`_. + +.. _Pillow: https://github.com/python-pillow/Pillow +.. _ImageMagick: http://www.imagemagick.org/ + +Configuration +------------- + +To configure the plugin, make a ``thumbnails`` section in your configuration +file. The available options are: + +- **auto**: Whether the thumbnail should be automatically set on import. + Default: ``yes``. +- **force**: Generate the thumbnail even when there's one that seems fine (more + recent than the cover art). + Default: ``no``. +- **dolphin**: Generate dolphin-compatible thumbnails. Dolphin (KDE file + explorer) does not respect freedesktop.org's standard on thumbnails.
This + functionality replaces the :doc:`/plugins/freedesktop` + Default: ``no`` + +Usage +----- + +The ``thumbnails`` command provided by this plugin creates a thumbnail for +albums that match a query (see :doc:`/reference/query`). diff -Nru beets-1.3.8+dfsg/docs/plugins/types.rst beets-1.3.19/docs/plugins/types.rst --- beets-1.3.8+dfsg/docs/plugins/types.rst 2014-09-18 02:05:20.000000000 +0000 +++ beets-1.3.19/docs/plugins/types.rst 2016-06-20 01:53:12.000000000 +0000 @@ -6,8 +6,8 @@ that you can query it with ranges---which isn't possible when the field is considered a string (the default). -Enable the plugin as described in :doc:`/plugins/index` and then add a -``types`` section to your :doc:`configuration file </reference/config>`. The +Enable the ``types`` plugin as described in :doc:`/plugins/index` and then add +a ``types`` section to your :doc:`configuration file </reference/config>`. The configuration section should map field name to one of ``int``, ``float``, ``bool``, or ``date``. @@ -15,3 +15,12 @@ types: rating: int + +Now you can assign numeric ratings to tracks and albums and use :ref:`range +queries <numericquery>` to filter them.:: + + beet modify "My favorite track" rating=5 + beet ls rating:4..5 + + beet modify --album "My favorite album" rating=5 + beet ls --album rating:4..5 diff -Nru beets-1.3.8+dfsg/docs/plugins/web.rst beets-1.3.19/docs/plugins/web.rst --- beets-1.3.8+dfsg/docs/plugins/web.rst 2014-04-22 21:47:13.000000000 +0000 +++ beets-1.3.19/docs/plugins/web.rst 2016-06-20 01:53:12.000000000 +0000 @@ -16,12 +16,17 @@ ------- The Web interface depends on `Flask`_. To get it, just run ``pip install -flask``. +flask``. Then enable the ``web`` plugin in your configuration (see +:ref:`using-plugins`). .. _Flask: http://flask.pocoo.org/ -Put ``web`` on your ``plugins`` line in your configuration file to enable the -plugin. +If you need CORS (it's disabled by default---see :ref:`web-cors`, below), then +you also need `flask-cors`_. Just type ``pip install flask-cors``. + +.. _flask-cors: https://github.com/CoryDolphin/flask-cors +.. _CORS: http://en.wikipedia.org/wiki/Cross-origin_resource_sharing + Run the Server -------------- @@ -35,12 +40,8 @@ can be specified on the command line or in the ``[web]`` section of your :doc:`configuration file </reference/config>`. -On the command line, use ``beet web [HOSTNAME] [PORT]``. In the config file, use -something like this:: - - web: - host: 127.0.0.1 - port: 8888 +On the command line, use ``beet web [HOSTNAME] [PORT]``. Or the configuration +options below. Usage ----- @@ -50,6 +51,19 @@ .. _HTML5 Audio: http://www.w3.org/TR/html-markup/audio.html +Configuration +------------- + +To configure the plugin, make a ``web:`` section in your +configuration file. The available options are: + +- **host**: The server hostname. Set this to 0.0.0.0 to bind to all interfaces. + Default: Bind to 127.0.0.1. +- **port**: The server port. + Default: 8337. +- **cors**: The CORS allowed origin (see :ref:`web-cors`, below). + Default: CORS is disabled. + Implementation -------------- @@ -71,6 +85,31 @@ .. _html5media: http://html5media.info/ .. _MediaElement.js: http://mediaelementjs.com/ +.. _web-cors: + +Cross-Origin Resource Sharing (CORS) +------------------------------------ + +The ``web`` plugin's API can be used as a backend for an in-browser client. By +default, browsers will only allow access from clients running on the same +server as the API. (You will get an arcane error about ``XMLHttpRequest`` +otherwise.) 
A technology called `CORS`_ lets you relax this restriction. + +If you want to use an in-browser client hosted elsewhere (or running from +a different server on your machine), first install the `flask-cors`_ plugin by +typing ``pip install flask-cors``. Then set the ``cors`` configuration option +to the "origin" (protocol, host, and optional port number) where the client is +served. Or set it to ``'*'`` to enable access from all origins. Note that +there are security implications if you set the origin to ``'*'``, so please +research this before using it. + +For example:: + + web: + host: 0.0.0.0 + cors: 'http://example.com' + + JSON API -------- diff -Nru beets-1.3.8+dfsg/docs/plugins/zero.rst beets-1.3.19/docs/plugins/zero.rst --- beets-1.3.8+dfsg/docs/plugins/zero.rst 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/docs/plugins/zero.rst 2016-06-20 01:53:12.000000000 +0000 @@ -3,15 +3,32 @@ The ``zero`` plugin allows you to null fields in files' metadata tags. Fields can be nulled unconditionally or conditioned on a pattern match. For example, -the plugin can strip useless comments like "ripped by MyGreatRipper." This -plugin only affects files' tags; the beets database is unchanged. +the plugin can strip useless comments like "ripped by MyGreatRipper." -To use the plugin, enable it by including ``zero`` in the ``plugins`` line of -your configuration file. To configure the plugin, use a ``zero:`` section in -your configuration file. Set ``fields`` to the (whitespace-separated) list of -fields to change. You can get the list of available fields by running ``beet -fields``. To conditionally filter a field, use ``field: [regexp, regexp]`` to -specify regular expressions. +The plugin can work in one of two modes. The first mode, the default, is a +blacklist, where you choose the tags you want to remove. The second mode is a +whitelist, where you instead specify the tags you want to keep. + +To use the ``zero`` plugin, enable the plugin in your configuration +(see :ref:`using-plugins`). + +Configuration +------------- + +Make a ``zero:`` section in your configuration file. You can specify the +fields to nullify and the conditions for nullifying them: + +* Set ``fields`` to a whitespace-separated list of fields to change. You can + get the list of all available fields by running ``beet fields``. In + addition, the ``images`` field allows you to remove any images + embedded in the media file. +* Set ``keep_fields`` to *invert* the logic of the plugin. Only these fields + will be kept; other fields will be removed. Remember to set only + ``fields`` or ``keep_fields``---not both! +* To conditionally filter a field, use ``field: [regexp, regexp]`` to specify + regular expressions. +* By default this plugin only affects files' tags ; the beets database is left + unchanged. To update the tags in the database, set the ``update_database`` option. For example:: @@ -19,6 +36,7 @@ fields: month day genre comments comments: [EAC, LAME, from.+collection, 'ripped by'] genre: [rnb, 'power metal'] + update_database: true If a custom pattern is not defined for a given field, the field will be nulled unconditionally. diff -Nru beets-1.3.8+dfsg/docs/reference/cli.rst beets-1.3.19/docs/reference/cli.rst --- beets-1.3.8+dfsg/docs/reference/cli.rst 2014-09-13 03:57:48.000000000 +0000 +++ beets-1.3.19/docs/reference/cli.rst 2016-06-20 01:53:12.000000000 +0000 @@ -128,10 +128,21 @@ ``--group-albums`` option to split the files based on their metadata before matching them as separate albums. 
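+  For instance, a quick sketch of that command (the directory name here is
+  only an illustration)::
+
+      $ beet import --group-albums ~/music/mixed-tracks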
+* If you want to preview which files would be imported, use the ``--pretend`` + option. If set, beets will just print a list of files that it would + otherwise import. + +* If you already have a metadata backend ID that matches the items to be + imported, you can instruct beets to restrict the search to that ID instead of + searching for other candidates by using the ``--search-id SEARCH_ID`` option. + Multiple IDs can be specified by simply repeating the option several times. + .. _rarfile: https://pypi.python.org/pypi/rarfile/2.2 .. only:: html + .. _reimport: + Reimporting ^^^^^^^^^^^ @@ -200,7 +211,7 @@ `````` :: - beet remove [-ad] QUERY + beet remove [-adf] QUERY Remove music from your library. @@ -208,6 +219,7 @@ You'll be shown a list of the files that will be removed and asked to confirm. By default, this just removes entries from the library database; it doesn't touch the files on disk. To actually delete the files, use ``beet remove -d``. +If you do not want to be prompted to remove the files, use ``beet remove -f``. .. _modify-cmd: @@ -239,7 +251,7 @@ ```` :: - beet move [-ca] [-d DIR] QUERY + beet move [-capt] [-d DIR] QUERY Move or copy items in your library. @@ -249,6 +261,11 @@ anywhere in your filesystem. The ``-c`` option copies files instead of moving them. As with other commands, the ``-a`` option matches albums instead of items. +To perform a "dry run", just use the ``-p`` (for "pretend") flag. This will +show you a list of files that would be moved but won't actually change anything +on disk. The ``-t`` option sets the timid mode which will ask again +before really moving or copying the files. + .. _update-cmd: update @@ -262,7 +279,10 @@ This will scan all the matched files and read their tags, populating the database with the new values. By default, files will be renamed according to -their new metadata; disable this with ``-M``. +their new metadata; disable this with ``-M``. Beets will skip files if their +modification times have not changed, so any out-of-band metadata changes must +also update these for ``beet update`` to recognise that the files have been +edited. To perform a "dry run" of an update, just use the ``-p`` (for "pretend") flag. This will show you all the proposed changes but won't actually change anything @@ -313,8 +333,9 @@ Show some statistics on your entire library (if you don't provide a :doc:`query <query>`) or the matched items (if you do). -The ``-e`` (``--exact``) option makes the calculation of total file size more -accurate but slower. +By default, the command calculates file sizes using their bitrate and +duration. The ``-e`` (``--exact``) option reads the exact sizes of each file +(but is slower). The exact mode also outputs the exact duration in seconds. .. _fields-cmd: @@ -325,7 +346,9 @@ beet fields Show the item and album metadata fields available for use in :doc:`query` and -:doc:`pathformat`. Includes any template fields provided by plugins. +:doc:`pathformat`. The listing includes any template fields provided by +plugins and any flexible attributes you've manually assigned to your items and +albums. .. _config-cmd: @@ -333,7 +356,7 @@ `````` :: - beet config [-pd] + beet config [-pdc] beet config -e Show or edit the user configuration. This command does one of three things: @@ -344,6 +367,8 @@ * The ``--path`` option instead shows the path to your configuration file. This can be combined with the ``--default`` flag to show where beets keeps its internal defaults. 
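+  For example, a sketch of that combination of flags::
+
+      $ beet config --path --default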
+* By default, sensitive information like passwords is removed when dumping the + configuration. The ``--clear`` option includes this sensitive data. * With the ``--edit`` option, beets attempts to open your config file for editing. It first tries the ``$EDITOR`` environment variable and then a fallback option depending on your platform: ``open`` on OS X, ``xdg-open`` @@ -362,7 +387,8 @@ * ``-l LIBPATH``: specify the library database file to use. * ``-d DIRECTORY``: specify the library root directory. * ``-v``: verbose mode; prints out a deluge of debugging information. Please use - this flag when reporting bugs. + this flag when reporting bugs. You can use it twice, as in ``-vv``, to make + beets even more verbose. * ``-c FILE``: read a specified YAML :doc:`configuration file <config>`. Beets also uses the ``BEETSDIR`` environment variable to look for @@ -407,9 +433,25 @@ that were enabled when running ``beet completion``. If you add a plugin later on you will want to re-generate the script. -If you use zsh, take a look instead at the included `completion script`_. +zsh +``` + +If you use zsh, take a look at the included `completion script`_. The script +should be placed in a directory that is part of your ``fpath``, and `not` +sourced in your ``.zshrc``. Running ``echo $fpath`` will give you a list of +valid directories. + +Another approach is to use zsh's bash completion compatibility. This snippet +defines some bash-specific functions to make this work without errors:: + + autoload bashcompinit + bashcompinit + _get_comp_words_by_ref() { :; } + compopt() { :; } + _filedir() { :; } + eval "$(beet completion)" -.. _completion script: https://github.com/sampsyo/beets/blob/master/extra/_beet +.. _completion script: https://github.com/beetbox/beets/blob/master/extra/_beet .. only:: man diff -Nru beets-1.3.8+dfsg/docs/reference/config.rst beets-1.3.19/docs/reference/config.rst --- beets-1.3.8+dfsg/docs/reference/config.rst 2014-09-16 21:49:16.000000000 +0000 +++ beets-1.3.19/docs/reference/config.rst 2016-06-20 01:53:12.000000000 +0000 @@ -64,6 +64,12 @@ A space-separated list of plugin module names to load. See :ref:`using-plugins`. +include +~~~~~~~ + +A space-separated list of extra configuration files to include. +Filenames are relative to the directory containing ``config.yaml``. + pluginpath ~~~~~~~~~~ @@ -77,13 +83,27 @@ - /path/one - /path/two +.. _ignore: + ignore ~~~~~~ A list of glob patterns specifying file and directory names to be ignored when -importing. By default, this consists of ``.*``, ``*~``, and ``System Volume -Information`` (i.e., beets ignores Unix-style hidden files, backup files, and -a directory that appears at the root of some Windows filesystems). +importing. By default, this consists of ``.*``, ``*~``, ``System Volume +Information``, and ``lost+found`` (i.e., beets ignores Unix-style hidden files, +backup files, and directories that appear at the root of some Linux and Windows +filesystems). + +.. _ignore_hidden: + +ignore_hidden +~~~~~~~~~~~~~ + +Either ``yes`` or ``no``; whether to ignore hidden files when importing. On +Windows, the "Hidden" property of files is used to detect whether or not a file +is hidden. On OS X, the file's "IsHidden" flag is used to detect whether or not +a file is hidden. On OS X and all other non-Windows platforms, files +(and directories) whose names start with a dot are also treated as hidden. .. _replace: @@ -119,6 +139,11 @@ Trailing dots and trailing whitespace, which can cause problems on Windows clients, are also removed.
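+For example, a custom rule might expand a character rather than substitute it
+one-for-one; the pattern below is only an illustrative sketch, not one of the
+defaults::
+
+    replace:
+        '&': _and_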
+When replacements other than the defaults are used, it is possible that they +will increase the length of the path. In the scenario where this leads to a +conflict with the maximum filename length, the default replacements will be +used to resolve the conflict and beets will display a warning. + Note that paths might contain special characters such as typographical quotes (``“”``). With the configuration above, those will not be replaced as they don't match the typewriter quote (``"``). To also strip these @@ -159,35 +184,38 @@ ~~~~~~~~ Either ``yes`` or ``no``, indicating whether the autotagger should use -multiple threads. This makes things faster but may behave strangely. +multiple threads. This makes things substantially faster by overlapping work: +for example, it can copy files for one album in parallel with looking up data +in MusicBrainz for a different album. You may want to disable this when +debugging problems with the autotagger. Defaults to ``yes``. -color -~~~~~ - -Either ``yes`` or ``no``; whether to use color in console output (currently -only in the ``import`` command). Turn this off if your terminal doesn't -support ANSI colors. .. _list_format_item: +.. _format_item: -list_format_item -~~~~~~~~~~~~~~~~ +format_item +~~~~~~~~~~~ Format to use when listing *individual items* with the :ref:`list-cmd` command and other commands that need to print out items. Defaults to ``$artist - $album - $title``. The ``-f`` command-line option overrides this setting. +It used to be named `list_format_item`. + .. _list_format_album: +.. _format_album: -list_format_album -~~~~~~~~~~~~~~~~~ +format_album +~~~~~~~~~~~~ Format to use when listing *albums* with :ref:`list-cmd` and other commands. Defaults to ``$albumartist - $album``. The ``-f`` command-line option overrides this setting. +It used to be named `list_format_album`. + .. _sort_item: sort_item @@ -204,6 +232,15 @@ Default sort order to use when fetching items from the database. Defaults to ``albumartist+ album+``. Explicit sort orders override this default. +.. _sort_case_insensitive: + +sort_case_insensitive +~~~~~~~~~~~~~~~~~~~~~ +Either ``yes`` or ``no``, indicating whether the case should be ignored when +sorting lexicographic fields. When set to ``no``, lower-case values will be +placed after upper-case values (e.g., *Bar Qux foo*), while ``yes`` would +result in the more expected *Bar foo Qux*. Default: ``yes``. + .. _original_date: original_date @@ -224,7 +261,7 @@ default (``per_disc_numbering: no``), tracks are numbered per-release, so the first track on the second disc has track number N+1 where N is the number of tracks on the first disc. If this ``per_disc_numbering`` is enabled, then the -first track on each disc always has track number 1. +first (non-pregap) track on each disc always has track number 1. If you enable ``per_disc_numbering``, you will likely want to change your :ref:`path-format-config` also to include ``$disc`` before ``$track`` to make @@ -234,13 +271,19 @@ paths: default: $albumartist/$album%aunique{}/$disc-$track $title +When this option is off (the default), even "pregap" hidden tracks are +numbered from one, not zero, so other track numbers may appear to be bumped up +by one. When it is on, the pregap track for each disc can be numbered zero. + + .. _terminal_encoding: terminal_encoding ~~~~~~~~~~~~~~~~~ The text encoding, as `known to Python`_, to use for messages printed to the -standard output. By default, this is determined automatically from the locale +standard output. 
It's also used to read messages from the standard input. +By default, this is determined automatically from the locale environment variables. .. _known to python: http://docs.python.org/2/library/codecs.html#standard-encodings @@ -255,6 +298,9 @@ files whose names match the glob patterns in `clutter`, which should be a list of strings. The default list consists of "Thumbs.DB" and ".DS_Store". +The importer only removes recursively searched subdirectories---the top-level +directory you specify on the command line is never deleted. + .. _max_filename_length: max_filename_length @@ -273,6 +319,59 @@ version of ID3. Enable this option to instead use the older ID3v2.3 standard, which is preferred by certain older software such as Windows Media Player. +.. _va_name: + +va_name +~~~~~~~ + +Sets the albumartist for various-artist compilations. Defaults to ``'Various +Artists'`` (the MusicBrainz standard). Affects other sources, such as +:doc:`/plugins/discogs`, too. + + +UI Options +---------- + +The options that allow for customization of the visual appearance +of the console interface. + +These options are available in this section: + +color +~~~~~ + +Either ``yes`` or ``no``; whether to use color in console output (currently +only in the ``import`` command). Turn this off if your terminal doesn't +support ANSI colors. + +.. note:: + + The `color` option was previously a top-level configuration. This is + still respected, but a deprecation message will be shown until your + top-level `color` configuration has been nested under `ui`. + +colors +~~~~~~ + +The colors that are used throughout the user interface. These are only used if +the ``color`` option is set to ``yes``. For example, you might have a section +in your configuration file that looks like this:: + + ui: + color: yes + colors: + text_success: green + text_warning: yellow + text_error: red + text_highlight: red + text_highlight_minor: lightgray + action_default: turquoise + action: blue + +Available colors: black, darkred, darkgreen, brown (darkyellow), darkblue, +purple (darkmagenta), teal (darkcyan), lightgray, darkgray, red, green, +yellow, blue, fuchsia (magenta), turquoise (cyan), white + Importer Options ---------------- @@ -295,6 +394,8 @@ written to files when using ``beet import``. Defaults to ``yes``. The ``-w`` and ``-W`` command-line options override this setting. +.. _config-import-copy: + copy ~~~~ @@ -305,6 +406,8 @@ The option is ignored if ``move`` is enabled (i.e., beets can move or copy files but it doesn't make sense to do both). +.. _config-import-move: + move ~~~~ @@ -322,6 +425,21 @@ (and not copy) files. The ``-c`` switch to the ``beet import`` command, however, still takes precedence. +.. _link: + +link +~~~~ + +Either ``yes`` or ``no``, indicating whether to use symbolic links instead of +moving or copying files. (It conflicts with the ``move`` and ``copy`` +options.) Defaults to ``no``. + +This option only works on platforms that support symbolic links: i.e., Unixes. +It will fail on Windows. + +It's likely that you'll also want to set ``write`` to ``no`` if you use this +option to preserve the metadata on the linked files. + resume ~~~~~~ @@ -418,12 +536,6 @@ The ``--group-albums`` or ``-g`` option to the :ref:`import-cmd` command is equivalent, and the *G* interactive option invokes the same workflow. -.. note:: - - The :ref:`import log <import_log>` currently contains less information - in album-grouping mode. 
(Specifically, no directory names recorded because - directories are not used for grouping in this mode.) - Default: ``no``. .. _autotag: @@ -438,6 +550,17 @@ Default: ``yes``. +.. _duplicate_action: + +duplicate_action +~~~~~~~~~~~~~~~~ + +Either ``skip``, ``keep``, ``remove``, or ``ask``. Controls how duplicates +are treated in import task. "skip" means that new item(album or track) will be +skiped; "keep" means keep both old and new items; "remove" means remove old +item; "ask" means the user should be prompted for the action each time. +The default is ``ask``. + .. _musicbrainz-config: @@ -462,6 +585,16 @@ .. _limited: http://musicbrainz.org/doc/XML_Web_Service/Rate_Limiting .. _MusicBrainz: http://musicbrainz.org/ +.. _searchlimit: + +searchlimit +~~~~~~~~~~~ + +The number of matches returned when sending search queries to the +MusicBrainz server. + +Default: ``5``. + .. _match-config: Autotagger Matching Options @@ -608,7 +741,7 @@ singleton: Non-Album/$artist/$title comp: Compilations/$album%aunique{}/$track $title -Note the use of ``$albumartist`` instead of ``$artist``; this ensure that albums +Note the use of ``$albumartist`` instead of ``$artist``; this ensures that albums will be well-organized. For more about these format strings, see :doc:`pathformat`. The ``aunique{}`` function ensures that identically-named albums are placed in different directories; see :ref:`aunique` for details. @@ -691,11 +824,13 @@ timid: no log: beetslog.txt ignore: .AppleDouble ._* *~ .DS_Store + ignore_hidden: yes art_filename: albumart plugins: bpd pluginpath: ~/beets/myplugins threaded: yes - color: yes + ui: + color: yes paths: default: $genre/$albumartist/$album/$track $title diff -Nru beets-1.3.8+dfsg/docs/reference/pathformat.rst beets-1.3.19/docs/reference/pathformat.rst --- beets-1.3.8+dfsg/docs/reference/pathformat.rst 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/docs/reference/pathformat.rst 2016-06-20 01:53:12.000000000 +0000 @@ -76,6 +76,16 @@ * ``%time{date_time,format}``: Return the date and time in any format accepted by `strftime`_. For example, to get the year some music was added to your library, use ``%time{$added,%Y}``. +* ``%first{text}``: Returns the first item, separated by ``; ``. + You can use ``%first{text,count,skip}``, where ``count`` is the number of + items (default 1) and ``skip`` is number to skip (default 0). You can also use + ``%first{text,count,skip,sep,join}`` where ``sep`` is the separator, like + ``;`` or ``/`` and join is the text to concatenate the items. + For example, +* ``%ifdef{field}``, ``%ifdef{field,truetext}`` or + ``%ifdef{field,truetext,falsetext}``: If ``field`` exists, then return + ``truetext`` or ``field`` (default). Otherwise, returns ``falsetext``. + The ``field`` should be entered without ``$``. .. _unidecode module: http://pypi.python.org/pypi/Unidecode .. _strftime: http://docs.python.org/2/library/time.html#time.strftime diff -Nru beets-1.3.8+dfsg/docs/reference/query.rst beets-1.3.19/docs/reference/query.rst --- beets-1.3.8+dfsg/docs/reference/query.rst 2014-09-16 21:53:09.000000000 +0000 +++ beets-1.3.19/docs/reference/query.rst 2016-06-20 01:53:12.000000000 +0000 @@ -13,7 +13,7 @@ $ beet list love -will show all tracks matching the query string ``love``. Any unadorned word like this matches *anywhere* in a track's metadata, so you'll see all the tracks with "love" in their title, in their album name, in the artist, and so on. +will show all tracks matching the query string ``love``. 
By default, any unadorned word like this matches in a track's title, artist, album name, album artist, genre and comments. See below on how to search other fields. For example, this is what I might see when I run the command above:: @@ -23,6 +25,8 @@ Bat for Lashes - Two Suns - Good Love ... +.. _combiningqueries: + Combining Keywords ------------------ @@ -37,6 +39,15 @@ match "Tomorrowland" by Walter Meego---those songs only have *one* of the two keywords I specified. +Keywords can also be joined with a Boolean "or" using a comma. For example, +the command:: + + $ beet ls magnetic tomorrow , beatles yesterday + +will match both "The House of Tomorrow" by the Magnetic Fields, as well as +"Yesterday" by The Beatles. Note that the comma has to be followed by a space +(e.g., ``foo,bar`` will be treated as a single keyword, *not* as an OR-query). + Specific Fields --------------- @@ -89,7 +100,7 @@ regular expression matching for more advanced queries. To run a regex query, use an additional ``:`` between the field name and the expression:: - $ beet list 'artist::Ann(a|ie)' + $ beet list "artist::Ann(a|ie)" That query finds songs by Anna Calvi and Annie but not Annuals. Similarly, this query prints the path to any file in my library that's missing a track title:: @@ -99,11 +110,16 @@ To search *all* fields using a regular expression, just prefix the expression with a single ``:``, like so:: - $ beet list :Ho[pm]eless + $ beet list ":Ho[pm]eless" Regular expressions are case-sensitive and build on `Python's built-in implementation`_. See Python's documentation for specifics on regex syntax. +Most command-line shells will try to interpret common characters in regular +expressions, such as ``()[]|``. To type those characters, you'll need to +escape them (e.g., with backslashes or quotation marks, depending on your +shell). + .. _Python's built-in implementation: http://docs.python.org/library/re.html @@ -128,6 +144,11 @@ $ beet list format:MP3 bitrate:..128000 +The ``length`` field also lets you use an "M:SS" format. For example, this +query finds tracks that are less than four and a half minutes in length:: + + $ beet list length:..4:30 + .. _datequery: @@ -165,6 +186,35 @@ $ beet ls 'mtime:2008-12-01..2008-12-02' +.. _not_query: + +Query Term Negation +------------------- + +Query terms can also be negated, acting like a Boolean "not," by prefixing +them with ``-`` or ``^``. This has the effect of returning all the items that +do **not** match the query term. For example, this command:: + + $ beet list ^love + +matches all the songs in the library that do not have "love" in any of their +fields. + +Negation can be combined with the rest of the query mechanisms, so you can +negate specific fields, regular expressions, etc. For example, this command:: + + $ beet list -a artist:dylan ^year:1980..1989 "^album::the(y)?" + +matches all the albums with an artist containing "dylan", but excluding those +released in the eighties and those that have "the" or "they" in the title. + +The syntax supports both ``^`` and ``-`` as synonyms because the latter +indicates flags on the command line. To use a minus sign in a command-line +query, use a double dash ``--`` to separate the options from the query:: + + $ beet list -a -- artist:dylan -year:1980..1990 "-album::the(y)?" + +.. 
_pathquery: Path Queries ------------ @@ -175,8 +225,8 @@ $ beet list path:/my/music/directory In fact, beets automatically recognizes any query term containing a path -separator (``/`` on POSIX systems) as a path query, so this command is -equivalent:: +separator (``/`` on POSIX systems) as a path query if that path exists, so this +command is equivalent as long as ``/my/music/directory`` exist:: $ beet list /my/music/directory @@ -184,6 +234,8 @@ query won't necessarily find *all* the audio files in a directory---just the ones you've already added to your beets library. +Path queries are case sensitive if the queried path is on a case-sensitive +filesystem. .. _query-sort: @@ -208,5 +260,15 @@ corresponding ``artist_sort`` and ``albumartist_sort`` fields for sorting transparently (but fall back to the ordinary fields when those are empty). +Lexicographic sorts are case insensitive by default, resulting in the following +sort order: ``Bar foo Qux``. This behavior can be changed with the +:ref:`sort_case_insensitive` configuration option. Case sensitive sort will +result in lower-case values being placed after upper-case values, e.g., +``Bar Qux foo``. + +Note that when sorting by fields that are not present on all items (such as +flexible fields, or those defined by plugins) in *ascending* order, the items +that lack that particular field will be listed at the *beginning* of the list. + You can set the default sorting behavior with the :ref:`sort_item` and :ref:`sort_album` configuration options. diff -Nru beets-1.3.8+dfsg/docs/serve.py beets-1.3.19/docs/serve.py --- beets-1.3.8+dfsg/docs/serve.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/docs/serve.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,9 @@ +#!/usr/bin/env python + +from __future__ import division, absolute_import, print_function + +from livereload import Server, shell + +server = Server() +server.watch('*.rst', shell('make html')) +server.serve(root='_build/html') diff -Nru beets-1.3.8+dfsg/LICENSE beets-1.3.19/LICENSE --- beets-1.3.8+dfsg/LICENSE 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/LICENSE 2016-06-20 01:53:12.000000000 +0000 @@ -1,6 +1,6 @@ The MIT License -Copyright (c) 2010-2014 Adrian Sampson +Copyright (c) 2010-2016 Adrian Sampson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff -Nru beets-1.3.8+dfsg/man/beet.1 beets-1.3.19/man/beet.1 --- beets-1.3.8+dfsg/man/beet.1 2014-09-18 02:18:16.000000000 +0000 +++ beets-1.3.19/man/beet.1 2016-06-26 00:52:50.000000000 +0000 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH "BEET" "1" "September 17, 2014" "1.3" "beets" +.TH "BEET" "1" "Jun 25, 2016" "1.3" "beets" .SH NAME beet \- music tagger and library organizer . @@ -57,7 +57,7 @@ compressed archives. The music will be copied to a configurable directory structure and added to a library database. The command is interactive and will try to get you to verify MusicBrainz tags that it -thinks are suspect. See the \fBautotagging guide\fP +thinks are suspect. See the autotagging guide for detail on how to use the interactive tag\-correction flow. .sp Directories passed to the import command can contain either a single @@ -127,6 +127,15 @@ Similarly, if you have one directory that contains multiple albums, use the \fB\-\-group\-albums\fP option to split the files based on their metadata before matching them as separate albums. 
+.IP \(bu 2 +If you want to preview which files would be imported, use the \fB\-\-pretend\fP +option. If set, beets will just print a list of files that it would +otherwise import. +.IP \(bu 2 +If you already have a metadata backend ID that matches the items to be +imported, you can instruct beets to restrict the search to that ID instead of +searching for other candidates by using the \fB\-\-search\-id SEARCH_ID\fP option. +Multiple IDs can be specified by simply repeating the option several times. .UNINDENT .SS list .INDENT 0.0 @@ -140,12 +149,12 @@ .UNINDENT .UNINDENT .sp -\fBQueries\fP the database for music. +Queries the database for music. .sp Want to search for "Gronlandic Edit" by of Montreal? Try \fBbeet list gronlandic\fP\&. Maybe you want to see everything released in 2009 with "vegetables" in the title? Try \fBbeet list year:2009 title:vegetables\fP\&. You -can also specify the sort order. (Read more in \fBquery\fP\&.) +can also specify the sort order. (Read more in query\&.) .sp You can use the \fB\-a\fP switch to search for albums instead of individual items. In this case, the queries you use are restricted to album\-level fields: for @@ -156,7 +165,7 @@ The \fB\-p\fP option makes beets print out filenames of matched items, which might be useful for piping into other Unix commands (such as \fI\%xargs\fP). Similarly, the \fB\-f\fP option lets you specify a specific format with which to print every album -or track. This uses the same template syntax as beets\(aq \fBpath formats\fP\&. For example, the command \fBbeet ls \-af \(aq$album: $tracktotal\(aq +or track. This uses the same template syntax as beets\(aq path formats\&. For example, the command \fBbeet ls \-af \(aq$album: $tracktotal\(aq beatles\fP prints out the number of tracks on each Beatles album. In Unix shells, remember to enclose the template argument in single quotes to avoid environment variable expansion. @@ -166,7 +175,7 @@ .sp .nf .ft C -beet remove [\-ad] QUERY +beet remove [\-adf] QUERY .ft P .fi .UNINDENT @@ -174,10 +183,11 @@ .sp Remove music from your library. .sp -This command uses the same \fBquery\fP syntax as the \fBlist\fP command. +This command uses the same query syntax as the \fBlist\fP command. You\(aqll be shown a list of the files that will be removed and asked to confirm. By default, this just removes entries from the library database; it doesn\(aqt touch the files on disk. To actually delete the files, use \fBbeet remove \-d\fP\&. +If you do not want to be prompted to remove the files, use \fBbeet remove \-f\fP\&. .SS modify .INDENT 0.0 .INDENT 3.5 @@ -192,7 +202,7 @@ .sp Change the metadata for items or albums in the database. .sp -Supply a \fBquery\fP matching the things you want to change and a +Supply a query matching the things you want to change and a series of \fBfield=value\fP pairs. For example, \fBbeet modify genius of love artist="Tom Tom Club"\fP will change the artist for the track "Genius of Love." To remove fields (which is only possible for flexible attributes), follow a @@ -211,7 +221,7 @@ .sp .nf .ft C -beet move [\-ca] [\-d DIR] QUERY +beet move [\-capt] [\-d DIR] QUERY .ft P .fi .UNINDENT @@ -224,6 +234,11 @@ destination directory with \fB\-d\fP manually, you can move items matching a query anywhere in your filesystem. The \fB\-c\fP option copies files instead of moving them. As with other commands, the \fB\-a\fP option matches albums instead of items. +.sp +To perform a "dry run", just use the \fB\-p\fP (for "pretend") flag. 
This will +show you a list of files that would be moved but won\(aqt actually change anything +on disk. The \fB\-t\fP option sets the timid mode which will ask again +before really moving or copying the files. .SS update .INDENT 0.0 .INDENT 3.5 @@ -241,7 +256,10 @@ .sp This will scan all the matched files and read their tags, populating the database with the new values. By default, files will be renamed according to -their new metadata; disable this with \fB\-M\fP\&. +their new metadata; disable this with \fB\-M\fP\&. Beets will skip files if their +modification times have not changed, so any out\-of\-band metadata changes must +also update these for \fBbeet update\fP to recognise that the files have been +edited. .sp To perform a "dry run" of an update, just use the \fB\-p\fP (for "pretend") flag. This will show you all the proposed changes but won\(aqt actually change anything @@ -293,10 +311,11 @@ .UNINDENT .sp Show some statistics on your entire library (if you don\(aqt provide a -\fBquery\fP) or the matched items (if you do). +query) or the matched items (if you do). .sp -The \fB\-e\fP (\fB\-\-exact\fP) option makes the calculation of total file size more -accurate but slower. +By default, the command calculates file sizes using their bitrate and +duration. The \fB\-e\fP (\fB\-\-exact\fP) option reads the exact sizes of each file +(but is slower). The exact mode also outputs the exact duration in seconds. .SS fields .INDENT 0.0 .INDENT 3.5 @@ -309,15 +328,17 @@ .UNINDENT .UNINDENT .sp -Show the item and album metadata fields available for use in \fBquery\fP and -\fBpathformat\fP\&. Includes any template fields provided by plugins. +Show the item and album metadata fields available for use in query and +pathformat\&. The listing includes any template fields provided by +plugins and any flexible attributes you\(aqve manually assigned to your items and +albums. .SS config .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C -beet config [\-pd] +beet config [\-pdc] beet config \-e .ft P .fi @@ -335,6 +356,9 @@ This can be combined with the \fB\-\-default\fP flag to show where beets keeps its internal defaults. .IP \(bu 2 +By default, sensitive information like passwords is removed when dumping the +configuration. The \fB\-\-clear\fP option includes this sensitive data. +.IP \(bu 2 With the \fB\-\-edit\fP option, beets attempts to open your config file for editing. It first tries the \fB$EDITOR\fP environment variable and then a fallback option depending on your platform: \fBopen\fP on OS X, \fBxdg\-open\fP @@ -352,9 +376,10 @@ \fB\-d DIRECTORY\fP: specify the library root directory. .IP \(bu 2 \fB\-v\fP: verbose mode; prints out a deluge of debugging information. Please use -this flag when reporting bugs. +this flag when reporting bugs. You can use it twice, as in \fB\-vv\fP, to make +beets even more verbose. .IP \(bu 2 -\fB\-c FILE\fP: read a specified YAML \fBconfiguration file\fP\&. +\fB\-c FILE\fP: read a specified YAML configuration file\&. .UNINDENT .sp Beets also uses the \fBBEETSDIR\fP environment variable to look for @@ -406,16 +431,38 @@ Completion of plugin commands only works for those plugins that were enabled when running \fBbeet completion\fP\&. If you add a plugin later on you will want to re\-generate the script. +.SS zsh +.sp +If you use zsh, take a look at the included \fI\%completion script\fP\&. The script +should be placed in a directory that is part of your \fBfpath\fP, and \fInot\fP +sourced in your \fB\&.zshrc\fP\&. 
Running \fBecho $fpath\fP will give you a list of +valid directories. +.sp +Another approach is to use zsh\(aqs bash completion compatibility. This snippet +defines some bash\-specific functions to make this work without errors: +.INDENT 0.0 +.INDENT 3.5 .sp -If you use zsh, take a look instead at the included \fI\%completion script\fP\&. +.nf +.ft C +autoload bashcompinit +bashcompinit +_get_comp_words_by_ref() { :; } +compopt() { :; } +_filedir() { :; } +eval "$(beet completion)" +.ft P +.fi +.UNINDENT +.UNINDENT .SH SEE ALSO .sp \fBhttp://beets.readthedocs.org/\fP .sp -\fIbeetsconfig(5)\fP +\fBbeetsconfig(5)\fP .SH AUTHOR Adrian Sampson .SH COPYRIGHT -2012, Adrian Sampson +2016, Adrian Sampson .\" Generated by docutils manpage writer. . diff -Nru beets-1.3.8+dfsg/man/beetsconfig.5 beets-1.3.19/man/beetsconfig.5 --- beets-1.3.8+dfsg/man/beetsconfig.5 2014-09-18 02:18:16.000000000 +0000 +++ beets-1.3.19/man/beetsconfig.5 2016-06-26 00:52:50.000000000 +0000 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH "BEETSCONFIG" "5" "September 17, 2014" "1.3" "beets" +.TH "BEETSCONFIG" "5" "Jun 25, 2016" "1.3" "beets" .SH NAME beetsconfig \- beets configuration file . @@ -47,7 +47,7 @@ .UNINDENT .sp You can launch your text editor to create or update your configuration by -typing \fBbeet config \-e\fP\&. (See the \fIconfig\-cmd\fP command for details.) It +typing \fBbeet config \-e\fP\&. (See the config\-cmd command for details.) It is also possible to customize the location of the configuration file and even use multiple layers of configuration. See \fI\%Configuration Location\fP, below. .sp @@ -87,10 +87,14 @@ .IP \(bu 2 \fI\%plugins\fP .IP \(bu 2 +\fI\%include\fP +.IP \(bu 2 \fI\%pluginpath\fP .IP \(bu 2 \fI\%ignore\fP .IP \(bu 2 +\fI\%ignore_hidden\fP +.IP \(bu 2 \fI\%replace\fP .IP \(bu 2 \fI\%asciify_paths\fP @@ -99,16 +103,16 @@ .IP \(bu 2 \fI\%threaded\fP .IP \(bu 2 -\fI\%color\fP +\fI\%format_item\fP .IP \(bu 2 -\fI\%list_format_item\fP -.IP \(bu 2 -\fI\%list_format_album\fP +\fI\%format_album\fP .IP \(bu 2 \fI\%sort_item\fP .IP \(bu 2 \fI\%sort_album\fP .IP \(bu 2 +\fI\%sort_case_insensitive\fP +.IP \(bu 2 \fI\%original_date\fP .IP \(bu 2 \fI\%per_disc_numbering\fP @@ -120,6 +124,16 @@ \fI\%max_filename_length\fP .IP \(bu 2 \fI\%id3v23\fP +.IP \(bu 2 +\fI\%va_name\fP +.UNINDENT +.IP \(bu 2 +\fI\%UI Options\fP +.INDENT 2.0 +.IP \(bu 2 +\fI\%color\fP +.IP \(bu 2 +\fI\%colors\fP .UNINDENT .IP \(bu 2 \fI\%Importer Options\fP @@ -131,6 +145,8 @@ .IP \(bu 2 \fI\%move\fP .IP \(bu 2 +\fI\%link\fP +.IP \(bu 2 \fI\%resume\fP .IP \(bu 2 \fI\%incremental\fP @@ -152,9 +168,15 @@ \fI\%group_albums\fP .IP \(bu 2 \fI\%autotag\fP +.IP \(bu 2 +\fI\%duplicate_action\fP .UNINDENT .IP \(bu 2 \fI\%MusicBrainz Options\fP +.INDENT 2.0 +.IP \(bu 2 +\fI\%searchlimit\fP +.UNINDENT .IP \(bu 2 \fI\%Autotagger Matching Options\fP .INDENT 2.0 @@ -196,7 +218,11 @@ .SS plugins .sp A space\-separated list of plugin module names to load. See -\fIusing\-plugins\fP\&. +using\-plugins\&. +.SS include +.sp +A space\-separated list of extra configuration files to include. +Filenames are relative to the directory containing \fBconfig.yaml\fP\&. .SS pluginpath .sp Directories to search for plugins. Each Python file or directory in a plugin @@ -219,9 +245,17 @@ .SS ignore .sp A list of glob patterns specifying file and directory names to be ignored when -importing. 
By default, this consists of \fB\&.*\fP, \fB*~\fP, and \fBSystem Volume -Information\fP (i.e., beets ignores Unix\-style hidden files, backup files, and -a directory that appears at the root of some Windows filesystems). +importing. By default, this consists of \fB\&.*\fP, \fB*~\fP, \fBSystem Volume +Information\fP, \fBlost+found\fP (i.e., beets ignores Unix\-style hidden files, +backup files, and directories that appears at the root of some Linux and Windows +filesystems). +.SS ignore_hidden +.sp +Either \fByes\fP or \fBno\fP; whether to ignore hidden files when importing. On +Windows, the "Hidden" property of files is used to detect whether or not a file +is hidden. On OS X, the file\(aqs "IsHidden" flag is used to detect whether or not +a file is hidden. On both OS X and other platforms (excluding Windows), files +(and directories) starting with a dot are detected as hidden files. .SS replace .sp A set of regular expression/replacement pairs to be applied to all filenames @@ -261,6 +295,11 @@ Trailing dots and trailing whitespace, which can cause problems on Windows clients, are also removed. .sp +When replacements other than the defaults are used, it is possible that they +will increase the length of the path. In the scenario where this leads to a +conflict with the maximum filename length, the default replacements will be +used to resolve the conflict and beets will display a warning. +.sp Note that paths might contain special characters such as typographical quotes (\fB“”\fP). With the configuration above, those will not be replaced as they don\(aqt match the typewriter quote (\fB"\fP). To also strip these @@ -275,37 +314,39 @@ then the track will be saved as \fBsingletons/Cafe.mp3\fP\&. The changes take place before applying the \fI\%replace\fP configuration and are roughly equivalent to wrapping all your path templates in the \fB%asciify{}\fP -\fItemplate function\fP\&. +template function\&. .sp Default: \fBno\fP\&. .SS art_filename .sp When importing album art, the name of the file (without extension) where the cover art image should be placed. This is a template string, so you can use any -of the syntax available to \fB/reference/pathformat\fP\&. Defaults to \fBcover\fP +of the syntax available to /reference/pathformat\&. Defaults to \fBcover\fP (i.e., images will be named \fBcover.jpg\fP or \fBcover.png\fP and placed in the album\(aqs directory). .SS threaded .sp Either \fByes\fP or \fBno\fP, indicating whether the autotagger should use -multiple threads. This makes things faster but may behave strangely. +multiple threads. This makes things substantially faster by overlapping work: +for example, it can copy files for one album in parallel with looking up data +in MusicBrainz for a different album. You may want to disable this when +debugging problems with the autotagger. Defaults to \fByes\fP\&. -.SS color +.SS format_item .sp -Either \fByes\fP or \fBno\fP; whether to use color in console output (currently -only in the \fBimport\fP command). Turn this off if your terminal doesn\(aqt -support ANSI colors. -.SS list_format_item -.sp -Format to use when listing \fIindividual items\fP with the \fIlist\-cmd\fP +Format to use when listing \fIindividual items\fP with the list\-cmd command and other commands that need to print out items. Defaults to \fB$artist \- $album \- $title\fP\&. The \fB\-f\fP command\-line option overrides this setting. 
-.SS list_format_album .sp -Format to use when listing \fIalbums\fP with \fIlist\-cmd\fP and other +It used to be named \fIlist_format_item\fP\&. +.SS format_album +.sp +Format to use when listing \fIalbums\fP with list\-cmd and other commands. Defaults to \fB$albumartist \- $album\fP\&. The \fB\-f\fP command\-line option overrides this setting. +.sp +It used to be named \fIlist_format_album\fP\&. .SS sort_item .sp Default sort order to use when fetching items from the database. Defaults to @@ -314,6 +355,12 @@ .sp Default sort order to use when fetching items from the database. Defaults to \fBalbumartist+ album+\fP\&. Explicit sort orders override this default. +.SS sort_case_insensitive +.sp +Either \fByes\fP or \fBno\fP, indicating whether the case should be ignored when +sorting lexicographic fields. When set to \fBno\fP, lower\-case values will be +placed after upper\-case values (e.g., \fIBar Qux foo\fP), while \fByes\fP would +result in the more expected \fIBar foo Qux\fP\&. Default: \fByes\fP\&. .SS original_date .sp Either \fByes\fP or \fBno\fP, indicating whether matched albums should have their @@ -327,7 +374,7 @@ default (\fBper_disc_numbering: no\fP), tracks are numbered per\-release, so the first track on the second disc has track number N+1 where N is the number of tracks on the first disc. If this \fBper_disc_numbering\fP is enabled, then the -first track on each disc always has track number 1. +first (non\-pregap) track on each disc always has track number 1. .sp If you enable \fBper_disc_numbering\fP, you will likely want to change your \fI\%Path Format Configuration\fP also to include \fB$disc\fP before \fB$track\fP to make @@ -344,10 +391,15 @@ .fi .UNINDENT .UNINDENT +.sp +When this option is off (the default), even "pregap" hidden tracks are +numbered from one, not zero, so other track numbers may appear to be bumped up +by one. When it is on, the pregap track for each disc can be numbered zero. .SS terminal_encoding .sp The text encoding, as \fI\%known to Python\fP, to use for messages printed to the -standard output. By default, this is determined automatically from the locale +standard output. It\(aqs also used to read messages from the standard input. +By default, this is determined automatically from the locale environment variables. .SS clutter .sp @@ -355,6 +407,9 @@ directory if it\(aqs empty. A directory is considered empty if it only contains files whose names match the glob patterns in \fIclutter\fP, which should be a list of strings. The default list consists of "Thumbs.DB" and ".DS_Store". +.sp +The importer only removes recursively searched subdirectories\-\-\-the top\-level +directory you specify on the command line is never deleted. .SS max_filename_length .sp Set the maximum number of characters in a filename, after which names will be @@ -365,9 +420,62 @@ By default, beets writes MP3 tags using the ID3v2.4 standard, the latest version of ID3. Enable this option to instead use the older ID3v2.3 standard, which is preferred by certain older software such as Windows Media Player. +.SS va_name +.sp +Sets the albumartist for various\-artist compilations. Defaults to \fB\(aqVarious +Artists\(aq\fP (the MusicBrainz standard). Affects other sources, such as +/plugins/discogs, too. +.SH UI OPTIONS +.sp +The options that allow for customization of the visual appearance +of the console interface. 
+.sp +These options are available in this section: +.SS color +.sp +Either \fByes\fP or \fBno\fP; whether to use color in console output (currently +only in the \fBimport\fP command). Turn this off if your terminal doesn\(aqt +support ANSI colors. +.sp +\fBNOTE:\fP +.INDENT 0.0 +.INDENT 3.5 +The \fIcolor\fP option was previously a top\-level configuration. This is +still respected, but a deprecation message will be shown until your +top\-level \fIcolor\fP configuration has been nested under \fIui\fP\&. +.UNINDENT +.UNINDENT +.SS colors +.sp +The colors that are used throughout the user interface. These are only used if +the \fBcolor\fP option is set to \fByes\fP\&. For example, you might have a section +in your configuration file that looks like this: +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +ui: + color: yes + colors: + text_success: green + text_warning: yellow + text_error: red + text_highlight: red + text_highlight_minor: lightgray + action_default: turquoise + action: blue +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +Available colors: black, darkred, darkgreen, brown (darkyellow), darkblue, +purple (darkmagenta), teal (darkcyan), lightgray, darkgray, red, green, +yellow, blue, fuchsia (magenta), turquoise (cyan), white .SH IMPORTER OPTIONS .sp -The options that control the \fIimport\-cmd\fP command are indented under the +The options that control the import\-cmd command are indented under the \fBimport:\fP key. For example, you might have a section in your configuration file that looks like this: .INDENT 0.0 @@ -413,6 +521,17 @@ This option \fIoverrides\fP \fBcopy\fP, so enabling it will always move (and not copy) files. The \fB\-c\fP switch to the \fBbeet import\fP command, however, still takes precedence. +.SS link +.sp +Either \fByes\fP or \fBno\fP, indicating whether to use symbolic links instead of +moving or copying files. (It conflicts with the \fBmove\fP and \fBcopy\fP +options.) Defaults to \fBno\fP\&. +.sp +This option only works on platforms that support symbolic links: i.e., Unixes. +It will fail on Windows. +.sp +It\(aqs likely that you\(aqll also want to set \fBwrite\fP to \fBno\fP if you use this +option to preserve the metadata on the linked files. .SS resume .sp Either \fByes\fP, \fBno\fP, or \fBask\fP\&. Controls whether interrupted imports @@ -473,27 +592,25 @@ partition albums. Enable this option if you have directories that contain tracks from many albums mixed together. .sp -The \fB\-\-group\-albums\fP or \fB\-g\fP option to the \fIimport\-cmd\fP command is +The \fB\-\-group\-albums\fP or \fB\-g\fP option to the import\-cmd command is equivalent, and the \fIG\fP interactive option invokes the same workflow. .sp -\fBNOTE:\fP -.INDENT 0.0 -.INDENT 3.5 -The \fI\%import log\fP currently contains less information -in album\-grouping mode. (Specifically, no directory names recorded because -directories are not used for grouping in this mode.) -.UNINDENT -.UNINDENT -.sp Default: \fBno\fP\&. .SS autotag .sp By default, the beets importer always attempts to autotag new music. If most of your collection consists of obscure music, you may be interested in disabling autotagging by setting this option to \fBno\fP\&. (You can re\-enable it -with the \fB\-a\fP flag to the \fIimport\-cmd\fP command.) +with the \fB\-a\fP flag to the import\-cmd command.) .sp Default: \fByes\fP\&. +.SS duplicate_action +.sp +Either \fBskip\fP, \fBkeep\fP, \fBremove\fP, or \fBask\fP\&. Controls how duplicates +are treated in import task. 
"skip" means that new item(album or track) will be +skiped; "keep" means keep both old and new items; "remove" means remove old +item; "ask" means the user should be prompted for the action each time. +The default is \fBask\fP\&. .SH MUSICBRAINZ OPTIONS .sp If you run your own \fI\%MusicBrainz\fP server, you can instruct beets to use it @@ -518,6 +635,12 @@ per second (default: 1). \fBDo not change the rate limit setting\fP if you\(aqre using the main MusicBrainz server\-\-\-on this public server, you\(aqre \fI\%limited\fP to one request per second. +.SS searchlimit +.sp +The number of matches returned when sending search queries to the +MusicBrainz server. +.sp +Default: \fB5\fP\&. .SH AUTOTAGGER MATCHING OPTIONS .sp You can configure some aspects of the logic beets uses when automatically @@ -709,13 +832,13 @@ .UNINDENT .UNINDENT .sp -Note the use of \fB$albumartist\fP instead of \fB$artist\fP; this ensure that albums +Note the use of \fB$albumartist\fP instead of \fB$artist\fP; this ensures that albums will be well\-organized. For more about these format strings, see -\fBpathformat\fP\&. The \fBaunique{}\fP function ensures that identically\-named -albums are placed in different directories; see \fIaunique\fP for details. +pathformat\&. The \fBaunique{}\fP function ensures that identically\-named +albums are placed in different directories; see aunique for details. .sp In addition to \fBdefault\fP, \fBcomp\fP, and \fBsingleton\fP, you can condition path -queries based on beets queries (see \fB/reference/query\fP). This means that a +queries based on beets queries (see /reference/query). This means that a config file like this: .INDENT 0.0 .INDENT 3.5 @@ -794,11 +917,13 @@ timid: no log: beetslog.txt ignore: .AppleDouble ._* *~ .DS_Store +ignore_hidden: yes art_filename: albumart plugins: bpd pluginpath: ~/beets/myplugins threaded: yes -color: yes +ui: + color: yes paths: default: $genre/$albumartist/$album/$track $title @@ -813,10 +938,10 @@ .sp \fBhttp://beets.readthedocs.org/\fP .sp -\fIbeet(1)\fP +\fBbeet(1)\fP .SH AUTHOR Adrian Sampson .SH COPYRIGHT -2012, Adrian Sampson +2016, Adrian Sampson .\" Generated by docutils manpage writer. . diff -Nru beets-1.3.8+dfsg/MANIFEST.in beets-1.3.19/MANIFEST.in --- beets-1.3.8+dfsg/MANIFEST.in 2014-03-26 21:43:57.000000000 +0000 +++ beets-1.3.19/MANIFEST.in 2016-06-20 01:53:12.000000000 +0000 @@ -1,6 +1,8 @@ # Include tests (but avoid including *.pyc, etc.) prune test recursive-include test/rsrc * +recursive-exclude test/rsrc *.pyc +recursive-exclude test/rsrc *.pyo include test/*.py # Include relevant text files. diff -Nru beets-1.3.8+dfsg/PKG-INFO beets-1.3.19/PKG-INFO --- beets-1.3.8+dfsg/PKG-INFO 2014-09-18 02:18:17.000000000 +0000 +++ beets-1.3.19/PKG-INFO 2016-06-26 00:52:53.000000000 +0000 @@ -1,19 +1,23 @@ Metadata-Version: 1.1 Name: beets -Version: 1.3.8 +Version: 1.3.19 Summary: music tagger and library organizer -Home-page: http://beets.radbox.org/ +Home-page: http://beets.io/ Author: Adrian Sampson Author-email: adrian@radbox.org License: MIT -Description: .. image:: https://travis-ci.org/sampsyo/beets.svg?branch=master - :target: https://travis-ci.org/sampsyo/beets +Description: .. image:: http://img.shields.io/pypi/v/beets.svg + :target: https://pypi.python.org/pypi/beets - .. image:: http://img.shields.io/coveralls/sampsyo/beets.svg - :target: https://coveralls.io/r/sampsyo/beets + .. image:: https://img.shields.io/pypi/dw/beets.svg + :target: https://pypi.python.org/pypi/beets#downloads + + .. 
image:: http://img.shields.io/codecov/c/github/beetbox/beets.svg + :target: https://codecov.io/github/beetbox/beets + + .. image:: https://travis-ci.org/beetbox/beets.svg?branch=master + :target: https://travis-ci.org/beetbox/beets - .. image:: http://img.shields.io/pypi/v/beets.svg - :target: https://pypi.python.org/pypi/beets Beets is the media library management system for obsessive-compulsive music geeks. @@ -39,7 +43,7 @@ - Fetch or calculate all the metadata you could possibly need: `album art`_, `lyrics`_, `genres`_, `tempos`_, `ReplayGain`_ levels, or `acoustic fingerprints`_. - - Get metadata from `MusicBrainz`_, `Discogs`_, or `Beatport`_. Or guess + - Get metadata from `MusicBrainz`_, `Discogs`_, and `Beatport`_. Or guess metadata using songs' filenames or their acoustic fingerprints. - `Transcode audio`_ to any format you like. - Check your library for `duplicate tracks and albums`_ or for `albums that @@ -68,16 +72,16 @@ http://beets.readthedocs.org/page/plugins/duplicates.html .. _Transcode audio: http://beets.readthedocs.org/page/plugins/convert.html - .. _Beatport: http://www.beatport.com/ .. _Discogs: http://www.discogs.com/ .. _acoustic fingerprints: http://beets.readthedocs.org/page/plugins/chroma.html .. _ReplayGain: http://beets.readthedocs.org/page/plugins/replaygain.html - .. _tempos: http://beets.readthedocs.org/page/plugins/echonest_tempo.html + .. _tempos: http://beets.readthedocs.org/page/plugins/acousticbrainz.html .. _genres: http://beets.readthedocs.org/page/plugins/lastgenre.html .. _album art: http://beets.readthedocs.org/page/plugins/fetchart.html .. _lyrics: http://beets.readthedocs.org/page/plugins/lyrics.html .. _MusicBrainz: http://musicbrainz.org/ + .. _Beatport: https://www.beatport.com Read More --------- @@ -88,7 +92,7 @@ You can install beets by typing ``pip install beets``. Then check out the `Getting Started`_ guide. - .. _its Web site: http://beets.radbox.org/ + .. _its Web site: http://beets.io/ .. _Getting Started: http://beets.readthedocs.org/page/guides/main.html .. _@b33ts: http://twitter.com/b33ts/ @@ -108,5 +112,4 @@ Classifier: Environment :: Console Classifier: Environment :: Web Environment Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 diff -Nru beets-1.3.8+dfsg/README.rst beets-1.3.19/README.rst --- beets-1.3.8+dfsg/README.rst 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/README.rst 2016-06-26 00:44:08.000000000 +0000 @@ -1,12 +1,16 @@ -.. image:: https://travis-ci.org/sampsyo/beets.svg?branch=master - :target: https://travis-ci.org/sampsyo/beets - -.. image:: http://img.shields.io/coveralls/sampsyo/beets.svg - :target: https://coveralls.io/r/sampsyo/beets - .. image:: http://img.shields.io/pypi/v/beets.svg :target: https://pypi.python.org/pypi/beets +.. image:: https://img.shields.io/pypi/dw/beets.svg + :target: https://pypi.python.org/pypi/beets#downloads + +.. image:: http://img.shields.io/codecov/c/github/beetbox/beets.svg + :target: https://codecov.io/github/beetbox/beets + +.. image:: https://travis-ci.org/beetbox/beets.svg?branch=master + :target: https://travis-ci.org/beetbox/beets + + Beets is the media library management system for obsessive-compulsive music geeks. @@ -31,7 +35,7 @@ - Fetch or calculate all the metadata you could possibly need: `album art`_, `lyrics`_, `genres`_, `tempos`_, `ReplayGain`_ levels, or `acoustic fingerprints`_. -- Get metadata from `MusicBrainz`_, `Discogs`_, or `Beatport`_. 
Or guess +- Get metadata from `MusicBrainz`_, `Discogs`_, and `Beatport`_. Or guess metadata using songs' filenames or their acoustic fingerprints. - `Transcode audio`_ to any format you like. - Check your library for `duplicate tracks and albums`_ or for `albums that @@ -60,16 +64,16 @@ http://beets.readthedocs.org/page/plugins/duplicates.html .. _Transcode audio: http://beets.readthedocs.org/page/plugins/convert.html -.. _Beatport: http://www.beatport.com/ .. _Discogs: http://www.discogs.com/ .. _acoustic fingerprints: http://beets.readthedocs.org/page/plugins/chroma.html .. _ReplayGain: http://beets.readthedocs.org/page/plugins/replaygain.html -.. _tempos: http://beets.readthedocs.org/page/plugins/echonest_tempo.html +.. _tempos: http://beets.readthedocs.org/page/plugins/acousticbrainz.html .. _genres: http://beets.readthedocs.org/page/plugins/lastgenre.html .. _album art: http://beets.readthedocs.org/page/plugins/fetchart.html .. _lyrics: http://beets.readthedocs.org/page/plugins/lyrics.html .. _MusicBrainz: http://musicbrainz.org/ +.. _Beatport: https://www.beatport.com Read More --------- @@ -80,7 +84,7 @@ You can install beets by typing ``pip install beets``. Then check out the `Getting Started`_ guide. -.. _its Web site: http://beets.radbox.org/ +.. _its Web site: http://beets.io/ .. _Getting Started: http://beets.readthedocs.org/page/guides/main.html .. _@b33ts: http://twitter.com/b33ts/ diff -Nru beets-1.3.8+dfsg/setup.cfg beets-1.3.19/setup.cfg --- beets-1.3.8+dfsg/setup.cfg 2014-09-18 02:18:17.000000000 +0000 +++ beets-1.3.19/setup.cfg 2016-06-26 00:52:53.000000000 +0000 @@ -1,9 +1,11 @@ [nosetests] verbosity = 1 logging-clear-handlers = 1 +eval-attr = "!=slow" [flake8] -ignore = E241,E221 +min-version = 2.7 +ignore = C901,E241,E221,E731,F405,FI50,FI51,FI12,FI53,FI14,FI15 [egg_info] tag_build = diff -Nru beets-1.3.8+dfsg/setup.py beets-1.3.19/setup.py --- beets-1.3.8+dfsg/setup.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/setup.py 2016-06-26 00:42:09.000000000 +0000 @@ -1,7 +1,8 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,6 +15,8 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. +from __future__ import division, absolute_import, print_function + import os import sys import subprocess @@ -26,14 +29,16 @@ return open(path).read() -# Build manpages if we're making a source distribution tarball. -if 'sdist' in sys.argv: +def build_manpages(): # Go into the docs directory and build the manpage. docdir = os.path.join(os.path.dirname(__file__), 'docs') curdir = os.getcwd() os.chdir(docdir) try: subprocess.check_call(['make', 'man']) + except OSError: + print("Could not build manpages (make man failed)!", file=sys.stderr) + return finally: os.chdir(curdir) @@ -43,13 +48,19 @@ shutil.rmtree(mandir) shutil.copytree(os.path.join(docdir, '_build', 'man'), mandir) + +# Build manpages if we're making a source distribution tarball. 
+if 'sdist' in sys.argv: + build_manpages() + + setup( name='beets', - version='1.3.8', + version='1.3.19', description='music tagger and library organizer', author='Adrian Sampson', author_email='adrian@radbox.org', - url='http://beets.radbox.org/', + url='http://beets.io/', license='MIT', platforms='ALL', long_description=_read('README.rst'), @@ -66,6 +77,7 @@ 'beetsplug.bpd', 'beetsplug.web', 'beetsplug.lastgenre', + 'beetsplug.metasync', ], entry_points={ 'console_scripts': [ @@ -74,40 +86,41 @@ }, install_requires=[ - 'enum34', - 'mutagen>=1.23', + 'enum34>=1.0.4', + 'mutagen>=1.27', 'munkres', 'unidecode', 'musicbrainzngs>=0.4', 'pyyaml', - ] - + (['colorama'] if (sys.platform == 'win32') else []) - + (['ordereddict'] if sys.version_info < (2, 7, 0) else []), + 'jellyfish', + ] + (['colorama'] if (sys.platform == 'win32') else []), tests_require=[ - 'responses', - 'pyechonest', - 'mock', + 'beautifulsoup4', 'flask', - 'rarfile', + 'mock', 'pylast', + 'rarfile', + 'responses', + 'pyxdg', + 'pathlib', + 'python-mpd2', ], # Plugin (optional) dependencies: extras_require={ - 'beatport': ['requests'], 'fetchart': ['requests'], 'chroma': ['pyacoustid'], - 'discogs': ['discogs-client>=2.0.0'], - 'echonest': ['pyechonest'], - 'echonest_tempo': ['pyechonest'], + 'discogs': ['discogs-client>=2.1.0'], + 'beatport': ['requests-oauthlib>=0.6.1'], 'lastgenre': ['pylast'], - 'mpdstats': ['python-mpd'], - 'web': ['flask'], + 'mpdstats': ['python-mpd2'], + 'web': ['flask', 'flask-cors'], 'import': ['rarfile'], + 'thumbnails': ['pathlib', 'pyxdg'], + 'metasync': ['dbus-python'], }, # Non-Python/non-PyPI plugin dependencies: - # replaygain: mp3gain || aacgain # convert: ffmpeg # bpd: pygst @@ -118,7 +131,6 @@ 'Environment :: Console', 'Environment :: Web Environment', 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', ], ) diff -Nru beets-1.3.8+dfsg/test/_common.py beets-1.3.19/test/_common.py --- beets-1.3.8+dfsg/test/_common.py 2014-09-16 21:17:49.000000000 +0000 +++ beets-1.3.19/test/_common.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -13,25 +14,23 @@ # included in all copies or substantial portions of the Software. """Some common functionality for beets' test cases.""" +from __future__ import division, absolute_import, print_function + import time import sys import os -import logging import tempfile import shutil +import unittest from contextlib import contextmanager -# Use unittest2 on Python < 2.7. -try: - import unittest2 as unittest -except ImportError: - import unittest # Mangle the search path to include the beets sources. -sys.path.insert(0, '..') +sys.path.insert(0, '..') # noqa import beets.library -from beets import importer +from beets import importer, logging from beets.ui import commands +from beets import util import beets # Make sure the development versions of the plugins are used @@ -41,7 +40,8 @@ )] # Test resources path. 
-RSRC = os.path.join(os.path.dirname(__file__), 'rsrc') +RSRC = util.bytestring_path(os.path.join(os.path.dirname(__file__), 'rsrc')) +PLUGINPATH = os.path.join(os.path.dirname(__file__), 'rsrc', 'beetsplug') # Propagate to root loger so nosetest can capture it log = logging.getLogger('beets') @@ -51,6 +51,9 @@ # Dummy item creation. _item_ident = 0 +# OS feature test. +HAVE_SYMLINK = hasattr(os, 'symlink') + def item(lib=None): global _item_ident @@ -74,7 +77,7 @@ comments=u'the comments', bpm=8, comp=True, - path='somepath' + str(_item_ident), + path='somepath{0}'.format(_item_ident), length=60.0, bitrate=128000, format='FLAC', @@ -96,11 +99,11 @@ _item_ident += 1 i = beets.library.Album( artpath=None, - albumartist='some album artist', - albumartist_sort='some sort album artist', - albumartist_credit='some album artist credit', - album='the album', - genre='the genre', + albumartist=u'some album artist', + albumartist_sort=u'some sort album artist', + albumartist_credit=u'some album artist credit', + album=u'the album', + genre=u'the genre', year=2014, month=2, day=5, @@ -116,14 +119,35 @@ # Dummy import session. -def import_session(lib=None, logfile=None, paths=[], query=[], cli=False): +def import_session(lib=None, loghandler=None, paths=[], query=[], cli=False): cls = commands.TerminalImportSession if cli else importer.ImportSession - return cls(lib, logfile, paths, query) + return cls(lib, loghandler, paths, query) + + +class Assertions(object): + """A mixin with additional unit test assertions.""" + + def assertExists(self, path): # noqa + self.assertTrue(os.path.exists(util.syspath(path)), + u'file does not exist: {!r}'.format(path)) + + def assertNotExists(self, path): # noqa + self.assertFalse(os.path.exists(util.syspath(path)), + u'file exists: {!r}'.format((path))) + + def assert_equal_path(self, a, b): + """Check that two paths are equal.""" + # The common case. + if a == b: + return + + self.assertEqual(util.normpath(a), util.normpath(b), + u'paths are not equal: {!r} and {!r}'.format(a, b)) # A test harness for all beets tests. # Provides temporary, isolated configuration. -class TestCase(unittest.TestCase): +class TestCase(unittest.TestCase, Assertions): """A unittest.TestCase subclass that saves and restores beets' global configuration. This allows tests to make temporary modifications that will then be automatically removed when the test @@ -162,14 +186,6 @@ beets.config.clear() beets.config._materialized = False - def assertExists(self, path): - self.assertTrue(os.path.exists(path), - 'file does not exist: %s' % path) - - def assertNotExists(self, path): - self.assertFalse(os.path.exists(path), - 'file exists: %s' % path) - class LibTestCase(TestCase): """A test case that includes an in-memory library object (`lib`) and @@ -222,7 +238,7 @@ def __str__(self): msg = "Attempt to read with no input provided." 
if self.output is not None: - msg += " Output: %s" % self.output + msg += " Output: {!r}".format(self.output) return msg @@ -236,7 +252,7 @@ self.buf.append(s) def get(self): - return ''.join(self.buf) + return b''.join(self.buf) def clear(self): self.buf = [] @@ -251,7 +267,7 @@ self.out = out def add(self, s): - self.buf.append(s + '\n') + self.buf.append(s + b'\n') def readline(self): if not self.buf: @@ -340,3 +356,11 @@ yield finally: platform.system = old_system + + +def slow_test(unused=None): + def _id(obj): + return obj + if 'SKIP_SLOW_TESTS' in os.environ: + return unittest.skip(u'test is slow') + return _id diff -Nru beets-1.3.8+dfsg/test/helper.py beets-1.3.19/test/helper.py --- beets-1.3.8+dfsg/test/helper.py 2014-09-14 18:35:06.000000000 +0000 +++ beets-1.3.19/test/helper.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Thomas Scholtes. +# Copyright 2016, Thomas Scholtes. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,7 +16,7 @@ """This module includes various helpers that provide fixtures, capture information or mock the environment. -- The `control_stdin` and `capture_output` context managers allow one to +- The `control_stdin` and `capture_stdout` context managers allow one to interact with the user interface. - `has_program` checks the presence of a command on the system. @@ -30,6 +31,8 @@ """ +from __future__ import division, absolute_import, print_function + import sys import os import os.path @@ -41,15 +44,39 @@ from enum import Enum import beets +from beets import logging from beets import config import beets.plugins from beets.library import Library, Item, Album from beets import importer from beets.autotag.hooks import AlbumInfo, TrackInfo -from beets.mediafile import MediaFile +from beets.mediafile import MediaFile, Image +from beets.ui import _arg_encoding +from beets import util # TODO Move AutotagMock here -import _common +from test import _common + + +class LogCapture(logging.Handler): + + def __init__(self): + logging.Handler.__init__(self) + self.messages = [] + + def emit(self, record): + self.messages.append(unicode(record.msg)) + + +@contextmanager +def capture_log(logger='beets'): + capture = LogCapture() + log = logging.getLogger(logger) + log.addHandler(capture) + try: + yield capture.messages + finally: + log.removeHandler(capture) @contextmanager @@ -92,12 +119,18 @@ def has_program(cmd, args=['--version']): """Returns `True` if `cmd` can be executed. 
""" + full_cmd = [cmd] + args + for i, elem in enumerate(full_cmd): + if isinstance(elem, unicode): + full_cmd[i] = elem.encode(_arg_encoding()) try: with open(os.devnull, 'wb') as devnull: - subprocess.check_call([cmd] + args, stderr=devnull, + subprocess.check_call(full_cmd, stderr=devnull, stdout=devnull, stdin=devnull) except OSError: return False + except subprocess.CalledProcessError: + return False else: return True @@ -140,11 +173,11 @@ self.config.read() self.config['plugins'] = [] - self.config['verbose'] = True - self.config['color'] = False + self.config['verbose'] = 1 + self.config['ui']['color'] = False self.config['threaded'] = False - self.libdir = os.path.join(self.temp_dir, 'libdir') + self.libdir = os.path.join(self.temp_dir, b'libdir') os.mkdir(self.libdir) self.config['directory'] = self.libdir @@ -155,7 +188,7 @@ self.lib = Library(dbpath, self.libdir) def teardown_beets(self): - del self.lib._connections + self.lib._close() if 'BEETSDIR' in os.environ: del os.environ['BEETSDIR'] self.remove_temp_dir() @@ -172,20 +205,21 @@ beets.config['plugins'] = plugins beets.plugins.load_plugins(plugins) beets.plugins.find_plugins() - Item._types = beets.plugins.types(Item) - Album._types = beets.plugins.types(Album) + # Take a backup of the original _types to restore when unloading + Item._original_types = dict(Item._types) + Album._original_types = dict(Album._types) + Item._types.update(beets.plugins.types(Item)) + Album._types.update(beets.plugins.types(Album)) def unload_plugins(self): """Unload all plugins and remove the from the configuration. """ # FIXME this should eventually be handled by a plugin manager beets.config['plugins'] = [] - for plugin in beets.plugins._classes: - plugin.listeners = None beets.plugins._classes = set() beets.plugins._instances = {} - Item._types = {} - Album._types = {} + Item._types = Item._original_types + Album._types = Album._original_types def create_importer(self, item_count=1, album_count=1): """Create files to import and return corresponding session. @@ -193,13 +227,13 @@ Copies the specified number of files to a subdirectory of `self.temp_dir` and creates a `TestImportSession` for this path. """ - import_dir = os.path.join(self.temp_dir, 'import') + import_dir = os.path.join(self.temp_dir, b'import') if not os.path.isdir(import_dir): os.mkdir(import_dir) album_no = 0 while album_count: - album = u'album {0}'.format(album_no) + album = util.bytestring_path(u'album {0}'.format(album_no)) album_dir = os.path.join(import_dir, album) if os.path.exists(album_dir): album_no += 1 @@ -210,9 +244,10 @@ track_no = 0 album_item_count = item_count while album_item_count: - title = 'track {0}'.format(track_no) - src = os.path.join(_common.RSRC, 'full.mp3') - dest = os.path.join(album_dir, '{0}.mp3'.format(title)) + title = u'track {0}'.format(track_no) + src = os.path.join(_common.RSRC, b'full.mp3') + title_file = util.bytestring_path('{0}.mp3'.format(title)) + dest = os.path.join(album_dir, title_file) if os.path.exists(dest): track_no += 1 continue @@ -233,7 +268,7 @@ config['import']['autotag'] = False config['import']['resume'] = False - return TestImportSession(self.lib, logfile=None, query=None, + return TestImportSession(self.lib, loghandler=None, query=None, paths=[import_dir]) # Library fixtures methods @@ -273,11 +308,19 @@ If `path` is not set in `values` it is set to `item.destination()`. """ + # When specifying a path, store it normalized (as beets does + # ordinarily). 
+ if 'path' in values: + values['path'] = util.normpath(values['path']) + item = self.create_item(**values) item.add(self.lib) + + # Ensure every item has a path. if 'path' not in values: item['path'] = item.destination() item.store() + return item def add_item_fixture(self, **values): @@ -285,7 +328,8 @@ """ item = self.create_item(**values) extension = item['format'].lower() - item['path'] = os.path.join(_common.RSRC, 'min.' + extension) + item['path'] = os.path.join(_common.RSRC, + util.bytestring_path('min.' + extension)) item.add(self.lib) item.move(copy=True) item.store() @@ -300,9 +344,9 @@ """ # TODO base this on `add_item()` items = [] - path = os.path.join(_common.RSRC, 'full.' + ext) + path = os.path.join(_common.RSRC, util.bytestring_path('full.' + ext)) for i in range(count): - item = Item.from_path(str(path)) + item = Item.from_path(path) item.album = u'\u00e4lbum {0}'.format(i) # Check unicode paths item.title = u't\u00eftle {0}'.format(i) item.add(self.lib) @@ -315,9 +359,9 @@ """Add an album with files to the database. """ items = [] - path = os.path.join(_common.RSRC, 'full.' + ext) + path = os.path.join(_common.RSRC, util.bytestring_path('full.' + ext)) for i in range(track_count): - item = Item.from_path(str(path)) + item = Item.from_path(bytes(path)) item.album = u'\u00e4lbum' # Check unicode paths item.title = u't\u00eftle {0}'.format(i) item.add(self.lib) @@ -326,18 +370,33 @@ items.append(item) return self.lib.add_album(items) - def create_mediafile_fixture(self, ext='mp3'): + def create_mediafile_fixture(self, ext='mp3', images=[]): """Copies a fixture mediafile with the extension to a temporary location and returns the path. It keeps track of the created locations and will delete the with `remove_mediafile_fixtures()` + + `images` is a subset of 'png', 'jpg', and 'tiff'. For each + specified extension a cover art image is added to the media + file. """ - src = os.path.join(_common.RSRC, 'full.' + ext) + src = os.path.join(_common.RSRC, util.bytestring_path('full.' + ext)) handle, path = mkstemp() os.close(handle) shutil.copyfile(src, path) + if images: + mediafile = MediaFile(path) + imgs = [] + for img_ext in images: + file = util.bytestring_path('image-2x3.{0}'.format(img_ext)) + img_path = os.path.join(_common.RSRC, file) + with open(img_path, 'rb') as f: + imgs.append(Image(f.read())) + mediafile.images = imgs + mediafile.save() + if not hasattr(self, '_mediafile_fixtures'): self._mediafile_fixtures = [] self._mediafile_fixtures.append(path) @@ -367,7 +426,7 @@ def run_with_output(self, *args): with capture_stdout() as out: self.run_command(*args) - return out.getvalue() + return out.getvalue().decode('utf-8') # Safe file operations @@ -397,9 +456,9 @@ parent = os.path.dirname(path) if not os.path.isdir(parent): - os.makedirs(parent) + os.makedirs(util.syspath(parent)) - with open(path, 'a+') as f: + with open(util.syspath(path), 'a+') as f: f.write(content) return path @@ -478,14 +537,14 @@ """ tracks = [generate_track_info(id) for id in track_ids] album = AlbumInfo( - album_id='album info', - album='album info', - artist='album info', - artist_id='album info', + album_id=u'album info', + album=u'album info', + artist=u'album info', + artist_id=u'album info', tracks=tracks, ) for field in ALBUM_INFO_FIELDS: - setattr(album, field, 'album info') + setattr(album, field, u'album info') return album @@ -504,11 +563,11 @@ string fields are set to "track info". 
""" track = TrackInfo( - title='track info', + title=u'track info', track_id=track_id, ) for field in TRACK_INFO_FIELDS: - setattr(track, field, 'track info') + setattr(track, field, u'track info') for field, value in values.items(): setattr(track, field, value) return track diff -Nru beets-1.3.8+dfsg/test/__init__.py beets-1.3.19/test/__init__.py --- beets-1.3.8+dfsg/test/__init__.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/__init__.py 2016-06-20 01:53:12.000000000 +0000 @@ -1 +1,5 @@ +# -*- coding: utf-8 -*- + # Make python -m testall.py work. + +from __future__ import division, absolute_import, print_function diff -Nru beets-1.3.8+dfsg/test/lyrics_download_samples.py beets-1.3.19/test/lyrics_download_samples.py --- beets-1.3.8+dfsg/test/lyrics_download_samples.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/lyrics_download_samples.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Fabrice Laporte +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +from __future__ import division, absolute_import, print_function + +import os +import sys +import requests + +import test_lyrics + + +def mkdir_p(path): + try: + os.makedirs(path) + except OSError: + if os.path.isdir(path): + pass + else: + raise + + +def safe_open_w(path): + """Open "path" for writing, creating any parent directories as needed. + """ + mkdir_p(os.path.dirname(path)) + return open(path, 'w') + + +def main(argv=None): + """Download one lyrics sample page per referenced source. + """ + if argv is None: + argv = sys.argv + print(u'Fetching samples from:') + for s in test_lyrics.GOOGLE_SOURCES + test_lyrics.DEFAULT_SOURCES: + print(s['url']) + url = s['url'] + s['path'] + fn = test_lyrics.url_to_filename(url) + if not os.path.isfile(fn): + html = requests.get(url, verify=False).text + with safe_open_w(fn) as f: + f.write(html.encode('utf8')) + +if __name__ == "__main__": + sys.exit(main()) diff -Nru beets-1.3.8+dfsg/test/lyrics_sources.py beets-1.3.19/test/lyrics_sources.py --- beets-1.3.8+dfsg/test/lyrics_sources.py 2014-09-14 20:14:35.000000000 +0000 +++ beets-1.3.19/test/lyrics_sources.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,186 +0,0 @@ -# This file is part of beets. -# Copyright 2014, Fabrice Laporte. -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. 
- -"""Tests for the 'lyrics' plugin""" - -import os -import logging -import _common -from _common import unittest -from beetsplug import lyrics -from beets import config -from beets.util import confit -from bs4 import BeautifulSoup - -log = logging.getLogger('beets') -LYRICS_TEXTS = confit.load_yaml(os.path.join(_common.RSRC, 'lyricstext.yaml')) - -try: - googlekey = config['lyrics']['google_API_key'].get(unicode) -except confit.NotFoundError: - googlekey = None - -# default query for tests -definfo = dict(artist=u'The Beatles', title=u'Lady Madonna') - - -class MockFetchUrl(object): - def __init__(self, pathval='fetched_path'): - self.pathval = pathval - self.fetched = None - - def __call__(self, url, filename=None): - self.fetched = url - url = url.replace('http://', '').replace('www.', '') - fn = "".join(x for x in url if (x.isalnum() or x == '/')) - fn = fn.split('/') - fn = os.path.join('rsrc', 'lyrics', fn[0], fn[-1]) + '.txt' - with open(fn, 'r') as f: - content = f.read() - return content - - -def is_lyrics_content_ok(title, text): - """Compare lyrics text to expected lyrics for given title""" - - setexpected = set(LYRICS_TEXTS[lyrics.slugify(title)].split()) - settext = set(text.split()) - setinter = setexpected.intersection(settext) - # consider lyrics ok if they share 50% or more with the reference - if len(setinter): - ratio = 1.0 * max(len(setexpected), len(settext)) / len(setinter) - return (ratio > .5 and ratio < 2) - return False - - -class LyricsPluginTest(unittest.TestCase): - def setUp(self): - """Set up configuration""" - lyrics.LyricsPlugin() - - def test_default_ok(self): - """Test each lyrics engine with the default query""" - - lyrics.fetch_url = MockFetchUrl() - - for f in (lyrics.fetch_lyricswiki, lyrics.fetch_lyricscom): - res = f(definfo['artist'], definfo['title']) - self.assertTrue(lyrics.is_lyrics(res)) - self.assertTrue(is_lyrics_content_ok(definfo['title'], res)) - - def test_missing_lyrics(self): - self.assertFalse(lyrics.is_lyrics(LYRICS_TEXTS['missing_texts'])) - - -class LyricsScrapingPluginTest(unittest.TestCase): - - # Every source entered in default beets google custom search engine - # must be listed below. - # Use default query when possible, or override artist and title field - # if website don't have lyrics for default query. 
- sourcesOk = [ - dict(definfo, url=u'http://www.smartlyrics.com', - path=u'/Song18148-The-Beatles-Lady-Madonna-lyrics.aspx'), - dict(definfo, url=u'http://www.elyricsworld.com', - path=u'/lady_madonna_lyrics_beatles.html'), - dict(artist=u'Beres Hammond', title=u'I could beat myself', - url=u'http://www.reggaelyrics.info', - path=u'/beres-hammond/i-could-beat-myself'), - dict(definfo, artist=u'Lilly Wood & the prick', title=u"Hey it's ok", - url=u'http://www.lyricsmania.com', - path=u'/hey_its_ok_lyrics_lilly_wood_and_the_prick.html'), - dict(definfo, artist=u'Lilly Wood & the prick', title=u"Hey it's ok", - url=u'http://www.paroles.net/', - path=u'lilly-wood-the-prick/paroles-hey-it-s-ok'), - dict(definfo, artist=u'Amy Winehouse', title=u"Jazz'n'blues", - url=u'http://www.lyricsontop.com', - path=u'/amy-winehouse-songs/jazz-n-blues-lyrics.html'), - dict(definfo, url=u'http://www.sweetslyrics.com', - path=u'/761696.The%20Beatles%20-%20Lady%20Madonna.html'), - dict(definfo, url=u'http://www.lyrics007.com', - path=u'/The%20Beatles%20Lyrics/Lady%20Madonna%20Lyrics.html'), - dict(definfo, url=u'http://www.absolutelyrics.com', - path=u'/lyrics/view/the_beatles/lady_madonna'), - dict(definfo, url=u'http://www.azlyrics.com/', - path=u'/lyrics/beatles/ladymadonna.html'), - dict(definfo, url=u'http://www.chartlyrics.com', - path=u'/_LsLsZ7P4EK-F-LD4dJgDQ/Lady+Madonna.aspx'), - dict(definfo, url='http://www.releaselyrics.com', - path=u'/e35f/the-beatles-lady-madonna'), - ] - - # Websites that can't be scraped yet and whose results must be - # flagged as invalid lyrics. - sourcesFail = [ - dict(definfo, url='http://www.songlyrics.com', - path=u'/the-beatles/lady-madonna-lyrics'), - dict(definfo, url='http://www.metrolyrics.com/', - path='best-for-last-lyrics-adele.html') - ] - - # Websites that return truncated lyrics because of scraping issues, and - # thus should not be included as sources to Google CSE. - # They are good candidates for later inclusion after improvement - # iterations of the scraping algorithm. - sourcesIncomplete = [ - dict(definfo, artist=u'Lilly Wood & the prick', title=u"Hey it's ok", - url=u'http://www.lacoccinelle.net', - path=u'/paroles-officielles/550512.html'), - ] - - def test_sources_ok(self): - for s in self.sourcesOk: - url = s['url'] + s['path'] - res = lyrics.scrape_lyrics_from_url(url) - self.assertTrue(lyrics.is_lyrics(res), url) - self.assertTrue(is_lyrics_content_ok(s['title'], res), url) - - def test_sources_fail(self): - for s in self.sourcesFail: - url = s['url'] + s['path'] - res = lyrics.scrape_lyrics_from_url(url) - # very unlikely these sources pass if the scraping algo is not - # tweaked on purpose for these cases - self.assertFalse(lyrics.is_lyrics(res), "%s => %s" % (url, res)) - - def test_sources_incomplete(self): - for s in self.sourcesIncomplete: - url = s['url'] + s['path'] - res = lyrics.scrape_lyrics_from_url(url) - - self.assertTrue(lyrics.is_lyrics(res)) - # these sources may pass if the html source evolve or after - # a random improvement in the scraping algo: we want to - # be noticed if it's the case. - if is_lyrics_content_ok(s['title'], res): - log.debug(u'Source {0} actually return valid lyrics!' 
- .format(s['url'])) - - def test_is_page_candidate(self): - for s in self.sourcesOk: - url = unicode(s['url'] + s['path']) - html = lyrics.fetch_url(url) - soup = BeautifulSoup(html) - if not soup.title: - continue - self.assertEqual(lyrics.is_page_candidate(url, soup.title.string, - s['title'], s['artist']), - True, url) - - -def suite(): - return unittest.TestLoader().loadTestsFromName(__name__) - -if __name__ == '__main__': - unittest.main(defaultTest='suite') Binary files /tmp/tmpHDbwo1/XiVFhfP1ID/beets-1.3.8+dfsg/test/rsrc/abbey-different.jpg and /tmp/tmpHDbwo1/JoZfM2GcFn/beets-1.3.19/test/rsrc/abbey-different.jpg differ Binary files /tmp/tmpHDbwo1/XiVFhfP1ID/beets-1.3.8+dfsg/test/rsrc/abbey.jpg and /tmp/tmpHDbwo1/JoZfM2GcFn/beets-1.3.19/test/rsrc/abbey.jpg differ Binary files /tmp/tmpHDbwo1/XiVFhfP1ID/beets-1.3.8+dfsg/test/rsrc/abbey-similar.jpg and /tmp/tmpHDbwo1/JoZfM2GcFn/beets-1.3.19/test/rsrc/abbey-similar.jpg differ diff -Nru beets-1.3.8+dfsg/test/rsrc/beetsplug/test.py beets-1.3.19/test/rsrc/beetsplug/test.py --- beets-1.3.8+dfsg/test/rsrc/beetsplug/test.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/rsrc/beetsplug/test.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,3 +1,7 @@ +# -*- coding: utf-8 -*- + +from __future__ import division, absolute_import, print_function + from beets.plugins import BeetsPlugin from beets import ui @@ -12,7 +16,7 @@ test.func = lambda *args: None # Used in CompletionTest - test.parser.add_option('-o', '--option', dest='my_opt') + test.parser.add_option(u'-o', u'--option', dest='my_opt') plugin = ui.Subcommand('plugin') plugin.func = lambda *args: None Binary files /tmp/tmpHDbwo1/XiVFhfP1ID/beets-1.3.8+dfsg/test/rsrc/beetsplug/test.pyc and /tmp/tmpHDbwo1/JoZfM2GcFn/beets-1.3.19/test/rsrc/beetsplug/test.pyc differ diff -Nru beets-1.3.8+dfsg/test/rsrc/convert_stub.py beets-1.3.19/test/rsrc/convert_stub.py --- beets-1.3.8+dfsg/test/rsrc/convert_stub.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/rsrc/convert_stub.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,32 @@ +#!/usr/bin/env python + +"""A tiny tool used to test the `convert` plugin. It copies a file and appends +a specified text tag. +""" + +from __future__ import division, absolute_import, print_function +import sys +import platform + + +def convert(in_file, out_file, tag): + """Copy `in_file` to `out_file` and append the string `tag`. + """ + # On Python 3, encode the tag argument as bytes. + if not isinstance(tag, bytes): + tag = tag.encode('utf8') + + # On Windows, use Unicode paths. (The test harness gives them to us + # as UTF-8 bytes.) 
+ if platform.system() == 'Windows': + in_file = in_file.decode('utf8') + out_file = out_file.decode('utf8') + + with open(out_file, 'wb') as out_f: + with open(in_file, 'rb') as in_f: + out_f.write(in_f.read()) + out_f.write(tag) + + +if __name__ == '__main__': + convert(sys.argv[1], sys.argv[2], sys.argv[3]) Binary files /tmp/tmpHDbwo1/XiVFhfP1ID/beets-1.3.8+dfsg/test/rsrc/date_with_slashes.ogg and /tmp/tmpHDbwo1/JoZfM2GcFn/beets-1.3.19/test/rsrc/date_with_slashes.ogg differ Binary files /tmp/tmpHDbwo1/XiVFhfP1ID/beets-1.3.8+dfsg/test/rsrc/image.ape and /tmp/tmpHDbwo1/JoZfM2GcFn/beets-1.3.19/test/rsrc/image.ape differ Binary files /tmp/tmpHDbwo1/XiVFhfP1ID/beets-1.3.8+dfsg/test/rsrc/image_unknown_type.mp3 and /tmp/tmpHDbwo1/JoZfM2GcFn/beets-1.3.19/test/rsrc/image_unknown_type.mp3 differ diff -Nru beets-1.3.8+dfsg/test/rsrc/itunes_library_unix.xml beets-1.3.19/test/rsrc/itunes_library_unix.xml --- beets-1.3.8+dfsg/test/rsrc/itunes_library_unix.xml 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/rsrc/itunes_library_unix.xml 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,167 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>Major Version</key><integer>1</integer> + <key>Minor Version</key><integer>1</integer> + <key>Date</key><date>2015-05-08T14:36:28Z</date> + <key>Application Version</key><string>12.1.2.27</string> + <key>Features</key><integer>5</integer> + <key>Show Content Ratings</key><true/> + <key>Music Folder</key><string>file:////Music/</string> + <key>Library Persistent ID</key><string>1ABA8417E4946A32</string> + <key>Tracks</key> + <dict> + <key>634</key> + <dict> + <key>Track ID</key><integer>634</integer> + <key>Name</key><string>Tessellate</string> + <key>Artist</key><string>alt-J</string> + <key>Album Artist</key><string>alt-J</string> + <key>Album</key><string>An Awesome Wave</string> + <key>Genre</key><string>Alternative</string> + <key>Kind</key><string>MPEG audio file</string> + <key>Size</key><integer>5525212</integer> + <key>Total Time</key><integer>182674</integer> + <key>Disc Number</key><integer>1</integer> + <key>Disc Count</key><integer>1</integer> + <key>Track Number</key><integer>3</integer> + <key>Track Count</key><integer>13</integer> + <key>Year</key><integer>2012</integer> + <key>Date Modified</key><date>2015-02-02T15:23:08Z</date> + <key>Date Added</key><date>2014-04-24T09:28:38Z</date> + <key>Bit Rate</key><integer>238</integer> + <key>Sample Rate</key><integer>44100</integer> + <key>Play Count</key><integer>0</integer> + <key>Play Date</key><integer>3513593824</integer> + <key>Skip Count</key><integer>3</integer> + <key>Skip Date</key><date>2015-02-05T15:41:04Z</date> + <key>Rating</key><integer>80</integer> + <key>Album Rating</key><integer>80</integer> + <key>Album Rating Computed</key><true/> + <key>Artwork Count</key><integer>1</integer> + <key>Sort Album</key><string>Awesome Wave</string> + <key>Sort Artist</key><string>alt-J</string> + <key>Persistent ID</key><string>20E89D1580C31363</string> + <key>Track Type</key><string>File</string> + <key>Location</key><string>file:///Music/Alt-J/An%20Awesome%20Wave/03%20Tessellate.mp3</string> + <key>File Folder Count</key><integer>4</integer> + <key>Library Folder Count</key><integer>2</integer> + </dict> + <key>636</key> + <dict> + <key>Track ID</key><integer>636</integer> + <key>Name</key><string>Breezeblocks</string> + 
<key>Artist</key><string>alt-J</string> + <key>Album Artist</key><string>alt-J</string> + <key>Album</key><string>An Awesome Wave</string> + <key>Genre</key><string>Alternative</string> + <key>Kind</key><string>MPEG audio file</string> + <key>Size</key><integer>6827195</integer> + <key>Total Time</key><integer>227082</integer> + <key>Disc Number</key><integer>1</integer> + <key>Disc Count</key><integer>1</integer> + <key>Track Number</key><integer>4</integer> + <key>Track Count</key><integer>13</integer> + <key>Year</key><integer>2012</integer> + <key>Date Modified</key><date>2015-02-02T15:23:08Z</date> + <key>Date Added</key><date>2014-04-24T09:28:38Z</date> + <key>Bit Rate</key><integer>237</integer> + <key>Sample Rate</key><integer>44100</integer> + <key>Play Count</key><integer>31</integer> + <key>Play Date</key><integer>3513594051</integer> + <key>Play Date UTC</key><date>2015-05-04T12:20:51Z</date> + <key>Skip Count</key><integer>0</integer> + <key>Rating</key><integer>100</integer> + <key>Album Rating</key><integer>80</integer> + <key>Album Rating Computed</key><true/> + <key>Artwork Count</key><integer>1</integer> + <key>Sort Album</key><string>Awesome Wave</string> + <key>Sort Artist</key><string>alt-J</string> + <key>Persistent ID</key><string>D7017B127B983D38</string> + <key>Track Type</key><string>File</string> + <key>Location</key><string>file://localhost/Music/Alt-J/An%20Awesome%20Wave/04%20Breezeblocks.mp3</string> + <key>File Folder Count</key><integer>4</integer> + <key>Library Folder Count</key><integer>2</integer> + </dict> + <key>638</key> + <dict> + <key>Track ID</key><integer>638</integer> + <key>Name</key><string>❦ (Ripe & Ruin)</string> + <key>Artist</key><string>alt-J</string> + <key>Album Artist</key><string>alt-J</string> + <key>Album</key><string>An Awesome Wave</string> + <key>Kind</key><string>MPEG audio file</string> + <key>Size</key><integer>2173293</integer> + <key>Total Time</key><integer>72097</integer> + <key>Disc Number</key><integer>1</integer> + <key>Disc Count</key><integer>1</integer> + <key>Track Number</key><integer>2</integer> + <key>Track Count</key><integer>13</integer> + <key>Year</key><integer>2012</integer> + <key>Date Modified</key><date>2015-05-09T17:04:53Z</date> + <key>Date Added</key><date>2015-02-02T15:28:39Z</date> + <key>Bit Rate</key><integer>233</integer> + <key>Sample Rate</key><integer>44100</integer> + <key>Play Count</key><integer>8</integer> + <key>Play Date</key><integer>3514109973</integer> + <key>Play Date UTC</key><date>2015-05-10T11:39:33Z</date> + <key>Skip Count</key><integer>1</integer> + <key>Skip Date</key><date>2015-02-02T15:29:10Z</date> + <key>Album Rating</key><integer>80</integer> + <key>Album Rating Computed</key><true/> + <key>Artwork Count</key><integer>1</integer> + <key>Sort Album</key><string>Awesome Wave</string> + <key>Sort Artist</key><string>alt-J</string> + <key>Persistent ID</key><string>183699FA0554D0E6</string> + <key>Track Type</key><string>File</string> + <key>Location</key><string>file:///Music/Alt-J/An%20Awesome%20Wave/02%20%E2%9D%A6%20(Ripe%20&%20Ruin).mp3</string> + <key>File Folder Count</key><integer>4</integer> + <key>Library Folder Count</key><integer>2</integer> + </dict> + </dict> + <key>Playlists</key> + <array> + <dict> + <key>Name</key><string>Library</string> + <key>Master</key><true/> + <key>Playlist ID</key><integer>11480</integer> + <key>Playlist Persistent ID</key><string>CD6FF684E7A6A166</string> + <key>Visible</key><false/> + <key>All Items</key><true/> + <key>Playlist 
Items</key> + <array> + <dict> + <key>Track ID</key><integer>634</integer> + </dict> + <dict> + <key>Track ID</key><integer>636</integer> + </dict> + <dict> + <key>Track ID</key><integer>638</integer> + </dict> + </array> + </dict> + <dict> + <key>Name</key><string>Music</string> + <key>Playlist ID</key><integer>16906</integer> + <key>Playlist Persistent ID</key><string>4FB2E64E0971DD45</string> + <key>Distinguished Kind</key><integer>4</integer> + <key>Music</key><true/> + <key>All Items</key><true/> + <key>Playlist Items</key> + <array> + <dict> + <key>Track ID</key><integer>634</integer> + </dict> + <dict> + <key>Track ID</key><integer>636</integer> + </dict> + <dict> + <key>Track ID</key><integer>638</integer> + </dict> + </array> + </dict> + </array> +</dict> +</plist> diff -Nru beets-1.3.8+dfsg/test/rsrc/itunes_library_windows.xml beets-1.3.19/test/rsrc/itunes_library_windows.xml --- beets-1.3.8+dfsg/test/rsrc/itunes_library_windows.xml 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/rsrc/itunes_library_windows.xml 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,167 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>Major Version</key><integer>1</integer> + <key>Minor Version</key><integer>1</integer> + <key>Date</key><date>2015-05-11T15:27:14Z</date> + <key>Application Version</key><string>12.1.2.27</string> + <key>Features</key><integer>5</integer> + <key>Show Content Ratings</key><true/> + <key>Music Folder</key><string>file://localhost/C:/Documents%20and%20Settings/Owner/My%20Documents/My%20Music/iTunes/iTunes%20Media/</string> + <key>Library Persistent ID</key><string>B4C9F3EE26EFAF78</string> + <key>Tracks</key> + <dict> + <key>180</key> + <dict> + <key>Track ID</key><integer>180</integer> + <key>Name</key><string>Tessellate</string> + <key>Artist</key><string>alt-J</string> + <key>Album Artist</key><string>alt-J</string> + <key>Album</key><string>An Awesome Wave</string> + <key>Genre</key><string>Alternative</string> + <key>Kind</key><string>MPEG audio file</string> + <key>Size</key><integer>5525212</integer> + <key>Total Time</key><integer>182674</integer> + <key>Disc Number</key><integer>1</integer> + <key>Disc Count</key><integer>1</integer> + <key>Track Number</key><integer>3</integer> + <key>Track Count</key><integer>13</integer> + <key>Year</key><integer>2012</integer> + <key>Date Modified</key><date>2015-02-02T15:23:08Z</date> + <key>Date Added</key><date>2014-04-24T09:28:38Z</date> + <key>Bit Rate</key><integer>238</integer> + <key>Sample Rate</key><integer>44100</integer> + <key>Play Count</key><integer>0</integer> + <key>Play Date</key><integer>3513593824</integer> + <key>Skip Count</key><integer>3</integer> + <key>Skip Date</key><date>2015-02-05T15:41:04Z</date> + <key>Rating</key><integer>80</integer> + <key>Album Rating</key><integer>80</integer> + <key>Album Rating Computed</key><true/> + <key>Artwork Count</key><integer>1</integer> + <key>Sort Album</key><string>Awesome Wave</string> + <key>Sort Artist</key><string>alt-J</string> + <key>Persistent ID</key><string>20E89D1580C31363</string> + <key>Track Type</key><string>File</string> + <key>Location</key><string>file://localhost/G:/Music/Alt-J/An%20Awesome%20Wave/03%20Tessellate.mp3</string> + <key>File Folder Count</key><integer>-1</integer> + <key>Library Folder Count</key><integer>-1</integer> + </dict> + <key>183</key> + <dict> + <key>Track 
ID</key><integer>183</integer> + <key>Name</key><string>Breezeblocks</string> + <key>Artist</key><string>alt-J</string> + <key>Album Artist</key><string>alt-J</string> + <key>Album</key><string>An Awesome Wave</string> + <key>Genre</key><string>Alternative</string> + <key>Kind</key><string>MPEG audio file</string> + <key>Size</key><integer>6827195</integer> + <key>Total Time</key><integer>227082</integer> + <key>Disc Number</key><integer>1</integer> + <key>Disc Count</key><integer>1</integer> + <key>Track Number</key><integer>4</integer> + <key>Track Count</key><integer>13</integer> + <key>Year</key><integer>2012</integer> + <key>Date Modified</key><date>2015-02-02T15:23:08Z</date> + <key>Date Added</key><date>2014-04-24T09:28:38Z</date> + <key>Bit Rate</key><integer>237</integer> + <key>Sample Rate</key><integer>44100</integer> + <key>Play Count</key><integer>31</integer> + <key>Play Date</key><integer>3513594051</integer> + <key>Play Date UTC</key><date>2015-05-04T12:20:51Z</date> + <key>Skip Count</key><integer>0</integer> + <key>Rating</key><integer>100</integer> + <key>Album Rating</key><integer>80</integer> + <key>Album Rating Computed</key><true/> + <key>Artwork Count</key><integer>1</integer> + <key>Sort Album</key><string>Awesome Wave</string> + <key>Sort Artist</key><string>alt-J</string> + <key>Persistent ID</key><string>D7017B127B983D38</string> + <key>Track Type</key><string>File</string> + <key>Location</key><string>file://localhost/G:/Music/Alt-J/An%20Awesome%20Wave/04%20Breezeblocks.mp3</string> + <key>File Folder Count</key><integer>-1</integer> + <key>Library Folder Count</key><integer>-1</integer> + </dict> + <key>638</key> + <dict> + <key>Track ID</key><integer>638</integer> + <key>Name</key><string>❦ (Ripe & Ruin)</string> + <key>Artist</key><string>alt-J</string> + <key>Album Artist</key><string>alt-J</string> + <key>Album</key><string>An Awesome Wave</string> + <key>Kind</key><string>MPEG audio file</string> + <key>Size</key><integer>2173293</integer> + <key>Total Time</key><integer>72097</integer> + <key>Disc Number</key><integer>1</integer> + <key>Disc Count</key><integer>1</integer> + <key>Track Number</key><integer>2</integer> + <key>Track Count</key><integer>13</integer> + <key>Year</key><integer>2012</integer> + <key>Date Modified</key><date>2015-05-09T17:04:53Z</date> + <key>Date Added</key><date>2015-02-02T15:28:39Z</date> + <key>Bit Rate</key><integer>233</integer> + <key>Sample Rate</key><integer>44100</integer> + <key>Play Count</key><integer>8</integer> + <key>Play Date</key><integer>3514109973</integer> + <key>Play Date UTC</key><date>2015-05-10T11:39:33Z</date> + <key>Skip Count</key><integer>1</integer> + <key>Skip Date</key><date>2015-02-02T15:29:10Z</date> + <key>Album Rating</key><integer>80</integer> + <key>Album Rating Computed</key><true/> + <key>Artwork Count</key><integer>1</integer> + <key>Sort Album</key><string>Awesome Wave</string> + <key>Sort Artist</key><string>alt-J</string> + <key>Persistent ID</key><string>183699FA0554D0E6</string> + <key>Track Type</key><string>File</string> + <key>Location</key><string>file://localhost/G:/Experiments/Alt-J/An%20Awesome%20Wave/02%20%E2%9D%A6%20(Ripe%20&%20Ruin).mp3</string> + <key>File Folder Count</key><integer>4</integer> + <key>Library Folder Count</key><integer>2</integer> + </dict> + </dict> + <key>Playlists</key> + <array> + <dict> + <key>Name</key><string>Bibliotheek</string> + <key>Master</key><true/> + <key>Playlist ID</key><integer>72</integer> + <key>Playlist Persistent 
ID</key><string>728AA5B1D00ED23B</string> + <key>Visible</key><false/> + <key>All Items</key><true/> + <key>Playlist Items</key> + <array> + <dict> + <key>Track ID</key><integer>180</integer> + </dict> + <dict> + <key>Track ID</key><integer>183</integer> + </dict> + <dict> + <key>Track ID</key><integer>638</integer> + </dict> + </array> + </dict> + <dict> + <key>Name</key><string>Muziek</string> + <key>Playlist ID</key><integer>103</integer> + <key>Playlist Persistent ID</key><string>8120A002B0486AD7</string> + <key>Distinguished Kind</key><integer>4</integer> + <key>Music</key><true/> + <key>All Items</key><true/> + <key>Playlist Items</key> + <array> + <dict> + <key>Track ID</key><integer>180</integer> + </dict> + <dict> + <key>Track ID</key><integer>183</integer> + </dict> + <dict> + <key>Track ID</key><integer>638</integer> + </dict> + </array> + </dict> + </array> +</dict> +</plist> diff -Nru beets-1.3.8+dfsg/test/rsrc/lyrics/examplecom/beetssong.txt beets-1.3.19/test/rsrc/lyrics/examplecom/beetssong.txt --- beets-1.3.8+dfsg/test/rsrc/lyrics/examplecom/beetssong.txt 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/rsrc/lyrics/examplecom/beetssong.txt 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,360 @@ +<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" +"http://www.w3.org/TR/html4/loose.dtd"> +<html> +<head> +<title>John Doe - beets song Lyrics + + + + + + + + + + + + + + +

[The HTML markup of this lyrics-page fixture was garbled in extraction; only the recoverable page text is kept below.]
John Doe - beets song lyrics
Beets is the media library management system for obsessive-compulsive music geeks.
The purpose of beets is to get your music collection right once and for all. It catalogs your collection, automatically improving its metadata as it goes. It then provides a bouquet of tools for manipulating and accessing your music.
Here's an example of beets' brainy tag corrector doing its thing:
Because beets is designed as a library, it can do almost anything you can imagine for your music collection. Via plugins, beets becomes a panacea
+ + diff -Nru beets-1.3.8+dfsg/test/rsrc/lyricstext.yaml beets-1.3.19/test/rsrc/lyricstext.yaml --- beets-1.3.8+dfsg/test/rsrc/lyricstext.yaml 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/rsrc/lyricstext.yaml 2016-06-20 01:53:12.000000000 +0000 @@ -1,152 +1,42 @@ -Lady_Madonna: | - Lady Madonna, children at your feet - Wonder how you manage to make ends meet - Who finds the money? When you pay the rent? - Did you think that money was Heaven sent? - Friday night arrives without a suitcase - Sunday morning creep in like a nun - Monday's child has learned to tie his bootlace - See how they run - - Lady Madonna, baby at your breast - Wonder how you manage to feed the rest - - See how they run - - Lady Madonna, lying on the bed - Listen to the music playing in your head - - Tuesday afternoon is never ending - Wednesday morning papers didn't come - Thursday night you stockings needed mending - See how they run - - Lady Madonna, children at your feet - Wonder how you manage to make ends meet - -Jazz_n_blues: | - It's always gone within two days. - Follow my father, his extravagant ways. - So if I got it I will spend it all, - Camden and Parkway 'til I hit a wall. - - I cross my fingers at the cash machine, - As I check my balance I kiss the screen. - I love it when it says I got the means, - To go to Miss Sixty and pick up my new jeans. - - Chorus: (x2) - Never lasts me long, - Handle finance wrong, - Blow it all on bags and shoes, - Jazz 'n blues. - - Standin' too deep at the bar today, - Wait with impatience to throw my cash away. - Four white Russians a JD and Coke, - Buy their drinks all night and now I am broke. - But that's cool 'cause I can borrow more from you, - And I didn't forget about that fifty pound, too. - Tell you what, my advance is comin' through, - I'll take you out shoppin', - Can you wait 'til next June? Yeah. - - Chorus: - Never lasts me long, - Handle finance wrong (handle it wrong), - Blow it all on bags and shoes, - Jazz 'n blues (jazz and blues). - Never lasts me long (long), - Handle finance wrong (wrong), - Blow it all on bags and shoes (blow it all), - Jazz 'n blues. - -I_could_beat_myself: | - oooh ahh, I'm hurting, bad - oooh ahh I'm hurting bad! - - I did not see what I was supposed to see - taking it easy when a friend told me I was in - danger, so much danger. - Underestimating my woman, not taking her - out, working too hard and now she's gone off - with a stranger, someone I don't even know. - - I should have taken her out, every once and a - while, taken her to dinner on the finer side, shown - her a life that was all worth while, now I guese I gotta - walk an extra mile! I could beat myself! Ahh yeah. - - Now I'm gonna feel funny out there in the crowd - when my friends all ask me, where is your woman? - long time I don't see. Now I've got to think fast gotta - use my head, give a good story and make sure they - buy my version of the situation. - - All the while I wouldn't lie I'm gonna do this once - see my reputation sinking in the distance, if they - knew the truth that really existed...then my little sanity - would be wasted! I could beat myself ahh yes..I could - beat myself!!! Ohh lord lord. - - oooh ooh I'm hurting, hurting inside - oooh ooh I'm hurting - - Now I really want to hear a little news now and - then, this is not what I expect to hear from my - friend I'm dissapointed. 
He should realize that its - gonna destroy my position (remember I'm a name - brand) now I've gotta see....I should have held her tight - every once and a while, gotten it together on the finer style - shown her a life that was all worth while, now it seems I gotta - walk an extra mile....I could beat myself..ahh yes, I could beat myself - ooh lord - - ooh I'm hurting - ooooh I'm hurting,hurting inside - - I don't know what I wanna tell you - but I wanna tell you something real, real good yes - someting to make you - wanna shiver - -Hey_it_s_ok: | - Mama, Papa, please forget the times - I wet my bed - I swear not to do it again - Please forgive the way I looked - when I was fourteen - I didn't know who I wanted to be - - Hey It's OK, It's OK - Cause I've found what i wanted - Hey It's OK, I'ts Ok - Cause I've found what i wanted - - Friends and lovers please forgive - the mean things I've said - I swear not to do again - Please forgive the way I act when - I've had too much to drink - I'm fighting against myself - - Hey It's OK, It's OK - Cause I've found what i wanted - Hey It's OK, I'ts Ok - - Cause I've found what i wanted - - And I swear not to do anything funny anymore - yes I swear not to do anything funny anymore - - Hey It's OK, It's OK - Cause I've found what i wanted - Hey It's OK, I'ts Ok - Cause I've found what i wanted - Hey It's OK, It's OK - Cause I've found what i wanted - Hey It's OK, I'ts Ok - Cause I've found what i wanted +Beets_song: + - geeks + - bouquet + - panacea + +Amsterdam: + - oriflammes + - fortune + - batave + - pissent + +Lady_Madonna: + - heaven + - tuesday + - thursday + +Jazz_n_blues: + - parkway + - balance + - impatient + - shoes + +Hey_it_s_ok: + - swear + - forgive + - drink + - found + +City_of_dreams: + - groves + - landmarks + - twilight + - freeways + +Black_magic_woman: + - devil + - magic + - spell + - heart missing_texts: | Lyricsmania staff is working hard for you to add $TITLE lyrics as soon Binary files /tmp/tmpHDbwo1/XiVFhfP1ID/beets-1.3.8+dfsg/test/rsrc/only-magic-bytes.jpg and /tmp/tmpHDbwo1/JoZfM2GcFn/beets-1.3.19/test/rsrc/only-magic-bytes.jpg differ Binary files /tmp/tmpHDbwo1/XiVFhfP1ID/beets-1.3.8+dfsg/test/rsrc/pure.wma and /tmp/tmpHDbwo1/JoZfM2GcFn/beets-1.3.19/test/rsrc/pure.wma differ Binary files /tmp/tmpHDbwo1/XiVFhfP1ID/beets-1.3.8+dfsg/test/rsrc/soundcheck-nonascii.m4a and /tmp/tmpHDbwo1/JoZfM2GcFn/beets-1.3.19/test/rsrc/soundcheck-nonascii.m4a differ diff -Nru beets-1.3.8+dfsg/test/rsrc/spotify/missing_request.json beets-1.3.19/test/rsrc/spotify/missing_request.json --- beets-1.3.8+dfsg/test/rsrc/spotify/missing_request.json 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/rsrc/spotify/missing_request.json 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,11 @@ +{ + "tracks" : { + "href" : "https://api.spotify.com/v1/search?query=duifhjslkef+album%3Alkajsdflakjsd+artist%3A&offset=0&limit=20&type=track", + "items" : [ ], + "limit" : 20, + "next" : null, + "offset" : 0, + "previous" : null, + "total" : 0 + } +} \ No newline at end of file diff -Nru beets-1.3.8+dfsg/test/rsrc/spotify/track_request.json beets-1.3.19/test/rsrc/spotify/track_request.json --- beets-1.3.8+dfsg/test/rsrc/spotify/track_request.json 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/rsrc/spotify/track_request.json 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,89 @@ +{ + "tracks":{ + "href":"https://api.spotify.com/v1/search?query=Happy+album%3ADespicable+Me+2+artist%3APharrell+Williams&offset=0&limit=20&type=track", + "items":[ + { + 
"album":{ + "album_type":"compilation", + "available_markets":[ + "AD", "AR", "AT", "AU", "BE", "BG", "BO", "BR", "CA", + "CH", "CL", "CO", "CR", "CY", "CZ", "DE", "DK", "DO", + "EC", "EE", "ES", "FI", "FR", "GB", "GR", "GT", "HK", + "HN", "HU", "IE", "IS", "IT", "LI", "LT", "LU", "LV", + "MC", "MT", "MX", "MY", "NI", "NL", "NO", "NZ", "PA", + "PE", "PH", "PL", "PT", "PY", "RO", "SE", "SG", "SI", + "SK", "SV", "TR", "TW", "US", "UY" + ], + "external_urls":{ + "spotify":"https://open.spotify.com/album/5l3zEmMrOhOzG8d8s83GOL" + }, + "href":"https://api.spotify.com/v1/albums/5l3zEmMrOhOzG8d8s83GOL", + "id":"5l3zEmMrOhOzG8d8s83GOL", + "images":[ + { + "height":640, + "width":640, + "url":"https://i.scdn.co/image/cb7905340c132365bbaee3f17498f062858382e8" + }, + { + "height":300, + "width":300, + "url":"https://i.scdn.co/image/af369120f0b20099d6784ab31c88256113f10ffb" + }, + { + "height":64, + "width":64, + "url":"https://i.scdn.co/image/9dad385ddf2e7db0bef20cec1fcbdb08689d9ae8" + } + ], + "name":"Despicable Me 2 (Original Motion Picture Soundtrack)", + "type":"album", + "uri":"spotify:album:5l3zEmMrOhOzG8d8s83GOL" + }, + "artists":[ + { + "external_urls":{ + "spotify":"https://open.spotify.com/artist/2RdwBSPQiwcmiDo9kixcl8" + }, + "href":"https://api.spotify.com/v1/artists/2RdwBSPQiwcmiDo9kixcl8", + "id":"2RdwBSPQiwcmiDo9kixcl8", + "name":"Pharrell Williams", + "type":"artist", + "uri":"spotify:artist:2RdwBSPQiwcmiDo9kixcl8" + } + ], + "available_markets":[ + "AD", "AR", "AT", "AU", "BE", "BG", "BO", "BR", "CA", + "CH", "CL", "CO", "CR", "CY", "CZ", "DE", "DK", "DO", + "EC", "EE", "ES", "FI", "FR", "GB", "GR", "GT", "HK", + "HN", "HU", "IE", "IS", "IT", "LI", "LT", "LU", "LV", + "MC", "MT", "MX", "MY", "NI", "NL", "NO", "NZ", "PA", + "PE", "PH", "PL", "PT", "PY", "RO", "SE", "SG", "SI", + "SK", "SV", "TR", "TW", "US", "UY" + ], + "disc_number":1, + "duration_ms":233305, + "explicit":false, + "external_ids":{ + "isrc":"USQ4E1300686" + }, + "external_urls":{ + "spotify":"https://open.spotify.com/track/6NPVjNh8Jhru9xOmyQigds" + }, + "href":"https://api.spotify.com/v1/tracks/6NPVjNh8Jhru9xOmyQigds", + "id":"6NPVjNh8Jhru9xOmyQigds", + "name":"Happy", + "popularity":89, + "preview_url":"https://p.scdn.co/mp3-preview/6b00000be293e6b25f61c33e206a0c522b5cbc87", + "track_number":4, + "type":"track", + "uri":"spotify:track:6NPVjNh8Jhru9xOmyQigds" + } + ], + "limit":20, + "next":null, + "offset":0, + "previous":null, + "total":1 + } +} diff -Nru beets-1.3.8+dfsg/test/rsrc/test_completion.sh beets-1.3.19/test/rsrc/test_completion.sh --- beets-1.3.8+dfsg/test/rsrc/test_completion.sh 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/rsrc/test_completion.sh 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,185 @@ +# Function stub +compopt() { return 0; } + +initcli() { + COMP_WORDS=( "beet" "$@" ) + let COMP_CWORD=${#COMP_WORDS[@]}-1 + COMP_LINE="${COMP_WORDS[@]}" + let COMP_POINT=${#COMP_LINE} + _beet +} + +completes() { + for word in "$@"; do + [[ " ${COMPREPLY[@]} " == *[[:space:]]$word[[:space:]]* ]] || return 1 + done +} + +COMMANDS='fields import list update remove + stats version modify move write + help' + +HELP_OPTS='-h --help' + + +test_commands() { + initcli '' && + completes $COMMANDS && + + initcli -v '' && + completes $COMMANDS && + + initcli -l help '' && + completes $COMMANDS && + + initcli -d list '' && + completes $COMMANDS && + + initcli -h '' && + completes $COMMANDS && + true +} + +test_command_aliases() { + initcli ls && + completes list && + + initcli l && + ! 
completes ls && + + initcli im && + completes import && + true +} + +test_global_opts() { + initcli - && + completes \ + -l --library \ + -d --directory \ + -h --help \ + -c --config \ + -v --verbose && + true +} + + +test_global_file_opts() { + # FIXME somehow file completion only works when the completion + # function is called by the shell completion utilities. So we can't + # test it here + initcli --library '' && + completes $(compgen -d) && + + initcli -l '' && + completes $(compgen -d) && + + initcli --config '' && + completes $(compgen -d) && + + initcli -c '' && + completes $(compgen -d) && + true +} + + +test_global_dir_opts() { + initcli --directory '' && + completes $(compgen -d) && + + initcli -d '' && + completes $(compgen -d) && + true +} + + +test_fields_command() { + initcli fields - && + completes -h --help && + + initcli fields '' && + completes $(compgen -d) && + true +} + + +test_import_files() { + initcli import '' && + completes $(compgen -d) && + + initcli import --copy -P '' && + completes $(compgen -d) && + + initcli import --log '' && + completes $(compgen -d) && + true +} + + +test_import_options() { + initcli imp - + completes \ + -h --help \ + -c --copy -C --nocopy \ + -w --write -W --nowrite \ + -a --autotag -A --noautotag \ + -p --resume -P --noresume \ + -l --log --flat +} + + +test_list_options() { + initcli list - + completes \ + -h --help \ + -a --album \ + -p --path +} + +test_list_query() { + initcli list 'x' && + [[ -z "${COMPREPLY[@]}" ]] && + + initcli list 'art' && + completes \ + 'artist:' \ + 'artpath:' && + + initcli list 'artits:x' && + [[ -z "${COMPREPLY[@]}" ]] && + true +} + +test_help_command() { + initcli help '' && + completes $COMMANDS && + true +} + +test_plugin_command() { + initcli te && + completes test && + + initcli test - && + completes -o --option && + true +} + +run_tests() { + local tests=$(set | \ + grep --extended-regexp --only-matching '^test_[a-zA-Z_]* \(\) $' |\ + grep --extended-regexp --only-matching '[a-zA-Z_]*' + ) + local fail=0 + + if [[ -n $@ ]]; then + tests="$@" + fi + + for t in $tests; do + $t || { fail=1 && echo "$t failed" >&2; } + done + return $fail +} + +run_tests "$@" && echo "completion tests passed" Binary files /tmp/tmpHDbwo1/XiVFhfP1ID/beets-1.3.8+dfsg/test/rsrc/unicode’d.mp3 and /tmp/tmpHDbwo1/JoZfM2GcFn/beets-1.3.19/test/rsrc/unicode’d.mp3 differ diff -Nru beets-1.3.8+dfsg/test/testall.py beets-1.3.19/test/testall.py --- beets-1.3.8+dfsg/test/testall.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/testall.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,7 +1,8 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,23 +15,18 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. +from __future__ import division, absolute_import, print_function + import os import re import sys -from _common import unittest +from test._common import unittest pkgpath = os.path.dirname(__file__) or '.' 
sys.path.append(pkgpath) os.chdir(pkgpath) -# Make sure we use local version of beetsplug and not system namespaced version -# for tests -try: - del sys.modules["beetsplug"] -except KeyError: - pass - def suite(): s = unittest.TestSuite() @@ -43,5 +39,6 @@ s.addTest(__import__(modname).suite()) return s + if __name__ == '__main__': unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_art.py beets-1.3.19/test/test_art.py --- beets-1.3.8+dfsg/test/test_art.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_art.py 2016-06-26 00:42:09.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,170 +15,224 @@ """Tests for the album art fetchers.""" +from __future__ import division, absolute_import, print_function + import os import shutil import responses +from mock import patch -import _common -from _common import unittest +from test import _common +from test._common import unittest from beetsplug import fetchart from beets.autotag import AlbumInfo, AlbumMatch +from beets import config from beets import library from beets import importer -from beets import config +from beets import logging +from beets import util +from beets.util.artresizer import ArtResizer, WEBPROXY +from beets.util import confit + + +logger = logging.getLogger('beets.test_art') + + +class UseThePlugin(_common.TestCase): + def setUp(self): + super(UseThePlugin, self).setUp() + self.plugin = fetchart.FetchArtPlugin() -class FetchImageTest(_common.TestCase): +class FetchImageHelper(_common.TestCase): + """Helper mixin for mocking requests when fetching images + with remote art sources. 
+ """ @responses.activate def run(self, *args, **kwargs): - super(FetchImageTest, self).run(*args, **kwargs) + super(FetchImageHelper, self).run(*args, **kwargs) - def mock_response(self, content_type): - responses.add(responses.GET, 'http://example.com', - content_type=content_type) + IMAGEHEADER = {'image/jpeg': b'\x00' * 6 + b'JFIF', + 'image/png': b'\211PNG\r\n\032\n', } + + def mock_response(self, url, content_type='image/jpeg', file_type=None): + if file_type is None: + file_type = content_type + responses.add(responses.GET, url, + content_type=content_type, + # imghdr reads 32 bytes + body=self.IMAGEHEADER.get( + file_type, b'').ljust(32, b'\x00')) + + +class FetchImageTest(FetchImageHelper, UseThePlugin): + URL = 'http://example.com/test.jpg' + + def setUp(self): + super(FetchImageTest, self).setUp() + self.dpath = os.path.join(self.temp_dir, b'arttest') + self.source = fetchart.RemoteArtSource(logger, self.plugin.config) + self.extra = {'maxwidth': 0} + self.candidate = fetchart.Candidate(logger, url=self.URL) def test_invalid_type_returns_none(self): - self.mock_response('image/watercolour') - artpath = fetchart._fetch_image('http://example.com') - self.assertEqual(artpath, None) + self.mock_response(self.URL, 'image/watercolour') + self.source.fetch_image(self.candidate, self.extra) + self.assertEqual(self.candidate.path, None) def test_jpeg_type_returns_path(self): - self.mock_response('image/jpeg') - artpath = fetchart._fetch_image('http://example.com') - self.assertNotEqual(artpath, None) + self.mock_response(self.URL, 'image/jpeg') + self.source.fetch_image(self.candidate, self.extra) + self.assertNotEqual(self.candidate.path, None) + + def test_extension_set_by_content_type(self): + self.mock_response(self.URL, 'image/png') + self.source.fetch_image(self.candidate, self.extra) + self.assertEqual(os.path.splitext(self.candidate.path)[1], b'.png') + self.assertExists(self.candidate.path) + + def test_does_not_rely_on_server_content_type(self): + self.mock_response(self.URL, 'image/jpeg', 'image/png') + self.source.fetch_image(self.candidate, self.extra) + self.assertEqual(os.path.splitext(self.candidate.path)[1], b'.png') + self.assertExists(self.candidate.path) -class FSArtTest(_common.TestCase): +class FSArtTest(UseThePlugin): def setUp(self): super(FSArtTest, self).setUp() - self.dpath = os.path.join(self.temp_dir, 'arttest') + self.dpath = os.path.join(self.temp_dir, b'arttest') os.mkdir(self.dpath) + self.source = fetchart.FileSystem(logger, self.plugin.config) + self.extra = {'cautious': False, + 'cover_names': ('art',), + 'paths': [self.dpath]} + def test_finds_jpg_in_directory(self): - _common.touch(os.path.join(self.dpath, 'a.jpg')) - fn = fetchart.art_in_path(self.dpath, ('art',), False) - self.assertEqual(fn, os.path.join(self.dpath, 'a.jpg')) + _common.touch(os.path.join(self.dpath, b'a.jpg')) + candidate = next(self.source.get(None, self.extra)) + self.assertEqual(candidate.path, os.path.join(self.dpath, b'a.jpg')) def test_appropriately_named_file_takes_precedence(self): - _common.touch(os.path.join(self.dpath, 'a.jpg')) - _common.touch(os.path.join(self.dpath, 'art.jpg')) - fn = fetchart.art_in_path(self.dpath, ('art',), False) - self.assertEqual(fn, os.path.join(self.dpath, 'art.jpg')) + _common.touch(os.path.join(self.dpath, b'a.jpg')) + _common.touch(os.path.join(self.dpath, b'art.jpg')) + candidate = next(self.source.get(None, self.extra)) + self.assertEqual(candidate.path, os.path.join(self.dpath, b'art.jpg')) def test_non_image_file_not_identified(self): 
- _common.touch(os.path.join(self.dpath, 'a.txt')) - fn = fetchart.art_in_path(self.dpath, ('art',), False) - self.assertEqual(fn, None) + _common.touch(os.path.join(self.dpath, b'a.txt')) + with self.assertRaises(StopIteration): + next(self.source.get(None, self.extra)) def test_cautious_skips_fallback(self): - _common.touch(os.path.join(self.dpath, 'a.jpg')) - fn = fetchart.art_in_path(self.dpath, ('art',), True) - self.assertEqual(fn, None) + _common.touch(os.path.join(self.dpath, b'a.jpg')) + self.extra['cautious'] = True + with self.assertRaises(StopIteration): + next(self.source.get(None, self.extra)) def test_empty_dir(self): - fn = fetchart.art_in_path(self.dpath, ('art',), True) - self.assertEqual(fn, None) + with self.assertRaises(StopIteration): + next(self.source.get(None, self.extra)) def test_precedence_amongst_correct_files(self): - _common.touch(os.path.join(self.dpath, 'back.jpg')) - _common.touch(os.path.join(self.dpath, 'front.jpg')) - _common.touch(os.path.join(self.dpath, 'front-cover.jpg')) - fn = fetchart.art_in_path(self.dpath, - ('cover', 'front', 'back'), False) - self.assertEqual(fn, os.path.join(self.dpath, 'front-cover.jpg')) + images = [b'front-cover.jpg', b'front.jpg', b'back.jpg'] + paths = [os.path.join(self.dpath, i) for i in images] + for p in paths: + _common.touch(p) + self.extra['cover_names'] = ['cover', 'front', 'back'] + candidates = [candidate.path for candidate in + self.source.get(None, self.extra)] + self.assertEqual(candidates, paths) -class CombinedTest(_common.TestCase): +class CombinedTest(FetchImageHelper, UseThePlugin): ASIN = 'xxxx' MBID = 'releaseid' AMAZON_URL = 'http://images.amazon.com/images/P/{0}.01.LZZZZZZZ.jpg' \ .format(ASIN) AAO_URL = 'http://www.albumart.org/index_detail.php?asin={0}' \ .format(ASIN) - CAA_URL = 'http://coverartarchive.org/release/{0}/front-500.jpg' \ + CAA_URL = 'http://coverartarchive.org/release/{0}/front' \ .format(MBID) def setUp(self): super(CombinedTest, self).setUp() - self.dpath = os.path.join(self.temp_dir, 'arttest') + self.dpath = os.path.join(self.temp_dir, b'arttest') os.mkdir(self.dpath) - # Set up configuration. 
- fetchart.FetchArtPlugin() - - @responses.activate - def run(self, *args, **kwargs): - super(CombinedTest, self).run(*args, **kwargs) - - def mock_response(self, url, content_type='image/jpeg'): - responses.add(responses.GET, url, content_type=content_type) - def test_main_interface_returns_amazon_art(self): self.mock_response(self.AMAZON_URL) album = _common.Bag(asin=self.ASIN) - artpath = fetchart.art_for_album(album, None) - self.assertNotEqual(artpath, None) + candidate = self.plugin.art_for_album(album, None) + self.assertIsNotNone(candidate) def test_main_interface_returns_none_for_missing_asin_and_path(self): album = _common.Bag() - artpath = fetchart.art_for_album(album, None) - self.assertEqual(artpath, None) + candidate = self.plugin.art_for_album(album, None) + self.assertIsNone(candidate) def test_main_interface_gives_precedence_to_fs_art(self): - _common.touch(os.path.join(self.dpath, 'art.jpg')) + _common.touch(os.path.join(self.dpath, b'art.jpg')) self.mock_response(self.AMAZON_URL) album = _common.Bag(asin=self.ASIN) - artpath = fetchart.art_for_album(album, [self.dpath]) - self.assertEqual(artpath, os.path.join(self.dpath, 'art.jpg')) + candidate = self.plugin.art_for_album(album, [self.dpath]) + self.assertIsNotNone(candidate) + self.assertEqual(candidate.path, os.path.join(self.dpath, b'art.jpg')) def test_main_interface_falls_back_to_amazon(self): self.mock_response(self.AMAZON_URL) album = _common.Bag(asin=self.ASIN) - artpath = fetchart.art_for_album(album, [self.dpath]) - self.assertNotEqual(artpath, None) - self.assertFalse(artpath.startswith(self.dpath)) + candidate = self.plugin.art_for_album(album, [self.dpath]) + self.assertIsNotNone(candidate) + self.assertFalse(candidate.path.startswith(self.dpath)) def test_main_interface_tries_amazon_before_aao(self): self.mock_response(self.AMAZON_URL) album = _common.Bag(asin=self.ASIN) - fetchart.art_for_album(album, [self.dpath]) + self.plugin.art_for_album(album, [self.dpath]) self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.AMAZON_URL) def test_main_interface_falls_back_to_aao(self): self.mock_response(self.AMAZON_URL, content_type='text/html') album = _common.Bag(asin=self.ASIN) - fetchart.art_for_album(album, [self.dpath]) + self.plugin.art_for_album(album, [self.dpath]) self.assertEqual(responses.calls[-1].request.url, self.AAO_URL) def test_main_interface_uses_caa_when_mbid_available(self): self.mock_response(self.CAA_URL) album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN) - artpath = fetchart.art_for_album(album, None) - self.assertNotEqual(artpath, None) + candidate = self.plugin.art_for_album(album, None) + self.assertIsNotNone(candidate) self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.CAA_URL) def test_local_only_does_not_access_network(self): album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN) - artpath = fetchart.art_for_album(album, [self.dpath], local_only=True) - self.assertEqual(artpath, None) + self.plugin.art_for_album(album, None, local_only=True) self.assertEqual(len(responses.calls), 0) def test_local_only_gets_fs_image(self): - _common.touch(os.path.join(self.dpath, 'art.jpg')) + _common.touch(os.path.join(self.dpath, b'art.jpg')) album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN) - artpath = fetchart.art_for_album(album, [self.dpath], - None, local_only=True) - self.assertEqual(artpath, os.path.join(self.dpath, 'art.jpg')) + candidate = self.plugin.art_for_album(album, [self.dpath], + 
local_only=True) + self.assertIsNotNone(candidate) + self.assertEqual(candidate.path, os.path.join(self.dpath, b'art.jpg')) self.assertEqual(len(responses.calls), 0) -class AAOTest(_common.TestCase): +class AAOTest(UseThePlugin): ASIN = 'xxxx' AAO_URL = 'http://www.albumart.org/index_detail.php?asin={0}'.format(ASIN) + def setUp(self): + super(AAOTest, self).setUp() + self.source = fetchart.AlbumArtOrg(logger, self.plugin.config) + self.extra = dict() + @responses.activate def run(self, *args, **kwargs): super(AAOTest, self).run(*args, **kwargs) @@ -189,24 +244,28 @@ def test_aao_scraper_finds_image(self): body = """
- - View larger image + + \"View """ self.mock_response(self.AAO_URL, body) - res = fetchart.aao_art(self.ASIN) - self.assertEqual(res, 'TARGET_URL') + album = _common.Bag(asin=self.ASIN) + candidate = next(self.source.get(album, self.extra)) + self.assertEqual(candidate.url, 'TARGET_URL') - def test_aao_scraper_returns_none_when_no_image_present(self): + def test_aao_scraper_returns_no_result_when_no_image_present(self): self.mock_response(self.AAO_URL, 'blah blah') - res = fetchart.aao_art(self.ASIN) - self.assertEqual(res, None) - + album = _common.Bag(asin=self.ASIN) + with self.assertRaises(StopIteration): + next(self.source.get(album, self.extra)) -class GoogleImageTest(_common.TestCase): - _google_url = 'https://ajax.googleapis.com/ajax/services/search/images' +class GoogleImageTest(UseThePlugin): + def setUp(self): + super(GoogleImageTest, self).setUp() + self.source = fetchart.GoogleImages(logger, self.plugin.config) + self.extra = dict() @responses.activate def run(self, *args, **kwargs): @@ -218,50 +277,157 @@ def test_google_art_finds_image(self): album = _common.Bag(albumartist="some artist", album="some album") - json = """{"responseData": {"results": - [{"unescapedUrl": "url_to_the_image"}]}}""" - self.mock_response(self._google_url, json) - result_url = fetchart.google_art(album) - self.assertEqual(result_url, 'url_to_the_image') + json = '{"items": [{"link": "url_to_the_image"}]}' + self.mock_response(fetchart.GoogleImages.URL, json) + candidate = next(self.source.get(album, self.extra)) + self.assertEqual(candidate.url, 'url_to_the_image') - def test_google_art_dont_finds_image(self): + def test_google_art_returns_no_result_when_error_received(self): + album = _common.Bag(albumartist="some artist", album="some album") + json = '{"error": {"errors": [{"reason": "some reason"}]}}' + self.mock_response(fetchart.GoogleImages.URL, json) + with self.assertRaises(StopIteration): + next(self.source.get(album, self.extra)) + + def test_google_art_returns_no_result_with_malformed_response(self): album = _common.Bag(albumartist="some artist", album="some album") json = """bla blup""" - self.mock_response(self._google_url, json) - result_url = fetchart.google_art(album) - self.assertEqual(result_url, None) + self.mock_response(fetchart.GoogleImages.URL, json) + with self.assertRaises(StopIteration): + next(self.source.get(album, self.extra)) + + +class FanartTVTest(UseThePlugin): + RESPONSE_MULTIPLE = u"""{ + "name": "artistname", + "mbid_id": "artistid", + "albums": { + "thereleasegroupid": { + "albumcover": [ + { + "id": "24", + "url": "http://example.com/1.jpg", + "likes": "0" + }, + { + "id": "42", + "url": "http://example.com/2.jpg", + "likes": "0" + }, + { + "id": "23", + "url": "http://example.com/3.jpg", + "likes": "0" + } + ], + "cdart": [ + { + "id": "123", + "url": "http://example.com/4.jpg", + "likes": "0", + "disc": "1", + "size": "1000" + } + ] + } + } + }""" + RESPONSE_NO_ART = u"""{ + "name": "artistname", + "mbid_id": "artistid", + "albums": { + "thereleasegroupid": { + "cdart": [ + { + "id": "123", + "url": "http://example.com/4.jpg", + "likes": "0", + "disc": "1", + "size": "1000" + } + ] + } + } + }""" + RESPONSE_ERROR = u"""{ + "status": "error", + "error message": "the error message" + }""" + RESPONSE_MALFORMED = u"bla blup" + + def setUp(self): + super(FanartTVTest, self).setUp() + self.source = fetchart.FanartTV(logger, self.plugin.config) + self.extra = dict() + @responses.activate + def run(self, *args, **kwargs): + super(FanartTVTest, self).run(*args, 
**kwargs) -class ArtImporterTest(_common.TestCase): + def mock_response(self, url, json): + responses.add(responses.GET, url, body=json, + content_type='application/json') + + def test_fanarttv_finds_image(self): + album = _common.Bag(mb_releasegroupid=u'thereleasegroupid') + self.mock_response(fetchart.FanartTV.API_ALBUMS + u'thereleasegroupid', + self.RESPONSE_MULTIPLE) + candidate = next(self.source.get(album, self.extra)) + self.assertEqual(candidate.url, 'http://example.com/1.jpg') + + def test_fanarttv_returns_no_result_when_error_received(self): + album = _common.Bag(mb_releasegroupid=u'thereleasegroupid') + self.mock_response(fetchart.FanartTV.API_ALBUMS + u'thereleasegroupid', + self.RESPONSE_ERROR) + with self.assertRaises(StopIteration): + next(self.source.get(album, self.extra)) + + def test_fanarttv_returns_no_result_with_malformed_response(self): + album = _common.Bag(mb_releasegroupid=u'thereleasegroupid') + self.mock_response(fetchart.FanartTV.API_ALBUMS + u'thereleasegroupid', + self.RESPONSE_MALFORMED) + with self.assertRaises(StopIteration): + next(self.source.get(album, self.extra)) + + def test_fanarttv_only_other_images(self): + # The source used to fail when there were images present, but no cover + album = _common.Bag(mb_releasegroupid=u'thereleasegroupid') + self.mock_response(fetchart.FanartTV.API_ALBUMS + u'thereleasegroupid', + self.RESPONSE_NO_ART) + with self.assertRaises(StopIteration): + next(self.source.get(album, self.extra)) + + +@_common.slow_test() +class ArtImporterTest(UseThePlugin): def setUp(self): super(ArtImporterTest, self).setUp() # Mock the album art fetcher to always return our test file. - self.art_file = os.path.join(self.temp_dir, 'tmpcover.jpg') + self.art_file = os.path.join(self.temp_dir, b'tmpcover.jpg') _common.touch(self.art_file) - self.old_afa = fetchart.art_for_album - self.afa_response = self.art_file + self.old_afa = self.plugin.art_for_album + self.afa_response = fetchart.Candidate(logger, path=self.art_file) - def art_for_album(i, p, maxwidth=None, local_only=False): + def art_for_album(i, p, local_only=False): return self.afa_response - fetchart.art_for_album = art_for_album + self.plugin.art_for_album = art_for_album # Test library. self.libpath = os.path.join(self.temp_dir, 'tmplib.blb') self.libdir = os.path.join(self.temp_dir, 'tmplib') os.mkdir(self.libdir) - os.mkdir(os.path.join(self.libdir, 'album')) - itempath = os.path.join(self.libdir, 'album', 'test.mp3') - shutil.copyfile(os.path.join(_common.RSRC, 'full.mp3'), itempath) + os.mkdir(os.path.join(self.libdir, b'album')) + itempath = os.path.join(self.libdir, b'album', b'test.mp3') + shutil.copyfile(os.path.join(_common.RSRC, b'full.mp3'), itempath) self.lib = library.Library(self.libpath) self.i = _common.item() self.i.path = itempath self.album = self.lib.add_album([self.i]) self.lib._connection().commit() - # The plugin and import configuration. - self.plugin = fetchart.FetchArtPlugin() + # The import configuration. self.session = _common.import_session(self.lib) # Import task for the coroutine. 
@@ -269,10 +435,10 @@ self.task.is_album = True self.task.album = self.album info = AlbumInfo( - album='some album', - album_id='albumid', - artist='some artist', - artist_id='artistid', + album=u'some album', + album_id=u'albumid', + artist=u'some artist', + artist_id=u'artistid', tracks=[], ) self.task.set_choice(AlbumMatch(0, info, {}, set(), set())) @@ -280,7 +446,7 @@ def tearDown(self): self.lib._connection().close() super(ArtImporterTest, self).tearDown() - fetchart.art_for_album = self.old_afa + self.plugin.art_for_album = self.old_afa def _fetch_art(self, should_exist): """Execute the fetch_art coroutine for the task and return the @@ -296,7 +462,7 @@ if should_exist: self.assertEqual( artpath, - os.path.join(os.path.dirname(self.i.path), 'cover.jpg') + os.path.join(os.path.dirname(self.i.path), b'cover.jpg') ) self.assertExists(artpath) else: @@ -320,20 +486,167 @@ self.assertExists(self.art_file) def test_delete_original_file(self): - config['import']['delete'] = True - self._fetch_art(True) - self.assertNotExists(self.art_file) - - def test_move_original_file(self): - config['import']['move'] = True + self.plugin.src_removed = True self._fetch_art(True) self.assertNotExists(self.art_file) def test_do_not_delete_original_if_already_in_place(self): - artdest = os.path.join(os.path.dirname(self.i.path), 'cover.jpg') + artdest = os.path.join(os.path.dirname(self.i.path), b'cover.jpg') shutil.copyfile(self.art_file, artdest) - self.afa_response = artdest + self.afa_response = fetchart.Candidate(logger, path=artdest) + self._fetch_art(True) + + def test_fetch_art_if_imported_file_deleted(self): + # See #1126. Test the following scenario: + # - Album art imported, `album.artpath` set. + # - Imported album art file subsequently deleted (by user or other + # program). + # `fetchart` should import album art again instead of printing the + # message " has album art". 
self._fetch_art(True) + util.remove(self.album.artpath) + self.plugin.batch_fetch_art(self.lib, self.lib.albums(), force=False) + self.assertExists(self.album.artpath) + + +class ArtForAlbumTest(UseThePlugin): + """ Tests that fetchart.art_for_album respects the size + configuration (e.g., minwidth, enforce_ratio) + """ + + IMG_225x225 = os.path.join(_common.RSRC, b'abbey.jpg') + IMG_348x348 = os.path.join(_common.RSRC, b'abbey-different.jpg') + IMG_500x490 = os.path.join(_common.RSRC, b'abbey-similar.jpg') + + def setUp(self): + super(ArtForAlbumTest, self).setUp() + + self.old_fs_source_get = fetchart.FileSystem.get + + def fs_source_get(_self, album, extra): + if extra['paths']: + yield fetchart.Candidate(logger, path=self.image_file) + + fetchart.FileSystem.get = fs_source_get + + self.album = _common.Bag() + + def tearDown(self): + fetchart.FileSystem.get = self.old_fs_source_get + super(ArtForAlbumTest, self).tearDown() + + def _assertImageIsValidArt(self, image_file, should_exist): # noqa + self.assertExists(image_file) + self.image_file = image_file + + candidate = self.plugin.art_for_album(self.album, [''], True) + + if should_exist: + self.assertNotEqual(candidate, None) + self.assertEqual(candidate.path, self.image_file) + self.assertExists(candidate.path) + else: + self.assertIsNone(candidate) + + def _assertImageResized(self, image_file, should_resize): # noqa + self.image_file = image_file + with patch.object(ArtResizer.shared, 'resize') as mock_resize: + self.plugin.art_for_album(self.album, [''], True) + self.assertEqual(mock_resize.called, should_resize) + + def _require_backend(self): + """Skip the test if the art resizer doesn't have ImageMagick or + PIL (so comparisons and measurements are unavailable). + """ + if ArtResizer.shared.method[0] == WEBPROXY: + self.skipTest(u"ArtResizer has no local imaging backend available") + + def test_respect_minwidth(self): + self._require_backend() + self.plugin.minwidth = 300 + self._assertImageIsValidArt(self.IMG_225x225, False) + self._assertImageIsValidArt(self.IMG_348x348, True) + + def test_respect_enforce_ratio_yes(self): + self._require_backend() + self.plugin.enforce_ratio = True + self._assertImageIsValidArt(self.IMG_500x490, False) + self._assertImageIsValidArt(self.IMG_225x225, True) + + def test_respect_enforce_ratio_no(self): + self.plugin.enforce_ratio = False + self._assertImageIsValidArt(self.IMG_500x490, True) + + def test_respect_enforce_ratio_px_above(self): + self._require_backend() + self.plugin.enforce_ratio = True + self.plugin.margin_px = 5 + self._assertImageIsValidArt(self.IMG_500x490, False) + + def test_respect_enforce_ratio_px_below(self): + self._require_backend() + self.plugin.enforce_ratio = True + self.plugin.margin_px = 15 + self._assertImageIsValidArt(self.IMG_500x490, True) + + def test_respect_enforce_ratio_percent_above(self): + self._require_backend() + self.plugin.enforce_ratio = True + self.plugin.margin_percent = (500 - 490) / 500 * 0.5 + self._assertImageIsValidArt(self.IMG_500x490, False) + + def test_respect_enforce_ratio_percent_below(self): + self._require_backend() + self.plugin.enforce_ratio = True + self.plugin.margin_percent = (500 - 490) / 500 * 1.5 + self._assertImageIsValidArt(self.IMG_500x490, True) + + def test_resize_if_necessary(self): + self._require_backend() + self.plugin.maxwidth = 300 + self._assertImageResized(self.IMG_225x225, False) + self._assertImageResized(self.IMG_348x348, True) + + +class DeprecatedConfigTest(_common.TestCase): + """While refactoring the plugin, 
the remote_priority option was deprecated, + and a new codepath should translate its effect. Check that it actually does + so. + """ + + # If we subclassed UseThePlugin, the configuration change would either be + # overwritten by _common.TestCase or be set after constructing the + # plugin object + def setUp(self): + super(DeprecatedConfigTest, self).setUp() + config['fetchart']['remote_priority'] = True + self.plugin = fetchart.FetchArtPlugin() + + def test_moves_filesystem_to_end(self): + self.assertEqual(type(self.plugin.sources[-1]), fetchart.FileSystem) + + +class EnforceRatioConfigTest(_common.TestCase): + """Throw some data at the regexes.""" + + def _load_with_config(self, values, should_raise): + if should_raise: + for v in values: + config['fetchart']['enforce_ratio'] = v + with self.assertRaises(confit.ConfigValueError): + fetchart.FetchArtPlugin() + else: + for v in values: + config['fetchart']['enforce_ratio'] = v + fetchart.FetchArtPlugin() + + def test_px(self): + self._load_with_config(u'0px 4px 12px 123px'.split(), False) + self._load_with_config(u'00px stuff5px'.split(), True) + + def test_percent(self): + self._load_with_config(u'0% 0.00% 5.1% 5% 100%'.split(), False) + self._load_with_config(u'00% 1.234% foo5% 100.1%'.split(), True) def suite(): diff -Nru beets-1.3.8+dfsg/test/test_autotag.py beets-1.3.19/test/test_autotag.py --- beets-1.3.8+dfsg/test/test_autotag.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_autotag.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,11 +15,13 @@ """Tests for autotagging functionality. 
""" +from __future__ import division, absolute_import, print_function + import re import copy -import _common -from _common import unittest +from test import _common +from test._common import unittest from beets import autotag from beets.autotag import match from beets.autotag.hooks import Distance, string_dist @@ -44,7 +47,7 @@ def test_plurality_conflict(self): objs = [1, 1, 2, 2, 3] obj, freq = plurality(objs) - self.assert_(obj in (1, 2)) + self.assertTrue(obj in (1, 2)) self.assertEqual(freq, 2) def test_plurality_empty_sequence_raises_error(self): @@ -569,7 +572,7 @@ return Item( artist=u'ben harper', album=u'burn to shine', - title=u'ben harper - Burn to Shine ' + str(i), + title=u'ben harper - Burn to Shine {0}'.format(i), track=i, length=length, mb_trackid='', mb_albumid='', mb_artistid='', @@ -632,7 +635,7 @@ trackinfo = [] trackinfo.append(TrackInfo( u'oneNew', - 'dfa939ec-118c-4d0f-84a0-60f3d1e6522c', + u'dfa939ec-118c-4d0f-84a0-60f3d1e6522c', medium=1, medium_index=1, medium_total=1, @@ -642,7 +645,7 @@ )) trackinfo.append(TrackInfo( u'twoNew', - '40130ed1-a27c-42fd-a328-1ebefb6caef4', + u'40130ed1-a27c-42fd-a328-1ebefb6caef4', medium=2, medium_index=1, index=2, @@ -727,7 +730,7 @@ my_info = copy.deepcopy(self.info) self._apply(info=my_info) self.assertEqual(self.items[0].artist, 'artistNew') - self.assertEqual(self.items[0].artist, 'artistNew') + self.assertEqual(self.items[1].artist, 'artistNew') def test_album_artist_overriden_by_nonempty_track_artist(self): my_info = copy.deepcopy(self.info) @@ -786,6 +789,13 @@ self.assertEqual(self.items[0].month, 2) self.assertEqual(self.items[0].day, 3) + def test_data_source_applied(self): + my_info = copy.deepcopy(self.info) + my_info.data_source = 'MusicBrainz' + self._apply(info=my_info) + + self.assertEqual(self.items[0].data_source, 'MusicBrainz') + class ApplyCompilationTest(_common.TestCase, ApplyTestUtil): def setUp(self): @@ -797,16 +807,16 @@ trackinfo = [] trackinfo.append(TrackInfo( u'oneNew', - 'dfa939ec-118c-4d0f-84a0-60f3d1e6522c', + u'dfa939ec-118c-4d0f-84a0-60f3d1e6522c', u'artistOneNew', - 'a05686fc-9db2-4c23-b99e-77f5db3e5282', + u'a05686fc-9db2-4c23-b99e-77f5db3e5282', index=1, )) trackinfo.append(TrackInfo( u'twoNew', - '40130ed1-a27c-42fd-a328-1ebefb6caef4', + u'40130ed1-a27c-42fd-a328-1ebefb6caef4', u'artistTwoNew', - '80b3cf5e-18fe-4c59-98c7-e5bb87210710', + u'80b3cf5e-18fe-4c59-98c7-e5bb87210710', index=2, )) self.info = AlbumInfo( @@ -869,17 +879,17 @@ def test_leading_the_has_lower_weight(self): dist1 = string_dist(u'XXX Band Name', u'Band Name') dist2 = string_dist(u'The Band Name', u'Band Name') - self.assert_(dist2 < dist1) + self.assertTrue(dist2 < dist1) def test_parens_have_lower_weight(self): dist1 = string_dist(u'One .Two.', u'One') dist2 = string_dist(u'One (Two)', u'One') - self.assert_(dist2 < dist1) + self.assertTrue(dist2 < dist1) def test_brackets_have_lower_weight(self): dist1 = string_dist(u'One .Two.', u'One') dist2 = string_dist(u'One [Two]', u'One') - self.assert_(dist2 < dist1) + self.assertTrue(dist2 < dist1) def test_ep_label_has_zero_weight(self): dist = string_dist(u'My Song (EP)', u'My Song') @@ -888,7 +898,7 @@ def test_featured_has_lower_weight(self): dist1 = string_dist(u'My Song blah Someone', u'My Song') dist2 = string_dist(u'My Song feat Someone', u'My Song') - self.assert_(dist2 < dist1) + self.assertTrue(dist2 < dist1) def test_postfix_the(self): dist = string_dist(u'The Song Title', u'Song Title, The') @@ -930,13 +940,13 @@ Test Enum Subclasses defined in 
beets.util.enumeration """ def test_ordered_enum(self): - OrderedEnumTest = match.OrderedEnum('OrderedEnumTest', ['a', 'b', 'c']) - self.assertLess(OrderedEnumTest.a, OrderedEnumTest.b) - self.assertLess(OrderedEnumTest.a, OrderedEnumTest.c) - self.assertLess(OrderedEnumTest.b, OrderedEnumTest.c) - self.assertGreater(OrderedEnumTest.b, OrderedEnumTest.a) - self.assertGreater(OrderedEnumTest.c, OrderedEnumTest.a) - self.assertGreater(OrderedEnumTest.c, OrderedEnumTest.b) + OrderedEnumClass = match.OrderedEnum('OrderedEnumTest', ['a', 'b', 'c']) # noqa + self.assertLess(OrderedEnumClass.a, OrderedEnumClass.b) + self.assertLess(OrderedEnumClass.a, OrderedEnumClass.c) + self.assertLess(OrderedEnumClass.b, OrderedEnumClass.c) + self.assertGreater(OrderedEnumClass.b, OrderedEnumClass.a) + self.assertGreater(OrderedEnumClass.c, OrderedEnumClass.a) + self.assertGreater(OrderedEnumClass.c, OrderedEnumClass.b) def suite(): diff -Nru beets-1.3.8+dfsg/test/test_bucket.py beets-1.3.19/test/test_bucket.py --- beets-1.3.8+dfsg/test/test_bucket.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_bucket.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Fabrice Laporte. +# Copyright 2016, Fabrice Laporte. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,12 +15,13 @@ """Tests for the 'bucket' plugin.""" -from nose.tools import raises -from _common import unittest +from __future__ import division, absolute_import, print_function + +from test._common import unittest from beetsplug import bucket from beets import config, ui -from helper import TestHelper +from test.helper import TestHelper class BucketPluginTest(unittest.TestCase, TestHelper): @@ -51,7 +52,7 @@ year.""" self._setup_config(bucket_year=['1950', '1970']) self.assertEqual(self.plugin._tmpl_bucket('2014'), '1970') - self.assertEqual(self.plugin._tmpl_bucket('2015'), '2015') + self.assertEqual(self.plugin._tmpl_bucket('2025'), '2025') def test_year_two_years(self): """Buckets can be named with the 'from-to' syntax.""" @@ -129,26 +130,35 @@ self.assertEqual(self.plugin._tmpl_bucket('…and Oceans'), 'A - D') self.assertEqual(self.plugin._tmpl_bucket('Eagles'), 'E - L') - @raises(ui.UserError) def test_bad_alpha_range_def(self): - """If bad alpha range definition, a UserError is raised""" - self._setup_config(bucket_alpha=['$%']) - self.assertEqual(self.plugin._tmpl_bucket('errol'), 'E') + """If bad alpha range definition, a UserError is raised.""" + with self.assertRaises(ui.UserError): + self._setup_config(bucket_alpha=['$%']) - @raises(ui.UserError) def test_bad_year_range_def_no4digits(self): """If bad year range definition, a UserError is raised. - Range origin must be expressed on 4 digits.""" - self._setup_config(bucket_year=['62-64']) - # from year must be expressed on 4 digits - self.assertEqual(self.plugin._tmpl_bucket('1963'), '62-64') + Range origin must be expressed on 4 digits. + """ + with self.assertRaises(ui.UserError): + self._setup_config(bucket_year=['62-64']) - @raises(ui.UserError) def test_bad_year_range_def_nodigits(self): """If bad year range definition, a UserError is raised. - At least the range origin must be declared.""" - self._setup_config(bucket_year=['nodigits']) - self.assertEqual(self.plugin._tmpl_bucket('1963'), '62-64') + At least the range origin must be declared. 
+ """ + with self.assertRaises(ui.UserError): + self._setup_config(bucket_year=['nodigits']) + + def check_span_from_str(self, sstr, dfrom, dto): + d = bucket.span_from_str(sstr) + self.assertEqual(dfrom, d['from']) + self.assertEqual(dto, d['to']) + + def test_span_from_str(self): + self.check_span_from_str("1980 2000", 1980, 2000) + self.check_span_from_str("1980 00", 1980, 2000) + self.check_span_from_str("1930 00", 1930, 2000) + self.check_span_from_str("1930 50", 1930, 1950) def suite(): diff -Nru beets-1.3.8+dfsg/test/test_config_command.py beets-1.3.19/test/test_config_command.py --- beets-1.3.8+dfsg/test/test_config_command.py 2014-04-12 20:40:45.000000000 +0000 +++ beets-1.3.19/test/test_config_command.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,3 +1,7 @@ +# -*- coding: utf-8 -*- + +from __future__ import division, absolute_import, print_function + import os import yaml from mock import patch @@ -7,9 +11,9 @@ from beets import ui from beets import config -import _common -from _common import unittest -from helper import TestHelper, capture_stdout +from test._common import unittest +from test.helper import TestHelper, capture_stdout +from beets.library import Library class ConfigCommandTest(unittest.TestCase, TestHelper): @@ -23,13 +27,15 @@ self.config_path = os.path.join(self.temp_dir, 'config.yaml') with open(self.config_path, 'w') as file: file.write('library: lib\n') - file.write('option: value') + file.write('option: value\n') + file.write('password: password_value') self.cli_config_path = os.path.join(self.temp_dir, 'cli_config.yaml') with open(self.cli_config_path, 'w') as file: file.write('option: cli overwrite') config.clear() + config['password'].redact = True config._materialized = False def tearDown(self): @@ -37,15 +43,17 @@ def test_show_user_config(self): with capture_stdout() as output: - self.run_command('config') + self.run_command('config', '-c') output = yaml.load(output.getvalue()) self.assertEqual(output['option'], 'value') + self.assertEqual(output['password'], 'password_value') def test_show_user_config_with_defaults(self): with capture_stdout() as output: - self.run_command('config', '-d') + self.run_command('config', '-dc') output = yaml.load(output.getvalue()) self.assertEqual(output['option'], 'value') + self.assertEqual(output['password'], 'password_value') self.assertEqual(output['library'], 'lib') self.assertEqual(output['import']['timid'], False) @@ -56,6 +64,21 @@ self.assertEqual(output['library'], 'lib') self.assertEqual(output['option'], 'cli overwrite') + def test_show_redacted_user_config(self): + with capture_stdout() as output: + self.run_command('config') + output = yaml.load(output.getvalue()) + self.assertEqual(output['option'], 'value') + self.assertEqual(output['password'], 'REDACTED') + + def test_show_redacted_user_config_with_defaults(self): + with capture_stdout() as output: + self.run_command('config', '-d') + output = yaml.load(output.getvalue()) + self.assertEqual(output['option'], 'value') + self.assertEqual(output['password'], 'REDACTED') + self.assertEqual(output['import']['timid'], False) + def test_config_paths(self): with capture_stdout() as output: self.run_command('config', '-p') @@ -77,33 +100,35 @@ execlp.assert_called_once_with( 'myeditor', 'myeditor', self.config_path) - def test_edit_config_with_open(self): - with _common.system_mock('Darwin'): - with patch('os.execlp') as execlp: - self.run_command('config', '-e') - execlp.assert_called_once_with( - 'open', 'open', '-n', self.config_path) - - def 
test_edit_config_with_xdg_open(self): - with _common.system_mock('Linux'): + def test_edit_config_with_automatic_open(self): + with patch('beets.util.open_anything') as open: + open.return_value = 'please_open' with patch('os.execlp') as execlp: self.run_command('config', '-e') execlp.assert_called_once_with( - 'xdg-open', 'xdg-open', self.config_path) - - def test_edit_config_with_windows_exec(self): - with _common.system_mock('Windows'): - with patch('os.execlp') as execlp: - self.run_command('config', '-e') - execlp.assert_called_once_with(self.config_path, self.config_path) + 'please_open', 'please_open', self.config_path) def test_config_editor_not_found(self): with self.assertRaises(ui.UserError) as user_error: with patch('os.execlp') as execlp: - execlp.side_effect = OSError() + execlp.side_effect = OSError('here is problem') self.run_command('config', '-e') self.assertIn('Could not edit configuration', - str(user_error.exception.args[0])) + unicode(user_error.exception)) + self.assertIn('here is problem', unicode(user_error.exception)) + + def test_edit_invalid_config_file(self): + self.lib = Library(':memory:') + with open(self.config_path, 'w') as file: + file.write('invalid: [') + config.clear() + config._materialized = False + + os.environ['EDITOR'] = 'myeditor' + with patch('os.execlp') as execlp: + self.run_command('config', '-e') + execlp.assert_called_once_with( + 'myeditor', 'myeditor', self.config_path) def suite(): diff -Nru beets-1.3.8+dfsg/test/test_convert.py beets-1.3.19/test/test_convert.py --- beets-1.3.8+dfsg/test/test_convert.py 2014-09-14 18:35:06.000000000 +0000 +++ beets-1.3.19/test/test_convert.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Thomas Scholtes. +# Copyright 2016, Thomas Scholtes. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,14 +13,18 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. +from __future__ import division, absolute_import, print_function + import re import os.path -import _common -from _common import unittest -import helper -from helper import control_stdin +from test import _common +from test._common import unittest +from test import helper +from test.helper import control_stdin from beets.mediafile import MediaFile +from beets import util +from beets import ui class TestHelper(helper.TestHelper): @@ -31,33 +36,37 @@ if re.search('[^a-zA-Z0-9]', tag): raise ValueError(u"tag '{0}' must only contain letters and digits" .format(tag)) - # FIXME This is not portable. For windows we need to use our own - # python script that performs the same task. - return u'cp $source $dest; printf {0} >> $dest'.format(tag) - def assertFileTag(self, path, tag): + # A Python script that copies the file and appends a tag. + stub = os.path.join(_common.RSRC, b'convert_stub.py').decode('utf-8') + return u"python '{}' $source $dest {}".format(stub, tag) + + def assertFileTag(self, path, tag): # noqa """Assert that the path is a file and the files content ends with `tag`. 
""" + tag = tag.encode('utf-8') self.assertTrue(os.path.isfile(path), u'{0} is not a file'.format(path)) - with open(path) as f: + with open(path, 'rb') as f: f.seek(-len(tag), os.SEEK_END) self.assertEqual(f.read(), tag, u'{0} is not tagged with {1}'.format(path, tag)) - def assertNoFileTag(self, path, tag): + def assertNoFileTag(self, path, tag): # noqa """Assert that the path is a file and the files content does not end with `tag`. """ + tag = tag.encode('utf-8') self.assertTrue(os.path.isfile(path), u'{0} is not a file'.format(path)) - with open(path) as f: + with open(path, 'rb') as f: f.seek(-len(tag), os.SEEK_END) self.assertNotEqual(f.read(), tag, u'{0} is unexpectedly tagged with {1}' .format(path, tag)) +@_common.slow_test() class ImportConvertTest(unittest.TestCase, TestHelper): def setUp(self): @@ -66,7 +75,7 @@ self.load_plugins('convert') self.config['convert'] = { - 'dest': os.path.join(self.temp_dir, 'convert'), + 'dest': os.path.join(self.temp_dir, b'convert'), 'command': self.tagged_copy_cmd('convert'), # Enforce running convert 'max_bitrate': 1, @@ -93,7 +102,26 @@ self.assertTrue(os.path.isfile(item.path)) -class ConvertCliTest(unittest.TestCase, TestHelper): +class ConvertCommand(object): + """A mixin providing a utility method to run the `convert`command + in tests. + """ + def run_convert_path(self, path, *args): + """Run the `convert` command on a given path.""" + # The path is currently a filesystem bytestring. Convert it to + # an argument bytestring. + path = path.decode(util._fsencoding()).encode(ui._arg_encoding()) + + args = args + (b'path:' + path,) + return self.run_command('convert', *args) + + def run_convert(self, *args): + """Run the `convert` command on `self.item`.""" + return self.run_convert_path(self.item.path, *args) + + +@_common.slow_test() +class ConvertCliTest(unittest.TestCase, TestHelper, ConvertCommand): def setUp(self): self.setup_beets(disk=True) # Converter is threaded @@ -101,7 +129,9 @@ self.item = self.album.items()[0] self.load_plugins('convert') - self.convert_dest = os.path.join(self.temp_dir, 'convert_dest') + self.convert_dest = util.bytestring_path( + os.path.join(self.temp_dir, b'convert_dest') + ) self.config['convert'] = { 'dest': self.convert_dest, 'paths': {'default': 'converted'}, @@ -121,52 +151,66 @@ def test_convert(self): with control_stdin('y'): - self.run_command('convert', self.item.path) - converted = os.path.join(self.convert_dest, 'converted.mp3') + self.run_convert() + converted = os.path.join(self.convert_dest, b'converted.mp3') self.assertFileTag(converted, 'mp3') def test_convert_with_auto_confirmation(self): - self.run_command('convert', '--yes', self.item.path) - converted = os.path.join(self.convert_dest, 'converted.mp3') + self.run_convert('--yes') + converted = os.path.join(self.convert_dest, b'converted.mp3') self.assertFileTag(converted, 'mp3') def test_rejecet_confirmation(self): with control_stdin('n'): - self.run_command('convert', self.item.path) - converted = os.path.join(self.convert_dest, 'converted.mp3') + self.run_convert() + converted = os.path.join(self.convert_dest, b'converted.mp3') self.assertFalse(os.path.isfile(converted)) def test_convert_keep_new(self): - self.assertEqual(os.path.splitext(self.item.path)[1], '.ogg') + self.assertEqual(os.path.splitext(self.item.path)[1], b'.ogg') with control_stdin('y'): - self.run_command('convert', '--keep-new', self.item.path) + self.run_convert('--keep-new') self.item.load() - self.assertEqual(os.path.splitext(self.item.path)[1], '.mp3') + 
self.assertEqual(os.path.splitext(self.item.path)[1], b'.mp3') def test_format_option(self): with control_stdin('y'): - self.run_command('convert', '--format', 'opus', self.item.path) - converted = os.path.join(self.convert_dest, 'converted.ops') + self.run_convert('--format', 'opus') + converted = os.path.join(self.convert_dest, b'converted.ops') self.assertFileTag(converted, 'opus') def test_embed_album_art(self): self.config['convert']['embed'] = True - image_path = os.path.join(_common.RSRC, 'image-2x3.jpg') + image_path = os.path.join(_common.RSRC, b'image-2x3.jpg') self.album.artpath = image_path self.album.store() - with open(os.path.join(image_path)) as f: + with open(os.path.join(image_path), 'rb') as f: image_data = f.read() with control_stdin('y'): - self.run_command('convert', self.item.path) - converted = os.path.join(self.convert_dest, 'converted.mp3') + self.run_convert() + converted = os.path.join(self.convert_dest, b'converted.mp3') mediafile = MediaFile(converted) self.assertEqual(mediafile.images[0].data, image_data) - -class NeverConvertLossyFilesTest(unittest.TestCase, TestHelper): + def test_skip_existing(self): + converted = os.path.join(self.convert_dest, b'converted.mp3') + self.touch(converted, content='XXX') + self.run_convert('--yes') + with open(converted, 'r') as f: + self.assertEqual(f.read(), 'XXX') + + def test_pretend(self): + self.run_convert('--pretend') + converted = os.path.join(self.convert_dest, b'converted.mp3') + self.assertFalse(os.path.exists(converted)) + + +@_common.slow_test() +class NeverConvertLossyFilesTest(unittest.TestCase, TestHelper, + ConvertCommand): """Test the effect of the `never_convert_lossy_files` option. """ @@ -174,7 +218,7 @@ self.setup_beets(disk=True) # Converter is threaded self.load_plugins('convert') - self.convert_dest = os.path.join(self.temp_dir, 'convert_dest') + self.convert_dest = os.path.join(self.temp_dir, b'convert_dest') self.config['convert'] = { 'dest': self.convert_dest, 'paths': {'default': 'converted'}, @@ -192,23 +236,23 @@ def test_transcode_from_lossles(self): [item] = self.add_item_fixtures(ext='flac') with control_stdin('y'): - self.run_command('convert', item.path) - converted = os.path.join(self.convert_dest, 'converted.mp3') + self.run_convert_path(item.path) + converted = os.path.join(self.convert_dest, b'converted.mp3') self.assertFileTag(converted, 'mp3') def test_transcode_from_lossy(self): self.config['convert']['never_convert_lossy_files'] = False [item] = self.add_item_fixtures(ext='ogg') with control_stdin('y'): - self.run_command('convert', item.path) - converted = os.path.join(self.convert_dest, 'converted.mp3') + self.run_convert_path(item.path) + converted = os.path.join(self.convert_dest, b'converted.mp3') self.assertFileTag(converted, 'mp3') def test_transcode_from_lossy_prevented(self): [item] = self.add_item_fixtures(ext='ogg') with control_stdin('y'): - self.run_command('convert', item.path) - converted = os.path.join(self.convert_dest, 'converted.ogg') + self.run_convert_path(item.path) + converted = os.path.join(self.convert_dest, b'converted.ogg') self.assertNoFileTag(converted, 'mp3') diff -Nru beets-1.3.8+dfsg/test/test_datequery.py beets-1.3.19/test/test_datequery.py --- beets-1.3.8+dfsg/test/test_datequery.py 2014-03-22 17:57:21.000000000 +0000 +++ beets-1.3.19/test/test_datequery.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. 
# # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,8 +15,10 @@ """Test for dbcore's date-based queries. """ -import _common -from _common import unittest +from __future__ import division, absolute_import, print_function + +from test import _common +from test._common import unittest from datetime import datetime import time from beets.dbcore.query import _parse_periods, DateInterval, DateQuery @@ -59,14 +62,14 @@ self.assertContains('..', date=datetime.min) self.assertContains('..', '1000-01-01T00:00:00') - def assertContains(self, interval_pattern, date_pattern=None, date=None): + def assertContains(self, interval_pattern, date_pattern=None, date=None): # noqa if date is None: date = _date(date_pattern) (start, end) = _parse_periods(interval_pattern) interval = DateInterval.from_periods(start, end) self.assertTrue(interval.contains(date)) - def assertExcludes(self, interval_pattern, date_pattern): + def assertExcludes(self, interval_pattern, date_pattern): # noqa date = _date(date_pattern) (start, end) = _parse_periods(interval_pattern) interval = DateInterval.from_periods(start, end) @@ -112,6 +115,14 @@ self.assertEqual(len(matched), 0) +class DateQueryConstructTest(unittest.TestCase): + def test_long_numbers(self): + DateQuery('added', '1409830085..1412422089') + + def test_too_many_components(self): + DateQuery('added', '12-34-56-78') + + def suite(): return unittest.TestLoader().loadTestsFromName(__name__) diff -Nru beets-1.3.8+dfsg/test/test_dbcore.py beets-1.3.19/test/test_dbcore.py --- beets-1.3.8+dfsg/test/test_dbcore.py 2014-09-16 02:41:48.000000000 +0000 +++ beets-1.3.19/test/test_dbcore.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,10 +15,14 @@ """Tests for the DBCore database abstraction. """ +from __future__ import division, absolute_import, print_function + import os +import shutil import sqlite3 -from _common import unittest +from test import _common +from test._common import unittest from beets import dbcore from tempfile import mkstemp @@ -112,15 +117,28 @@ pass +class TestModelWithGetters(dbcore.Model): + + @classmethod + def _getters(cls): + return {'aComputedField': (lambda s: 'thing')} + + def _template_funcs(self): + return {} + + +@_common.slow_test() class MigrationTest(unittest.TestCase): """Tests the ability to change the database schema between versions. """ - def setUp(self): - handle, self.libfile = mkstemp('db') + + @classmethod + def setUpClass(cls): + handle, cls.orig_libfile = mkstemp('orig_db') os.close(handle) # Set up a database with the two-field schema. - old_lib = TestDatabase2(self.libfile) + old_lib = TestDatabase2(cls.orig_libfile) # Add an item to the old library. 
old_lib._connection().execute( @@ -129,6 +147,15 @@ old_lib._connection().commit() del old_lib + @classmethod + def tearDownClass(cls): + os.remove(cls.orig_libfile) + + def setUp(self): + handle, self.libfile = mkstemp('db') + os.close(handle) + shutil.copyfile(self.orig_libfile, self.libfile) + def tearDown(self): os.remove(self.libfile) @@ -270,6 +297,40 @@ model2.load() self.assertNotIn('flex_field', model2) + def test_check_db_fails(self): + with self.assertRaisesRegexp(ValueError, 'no database'): + dbcore.Model()._check_db() + with self.assertRaisesRegexp(ValueError, 'no id'): + TestModel1(self.db)._check_db() + + dbcore.Model(self.db)._check_db(need_id=False) + + def test_missing_field(self): + with self.assertRaises(AttributeError): + TestModel1(self.db).nonExistingKey + + def test_computed_field(self): + model = TestModelWithGetters() + self.assertEqual(model.aComputedField, 'thing') + with self.assertRaisesRegexp(KeyError, u'computed field .+ deleted'): + del model.aComputedField + + def test_items(self): + model = TestModel1(self.db) + model.id = 5 + self.assertEqual({('id', 5), ('field_one', None)}, + set(model.items())) + + def test_delete_internal_field(self): + model = dbcore.Model() + del model._db + with self.assertRaises(AttributeError): + model._db + + def test_parse_nonstring(self): + with self.assertRaisesRegexp(TypeError, u"must be a string"): + dbcore.Model._parse(None, 42) + class FormatTest(unittest.TestCase): def test_format_fixed_field(self): @@ -348,7 +409,7 @@ part, {'year': dbcore.query.NumericQuery}, {':': dbcore.query.RegexpQuery}, - ) + )[:-1] # remove the negate flag def test_one_basic_term(self): q = 'test' @@ -431,6 +492,10 @@ q = self.qfs(['some_float_field:2..3']) self.assertIsInstance(q.subqueries[0], dbcore.query.NumericQuery) + def test_empty_query_part(self): + q = self.qfs(['']) + self.assertIsInstance(q.subqueries[0], dbcore.query.TrueQuery) + class SortFromStringsTest(unittest.TestCase): def sfs(self, strings): @@ -442,6 +507,7 @@ def test_zero_parts(self): s = self.sfs([]) self.assertIsInstance(s, dbcore.query.NullSort) + self.assertEqual(s, dbcore.query.NullSort()) def test_one_parts(self): s = self.sfs(['field+']) @@ -454,17 +520,139 @@ def test_fixed_field_sort(self): s = self.sfs(['field_one+']) - self.assertIsInstance(s, dbcore.query.MultipleSort) - self.assertIsInstance(s.sorts[0], dbcore.query.FixedFieldSort) + self.assertIsInstance(s, dbcore.query.FixedFieldSort) + self.assertEqual(s, dbcore.query.FixedFieldSort('field_one')) def test_flex_field_sort(self): s = self.sfs(['flex_field+']) - self.assertIsInstance(s, dbcore.query.MultipleSort) - self.assertIsInstance(s.sorts[0], dbcore.query.SlowFieldSort) + self.assertIsInstance(s, dbcore.query.SlowFieldSort) + self.assertEqual(s, dbcore.query.SlowFieldSort('flex_field')) def test_special_sort(self): s = self.sfs(['some_sort+']) - self.assertIsInstance(s.sorts[0], TestSort) + self.assertIsInstance(s, TestSort) + + +class ParseSortedQueryTest(unittest.TestCase): + def psq(self, parts): + return dbcore.parse_sorted_query( + TestModel1, + parts.split(), + ) + + def test_and_query(self): + q, s = self.psq('foo bar') + self.assertIsInstance(q, dbcore.query.AndQuery) + self.assertIsInstance(s, dbcore.query.NullSort) + self.assertEqual(len(q.subqueries), 2) + + def test_or_query(self): + q, s = self.psq('foo , bar') + self.assertIsInstance(q, dbcore.query.OrQuery) + self.assertIsInstance(s, dbcore.query.NullSort) + self.assertEqual(len(q.subqueries), 2) + + def 
test_no_space_before_comma_or_query(self): + q, s = self.psq('foo, bar') + self.assertIsInstance(q, dbcore.query.OrQuery) + self.assertIsInstance(s, dbcore.query.NullSort) + self.assertEqual(len(q.subqueries), 2) + + def test_no_spaces_or_query(self): + q, s = self.psq('foo,bar') + self.assertIsInstance(q, dbcore.query.AndQuery) + self.assertIsInstance(s, dbcore.query.NullSort) + self.assertEqual(len(q.subqueries), 1) + + def test_trailing_comma_or_query(self): + q, s = self.psq('foo , bar ,') + self.assertIsInstance(q, dbcore.query.OrQuery) + self.assertIsInstance(s, dbcore.query.NullSort) + self.assertEqual(len(q.subqueries), 3) + + def test_leading_comma_or_query(self): + q, s = self.psq(', foo , bar') + self.assertIsInstance(q, dbcore.query.OrQuery) + self.assertIsInstance(s, dbcore.query.NullSort) + self.assertEqual(len(q.subqueries), 3) + + def test_only_direction(self): + q, s = self.psq('-') + self.assertIsInstance(q, dbcore.query.AndQuery) + self.assertIsInstance(s, dbcore.query.NullSort) + self.assertEqual(len(q.subqueries), 1) + + +class ResultsIteratorTest(unittest.TestCase): + def setUp(self): + self.db = TestDatabase1(':memory:') + model = TestModel1() + model['foo'] = 'baz' + model.add(self.db) + model = TestModel1() + model['foo'] = 'bar' + model.add(self.db) + + def tearDown(self): + self.db._connection().close() + + def test_iterate_once(self): + objs = self.db._fetch(TestModel1) + self.assertEqual(len(list(objs)), 2) + + def test_iterate_twice(self): + objs = self.db._fetch(TestModel1) + list(objs) + self.assertEqual(len(list(objs)), 2) + + def test_concurrent_iterators(self): + results = self.db._fetch(TestModel1) + it1 = iter(results) + it2 = iter(results) + next(it1) + list(it2) + self.assertEqual(len(list(it1)), 1) + + def test_slow_query(self): + q = dbcore.query.SubstringQuery('foo', 'ba', False) + objs = self.db._fetch(TestModel1, q) + self.assertEqual(len(list(objs)), 2) + + def test_slow_query_negative(self): + q = dbcore.query.SubstringQuery('foo', 'qux', False) + objs = self.db._fetch(TestModel1, q) + self.assertEqual(len(list(objs)), 0) + + def test_iterate_slow_sort(self): + s = dbcore.query.SlowFieldSort('foo') + res = self.db._fetch(TestModel1, sort=s) + objs = list(res) + self.assertEqual(objs[0].foo, 'bar') + self.assertEqual(objs[1].foo, 'baz') + + def test_unsorted_subscript(self): + objs = self.db._fetch(TestModel1) + self.assertEqual(objs[0].foo, 'baz') + self.assertEqual(objs[1].foo, 'bar') + + def test_slow_sort_subscript(self): + s = dbcore.query.SlowFieldSort('foo') + objs = self.db._fetch(TestModel1, sort=s) + self.assertEqual(objs[0].foo, 'bar') + self.assertEqual(objs[1].foo, 'baz') + + def test_length(self): + objs = self.db._fetch(TestModel1) + self.assertEqual(len(objs), 2) + + def test_out_of_range(self): + objs = self.db._fetch(TestModel1) + with self.assertRaises(IndexError): + objs[100] + + def test_no_results(self): + self.assertIsNone(self.db._fetch( + TestModel1, dbcore.query.FalseQuery()).get()) def suite(): diff -Nru beets-1.3.8+dfsg/test/test_echonest.py beets-1.3.19/test/test_echonest.py --- beets-1.3.8+dfsg/test/test_echonest.py 2014-09-14 18:35:06.000000000 +0000 +++ beets-1.3.19/test/test_echonest.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,89 +0,0 @@ -# This file is part of beets. 
-# Copyright 2014, Thomas Scholtes -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. - - -from mock import Mock, patch - -from _common import unittest -from helper import TestHelper - -from beets.library import Item - - -class EchonestCliTest(unittest.TestCase, TestHelper): - def setUp(self): - try: - __import__('pyechonest') - except ImportError: - self.skipTest('pyechonest not available') - - self.setup_beets() - self.load_plugins('echonest') - - def tearDown(self): - self.teardown_beets() - self.unload_plugins() - - @patch.object(Item, 'write') - @patch('pyechonest.song.profile') - @patch('pyechonest.track.track_from_id') - def test_store_data(self, echonest_track, echonest_profile, item_write): - profile = Mock( - artist_name='artist', - title='title', - id='echonestid', - audio_summary={ - 'duration': 10, - 'energy': 0.5, - 'liveness': 0.5, - 'loudness': 0.5, - 'speechiness': 0.5, - 'danceability': 0.5, - 'tempo': 120, - 'key': 2, - 'mode': 0 - }, - ) - echonest_profile.return_value = [profile] - echonest_track.return_value = Mock(song_id='echonestid') - - item = Item( - mb_trackid='01234', - artist='artist', - title='title', - length=10, - ) - item.add(self.lib) - self.assertNotIn('danceability', item) - self.assertNotIn('initialkey', item) - - self.run_command('echonest') - item.load() - self.assertEqual(item['danceability'], 0.5) - self.assertEqual(item['liveness'], 0.5) - self.assertEqual(item['bpm'], 120) - self.assertEqual(item['initial_key'], 'C#m') - - def test_custom_field_range_query(self): - item = Item(liveness=2.2) - item.add(self.lib) - item = self.lib.items('liveness:2.2..3').get() - self.assertEqual(item['liveness'], 2.2) - - -def suite(): - return unittest.TestLoader().loadTestsFromName(__name__) - -if __name__ == '__main__': - unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_edit.py beets-1.3.19/test/test_edit.py --- beets-1.3.8+dfsg/test/test_edit.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/test_edit.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,428 @@ +# This file is part of beets. +# Copyright 2016, Adrian Sampson and Diego Moreda. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+ +from __future__ import division, absolute_import, print_function +import codecs + +from mock import patch +from test import _common +from test._common import unittest +from test.helper import TestHelper, control_stdin +from test.test_ui_importer import TerminalImportSessionSetup +from test.test_importer import ImportHelper, AutotagStub +from beets.library import Item +from beetsplug.edit import EditPlugin + + +class ModifyFileMocker(object): + """Helper for modifying a file, replacing or editing its contents. Used for + mocking the calls to the external editor during testing. + """ + + def __init__(self, contents=None, replacements=None): + """ `self.contents` and `self.replacements` are initialized here, in + order to keep the rest of the functions of this class with the same + signature as `EditPlugin.get_editor()`, making mocking easier. + - `contents`: string with the contents of the file to be used for + `overwrite_contents()` + - `replacement`: dict with the in-place replacements to be used for + `replace_contents()`, in the form {'previous string': 'new string'} + + TODO: check if it can be solved more elegantly with a decorator + """ + self.contents = contents + self.replacements = replacements + self.action = self.overwrite_contents + if replacements: + self.action = self.replace_contents + + # The two methods below mock the `edit` utility function in the plugin. + + def overwrite_contents(self, filename, log): + """Modify `filename`, replacing its contents with `self.contents`. If + `self.contents` is empty, the file remains unchanged. + """ + if self.contents: + with codecs.open(filename, 'w', encoding='utf8') as f: + f.write(self.contents) + + def replace_contents(self, filename, log): + """Modify `filename`, reading its contents and replacing the strings + specified in `self.replacements`. + """ + with codecs.open(filename, 'r', encoding='utf8') as f: + contents = f.read() + for old, new_ in self.replacements.iteritems(): + contents = contents.replace(old, new_) + with codecs.open(filename, 'w', encoding='utf8') as f: + f.write(contents) + + +class EditMixin(object): + """Helper containing some common functionality used for the Edit tests.""" + def assertItemFieldsModified(self, library_items, items, fields=[], # noqa + allowed=['path']): + """Assert that items in the library (`lib_items`) have different values + on the specified `fields` (and *only* on those fields), compared to + `items`. + + An empty `fields` list results in asserting that no modifications have + been performed. `allowed` is a list of field changes that are ignored + (they may or may not have changed; the assertion doesn't care). + """ + for lib_item, item in zip(library_items, items): + diff_fields = [field for field in lib_item._fields + if lib_item[field] != item[field]] + self.assertEqual(set(diff_fields).difference(allowed), + set(fields)) + + def run_mocked_interpreter(self, modify_file_args={}, stdin=[]): + """Run the edit command during an import session, with mocked stdin and + yaml writing. 
+ """ + m = ModifyFileMocker(**modify_file_args) + with patch('beetsplug.edit.edit', side_effect=m.action): + with control_stdin('\n'.join(stdin)): + self.importer.run() + + def run_mocked_command(self, modify_file_args={}, stdin=[], args=[]): + """Run the edit command, with mocked stdin and yaml writing, and + passing `args` to `run_command`.""" + m = ModifyFileMocker(**modify_file_args) + with patch('beetsplug.edit.edit', side_effect=m.action): + with control_stdin('\n'.join(stdin)): + self.run_command('edit', *args) + + +@_common.slow_test() +class EditCommandTest(unittest.TestCase, TestHelper, EditMixin): + """Black box tests for `beetsplug.edit`. Command line interaction is + simulated using `test.helper.control_stdin()`, and yaml editing via an + external editor is simulated using `ModifyFileMocker`. + """ + ALBUM_COUNT = 1 + TRACK_COUNT = 10 + + def setUp(self): + self.setup_beets() + self.load_plugins('edit') + # Add an album, storing the original fields for comparison. + self.album = self.add_album_fixture(track_count=self.TRACK_COUNT) + self.album_orig = {f: self.album[f] for f in self.album._fields} + self.items_orig = [{f: item[f] for f in item._fields} for + item in self.album.items()] + + # Keep track of write()s. + self.write_patcher = patch('beets.library.Item.write') + self.mock_write = self.write_patcher.start() + + def tearDown(self): + EditPlugin.listeners = None + self.write_patcher.stop() + self.teardown_beets() + self.unload_plugins() + + def assertCounts(self, album_count=ALBUM_COUNT, track_count=TRACK_COUNT, # noqa + write_call_count=TRACK_COUNT, title_starts_with=''): + """Several common assertions on Album, Track and call counts.""" + self.assertEqual(len(self.lib.albums()), album_count) + self.assertEqual(len(self.lib.items()), track_count) + self.assertEqual(self.mock_write.call_count, write_call_count) + self.assertTrue(all(i.title.startswith(title_starts_with) + for i in self.lib.items())) + + def test_title_edit_discard(self): + """Edit title for all items in the library, then discard changes.""" + # Edit track titles. + self.run_mocked_command({'replacements': {u't\u00eftle': + u'modified t\u00eftle'}}, + # Cancel. + ['c']) + + self.assertCounts(write_call_count=0, + title_starts_with=u't\u00eftle') + self.assertItemFieldsModified(self.album.items(), self.items_orig, []) + + def test_title_edit_apply(self): + """Edit title for all items in the library, then apply changes.""" + # Edit track titles. + self.run_mocked_command({'replacements': {u't\u00eftle': + u'modified t\u00eftle'}}, + # Apply changes. + ['a']) + + self.assertCounts(write_call_count=self.TRACK_COUNT, + title_starts_with=u'modified t\u00eftle') + self.assertItemFieldsModified(self.album.items(), self.items_orig, + ['title']) + + def test_single_title_edit_apply(self): + """Edit title for one item in the library, then apply changes.""" + # Edit one track title. + self.run_mocked_command({'replacements': {u't\u00eftle 9': + u'modified t\u00eftle 9'}}, + # Apply changes. + ['a']) + + self.assertCounts(write_call_count=1,) + # No changes except on last item. + self.assertItemFieldsModified(list(self.album.items())[:-1], + self.items_orig[:-1], []) + self.assertEqual(list(self.album.items())[-1].title, + u'modified t\u00eftle 9') + + def test_noedit(self): + """Do not edit anything.""" + # Do not edit anything. + self.run_mocked_command({'contents': None}, + # No stdin. 
+ []) + + self.assertCounts(write_call_count=0, + title_starts_with=u't\u00eftle') + self.assertItemFieldsModified(self.album.items(), self.items_orig, []) + + def test_album_edit_apply(self): + """Edit the album field for all items in the library, apply changes. + By design, the album should not be updated."" + """ + # Edit album. + self.run_mocked_command({'replacements': {u'\u00e4lbum': + u'modified \u00e4lbum'}}, + # Apply changes. + ['a']) + + self.assertCounts(write_call_count=self.TRACK_COUNT) + self.assertItemFieldsModified(self.album.items(), self.items_orig, + ['album']) + # Ensure album is *not* modified. + self.album.load() + self.assertEqual(self.album.album, u'\u00e4lbum') + + def test_single_edit_add_field(self): + """Edit the yaml file appending an extra field to the first item, then + apply changes.""" + # Append "foo: bar" to item with id == 1. + self.run_mocked_command({'replacements': {u"id: 1": + u"id: 1\nfoo: bar"}}, + # Apply changes. + ['a']) + + self.assertEqual(self.lib.items(u'id:1')[0].foo, 'bar') + self.assertCounts(write_call_count=1, + title_starts_with=u't\u00eftle') + + def test_a_album_edit_apply(self): + """Album query (-a), edit album field, apply changes.""" + self.run_mocked_command({'replacements': {u'\u00e4lbum': + u'modified \u00e4lbum'}}, + # Apply changes. + ['a'], + args=['-a']) + + self.album.load() + self.assertCounts(write_call_count=self.TRACK_COUNT) + self.assertEqual(self.album.album, u'modified \u00e4lbum') + self.assertItemFieldsModified(self.album.items(), self.items_orig, + ['album']) + + def test_a_albumartist_edit_apply(self): + """Album query (-a), edit albumartist field, apply changes.""" + self.run_mocked_command({'replacements': {u'album artist': + u'modified album artist'}}, + # Apply changes. + ['a'], + args=['-a']) + + self.album.load() + self.assertCounts(write_call_count=self.TRACK_COUNT) + self.assertEqual(self.album.albumartist, u'the modified album artist') + self.assertItemFieldsModified(self.album.items(), self.items_orig, + ['albumartist']) + + def test_malformed_yaml(self): + """Edit the yaml file incorrectly (resulting in a malformed yaml + document).""" + # Edit the yaml file to an invalid file. + self.run_mocked_command({'contents': '!MALFORMED'}, + # Edit again to fix? No. + ['n']) + + self.assertCounts(write_call_count=0, + title_starts_with=u't\u00eftle') + + def test_invalid_yaml(self): + """Edit the yaml file incorrectly (resulting in a well-formed but + invalid yaml document).""" + # Edit the yaml file to an invalid but parseable file. + self.run_mocked_command({'contents': u'wellformed: yes, but invalid'}, + # No stdin. + []) + + self.assertCounts(write_call_count=0, + title_starts_with=u't\u00eftle') + + +@_common.slow_test() +class EditDuringImporterTest(TerminalImportSessionSetup, unittest.TestCase, + ImportHelper, TestHelper, EditMixin): + """TODO + """ + IGNORED = ['added', 'album_id', 'id', 'mtime', 'path'] + + def setUp(self): + self.setup_beets() + self.load_plugins('edit') + # Create some mediafiles, and store them for comparison. 
+ self._create_import_dir(3) + self.items_orig = [Item.from_path(f.path) for f in self.media_files] + self.matcher = AutotagStub().install() + self.matcher.matching = AutotagStub.GOOD + self.config['import']['timid'] = True + + def tearDown(self): + EditPlugin.listeners = None + self.unload_plugins() + self.teardown_beets() + self.matcher.restore() + + def test_edit_apply_asis(self): + """Edit the album field for all items in the library, apply changes, + using the original item tags. + """ + self._setup_import_session() + # Edit track titles. + self.run_mocked_interpreter({'replacements': {u'Tag Title': + u'Edited Title'}}, + # eDit, Apply changes. + ['d', 'a']) + + # Check that only the 'title' field is modified. + self.assertItemFieldsModified(self.lib.items(), self.items_orig, + ['title'], + self.IGNORED + ['albumartist', + 'mb_albumartistid']) + self.assertTrue(all('Edited Title' in i.title + for i in self.lib.items())) + + # Ensure album is *not* fetched from a candidate. + self.assertEqual(self.lib.albums()[0].mb_albumid, u'') + + def test_edit_discard_asis(self): + """Edit the album field for all items in the library, discard changes, + using the original item tags. + """ + self._setup_import_session() + # Edit track titles. + self.run_mocked_interpreter({'replacements': {u'Tag Title': + u'Edited Title'}}, + # eDit, Cancel, Use as-is. + ['d', 'c', 'u']) + + # Check that nothing is modified, the album is imported ASIS. + self.assertItemFieldsModified(self.lib.items(), self.items_orig, + [], + self.IGNORED + ['albumartist', + 'mb_albumartistid']) + self.assertTrue(all('Tag Title' in i.title + for i in self.lib.items())) + + # Ensure album is *not* fetched from a candidate. + self.assertEqual(self.lib.albums()[0].mb_albumid, u'') + + def test_edit_apply_candidate(self): + """Edit the album field for all items in the library, apply changes, + using a candidate. + """ + self._setup_import_session() + # Edit track titles. + self.run_mocked_interpreter({'replacements': {u'Applied Title': + u'Edited Title'}}, + # edit Candidates, 1, Apply changes. + ['c', '1', 'a']) + + # Check that 'title' field is modified, and other fields come from + # the candidate. + self.assertTrue(all('Edited Title ' in i.title + for i in self.lib.items())) + self.assertTrue(all('match ' in i.mb_trackid + for i in self.lib.items())) + + # Ensure album is fetched from a candidate. + self.assertIn('albumid', self.lib.albums()[0].mb_albumid) + + def test_edit_discard_candidate(self): + """Edit the album field for all items in the library, discard changes, + using a candidate. + """ + self._setup_import_session() + # Edit track titles. + self.run_mocked_interpreter({'replacements': {u'Applied Title': + u'Edited Title'}}, + # edit Candidates, 1, Apply changes. + ['c', '1', 'a']) + + # Check that 'title' field is modified, and other fields come from + # the candidate. + self.assertTrue(all('Edited Title ' in i.title + for i in self.lib.items())) + self.assertTrue(all('match ' in i.mb_trackid + for i in self.lib.items())) + + # Ensure album is fetched from a candidate. + self.assertIn('albumid', self.lib.albums()[0].mb_albumid) + + def test_edit_apply_asis_singleton(self): + """Edit the album field for all items in the library, apply changes, + using the original item tags and singleton mode. + """ + self._setup_import_session(singletons=True) + # Edit track titles. + self.run_mocked_interpreter({'replacements': {u'Tag Title': + u'Edited Title'}}, + # eDit, Apply changes, aBort. 
+ ['d', 'a', 'b']) + + # Check that only the 'title' field is modified. + self.assertItemFieldsModified(self.lib.items(), self.items_orig, + ['title'], + self.IGNORED + ['albumartist', + 'mb_albumartistid']) + self.assertTrue(all('Edited Title' in i.title + for i in self.lib.items())) + + def test_edit_apply_candidate_singleton(self): + """Edit the album field for all items in the library, apply changes, + using a candidate and singleton mode. + """ + self._setup_import_session() + # Edit track titles. + self.run_mocked_interpreter({'replacements': {u'Applied Title': + u'Edited Title'}}, + # edit Candidates, 1, Apply changes, aBort. + ['c', '1', 'a', 'b']) + + # Check that 'title' field is modified, and other fields come from + # the candidate. + self.assertTrue(all('Edited Title ' in i.title + for i in self.lib.items())) + self.assertTrue(all('match ' in i.mb_trackid + for i in self.lib.items())) + + +def suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_embedart.py beets-1.3.19/test/test_embedart.py --- beets-1.3.8+dfsg/test/test_embedart.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_embedart.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Thomas Scholtes. +# Copyright 2016, Thomas Scholtes. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,22 +13,51 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. +from __future__ import division, absolute_import, print_function + import os.path -import _common -from _common import unittest -from helper import TestHelper +import shutil +from mock import patch, MagicMock +import tempfile + +from test import _common +from test._common import unittest +from test.helper import TestHelper from beets.mediafile import MediaFile +from beets import config, logging, ui +from beets.util import syspath +from beets.util.artresizer import ArtResizer +from beets import art + + +def require_artresizer_compare(test): + + def wrapper(*args, **kwargs): + if not ArtResizer.shared.can_compare: + raise unittest.SkipTest("compare not available") + else: + return test(*args, **kwargs) + wrapper.__name__ = test.__name__ + return wrapper -class EmbedartCliTest(unittest.TestCase, TestHelper): - artpath = os.path.join(_common.RSRC, 'image-2x3.jpg') +class EmbedartCliTest(_common.TestCase, TestHelper): + + small_artpath = os.path.join(_common.RSRC, b'image-2x3.jpg') + abbey_artpath = os.path.join(_common.RSRC, b'abbey.jpg') + abbey_similarpath = os.path.join(_common.RSRC, b'abbey-similar.jpg') + abbey_differentpath = os.path.join(_common.RSRC, b'abbey-different.jpg') def setUp(self): self.setup_beets() # Converter is threaded self.load_plugins('embedart') - with open(self.artpath) as f: + + def _setup_data(self, artpath=None): + if not artpath: + artpath = self.small_artpath + with open(syspath(artpath), 'rb') as f: self.image_data = f.read() def tearDown(self): @@ -35,22 +65,165 @@ self.teardown_beets() def test_embed_art_from_file(self): + self._setup_data() album = self.add_album_fixture() item = album.items()[0] - self.run_command('embedart', '-f', self.artpath) - mediafile = MediaFile(item.path) + self.run_command('embedart', '-f', self.small_artpath) + mediafile = 
MediaFile(syspath(item.path)) self.assertEqual(mediafile.images[0].data, self.image_data) def test_embed_art_from_album(self): + self._setup_data() album = self.add_album_fixture() item = album.items()[0] - - album.artpath = self.artpath + album.artpath = self.small_artpath album.store() self.run_command('embedart') - mediafile = MediaFile(item.path) + mediafile = MediaFile(syspath(item.path)) self.assertEqual(mediafile.images[0].data, self.image_data) + def test_embed_art_remove_art_file(self): + self._setup_data() + album = self.add_album_fixture() + + logging.getLogger('beets.embedart').setLevel(logging.DEBUG) + + handle, tmp_path = tempfile.mkstemp() + os.write(handle, self.image_data) + os.close(handle) + + album.artpath = tmp_path + album.store() + + config['embedart']['remove_art_file'] = True + self.run_command('embedart') + + if os.path.isfile(tmp_path): + os.remove(tmp_path) + self.fail(u'Artwork file {0} was not deleted'.format(tmp_path)) + + def test_art_file_missing(self): + self.add_album_fixture() + logging.getLogger('beets.embedart').setLevel(logging.DEBUG) + with self.assertRaises(ui.UserError): + self.run_command('embedart', '-f', '/doesnotexist') + + def test_embed_non_image_file(self): + album = self.add_album_fixture() + logging.getLogger('beets.embedart').setLevel(logging.DEBUG) + + handle, tmp_path = tempfile.mkstemp() + os.write(handle, b'I am not an image.') + os.close(handle) + + try: + self.run_command('embedart', '-f', tmp_path) + finally: + os.remove(tmp_path) + + mediafile = MediaFile(syspath(album.items()[0].path)) + self.assertFalse(mediafile.images) # No image added. + + @require_artresizer_compare + def test_reject_different_art(self): + self._setup_data(self.abbey_artpath) + album = self.add_album_fixture() + item = album.items()[0] + self.run_command('embedart', '-f', self.abbey_artpath) + config['embedart']['compare_threshold'] = 20 + self.run_command('embedart', '-f', self.abbey_differentpath) + mediafile = MediaFile(syspath(item.path)) + + self.assertEqual(mediafile.images[0].data, self.image_data, + u'Image written is not {0}'.format( + self.abbey_artpath)) + + @require_artresizer_compare + def test_accept_similar_art(self): + self._setup_data(self.abbey_similarpath) + album = self.add_album_fixture() + item = album.items()[0] + self.run_command('embedart', '-f', self.abbey_artpath) + config['embedart']['compare_threshold'] = 20 + self.run_command('embedart', '-f', self.abbey_similarpath) + mediafile = MediaFile(syspath(item.path)) + + self.assertEqual(mediafile.images[0].data, self.image_data, + u'Image written is not {0}'.format( + self.abbey_similarpath)) + + def test_non_ascii_album_path(self): + resource_path = os.path.join(_common.RSRC, b'image.mp3') + album = self.add_album_fixture() + trackpath = album.items()[0].path + albumpath = album.path + shutil.copy(syspath(resource_path), syspath(trackpath)) + + self.run_command('extractart', '-n', 'extracted') + + self.assertExists(os.path.join(albumpath, b'extracted.png')) + + +@patch('beets.art.subprocess') +@patch('beets.art.extract') +class ArtSimilarityTest(unittest.TestCase): + def setUp(self): + self.item = _common.item() + self.log = logging.getLogger('beets.embedart') + + def _similarity(self, threshold): + return art.check_art_similarity(self.log, self.item, b'path', + threshold) + + def _popen(self, status=0, stdout="", stderr=""): + """Create a mock `Popen` object.""" + popen = MagicMock(returncode=status) + popen.communicate.return_value = stdout, stderr + return popen + + def 
_mock_popens(self, mock_extract, mock_subprocess, compare_status=0, + compare_stdout="", compare_stderr="", convert_status=0): + mock_extract.return_value = b'extracted_path' + mock_subprocess.Popen.side_effect = [ + # The `convert` call. + self._popen(convert_status), + # The `compare` call. + self._popen(compare_status, compare_stdout, compare_stderr), + ] + + def test_compare_success_similar(self, mock_extract, mock_subprocess): + self._mock_popens(mock_extract, mock_subprocess, 0, "10", "err") + self.assertTrue(self._similarity(20)) + + def test_compare_success_different(self, mock_extract, mock_subprocess): + self._mock_popens(mock_extract, mock_subprocess, 0, "10", "err") + self.assertFalse(self._similarity(5)) + + def test_compare_status1_similar(self, mock_extract, mock_subprocess): + self._mock_popens(mock_extract, mock_subprocess, 1, "out", "10") + self.assertTrue(self._similarity(20)) + + def test_compare_status1_different(self, mock_extract, mock_subprocess): + self._mock_popens(mock_extract, mock_subprocess, 1, "out", "10") + self.assertFalse(self._similarity(5)) + + def test_compare_failed(self, mock_extract, mock_subprocess): + self._mock_popens(mock_extract, mock_subprocess, 2, "out", "10") + self.assertIsNone(self._similarity(20)) + + def test_compare_parsing_error(self, mock_extract, mock_subprocess): + self._mock_popens(mock_extract, mock_subprocess, 0, "foo", "bar") + self.assertIsNone(self._similarity(20)) + + def test_compare_parsing_error_and_failure(self, mock_extract, + mock_subprocess): + self._mock_popens(mock_extract, mock_subprocess, 1, "foo", "bar") + self.assertIsNone(self._similarity(20)) + + def test_convert_failure(self, mock_extract, mock_subprocess): + self._mock_popens(mock_extract, mock_subprocess, convert_status=1) + self.assertIsNone(self._similarity(20)) + def suite(): return unittest.TestLoader().loadTestsFromName(__name__) diff -Nru beets-1.3.8+dfsg/test/test_embyupdate.py beets-1.3.19/test/test_embyupdate.py --- beets-1.3.8+dfsg/test/test_embyupdate.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/test_embyupdate.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,213 @@ +# -*- coding: utf-8 -*- + +from __future__ import division, absolute_import, print_function + +from test._common import unittest +from test.helper import TestHelper +from beetsplug import embyupdate +import responses + + +class EmbyUpdateTest(unittest.TestCase, TestHelper): + def setUp(self): + self.setup_beets() + self.load_plugins('embyupdate') + + self.config['emby'] = { + u'host': u'localhost', + u'port': 8096, + u'username': u'username', + u'password': u'password' + } + + def tearDown(self): + self.teardown_beets() + self.unload_plugins() + + def test_api_url(self): + self.assertEqual( + embyupdate.api_url(self.config['emby']['host'].get(), + self.config['emby']['port'].get(), + '/Library/Refresh'), + 'http://localhost:8096/Library/Refresh?format=json' + ) + + def test_password_data(self): + self.assertEqual( + embyupdate.password_data(self.config['emby']['username'].get(), + self.config['emby']['password'].get()), + { + 'username': 'username', + 'password': '5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8', + 'passwordMd5': '5f4dcc3b5aa765d61d8327deb882cf99' + } + ) + + def test_create_header_no_token(self): + self.assertEqual( + embyupdate.create_headers('e8837bc1-ad67-520e-8cd2-f629e3155721'), + { + 'Authorization': 'MediaBrowser', + 'UserId': 'e8837bc1-ad67-520e-8cd2-f629e3155721', + 'Client': 'other', + 'Device': 'empy', + 'DeviceId': 'beets', + 'Version': '0.0.0' + 
} + ) + + def test_create_header_with_token(self): + self.assertEqual( + embyupdate.create_headers('e8837bc1-ad67-520e-8cd2-f629e3155721', + token='abc123'), + { + 'Authorization': 'MediaBrowser', + 'UserId': 'e8837bc1-ad67-520e-8cd2-f629e3155721', + 'Client': 'other', + 'Device': 'empy', + 'DeviceId': 'beets', + 'Version': '0.0.0', + 'X-MediaBrowser-Token': 'abc123' + } + ) + + @responses.activate + def test_get_token(self): + body = ('{"User":{"Name":"username", ' + '"ServerId":"1efa5077976bfa92bc71652404f646ec",' + '"Id":"2ec276a2642e54a19b612b9418a8bd3b","HasPassword":true,' + '"HasConfiguredPassword":true,' + '"HasConfiguredEasyPassword":false,' + '"LastLoginDate":"2015-11-09T08:35:03.6357440Z",' + '"LastActivityDate":"2015-11-09T08:35:03.6665060Z",' + '"Configuration":{"AudioLanguagePreference":"",' + '"PlayDefaultAudioTrack":true,"SubtitleLanguagePreference":"",' + '"DisplayMissingEpisodes":false,' + '"DisplayUnairedEpisodes":false,' + '"GroupMoviesIntoBoxSets":false,' + '"DisplayChannelsWithinViews":[],' + '"ExcludeFoldersFromGrouping":[],"GroupedFolders":[],' + '"SubtitleMode":"Default","DisplayCollectionsView":true,' + '"DisplayFoldersView":false,"EnableLocalPassword":false,' + '"OrderedViews":[],"IncludeTrailersInSuggestions":true,' + '"EnableCinemaMode":true,"LatestItemsExcludes":[],' + '"PlainFolderViews":[],"HidePlayedInLatest":true,' + '"DisplayChannelsInline":false},' + '"Policy":{"IsAdministrator":true,"IsHidden":false,' + '"IsDisabled":false,"BlockedTags":[],' + '"EnableUserPreferenceAccess":true,"AccessSchedules":[],' + '"BlockUnratedItems":[],' + '"EnableRemoteControlOfOtherUsers":false,' + '"EnableSharedDeviceControl":true,' + '"EnableLiveTvManagement":true,"EnableLiveTvAccess":true,' + '"EnableMediaPlayback":true,' + '"EnableAudioPlaybackTranscoding":true,' + '"EnableVideoPlaybackTranscoding":true,' + '"EnableContentDeletion":false,' + '"EnableContentDownloading":true,"EnableSync":true,' + '"EnableSyncTranscoding":true,"EnabledDevices":[],' + '"EnableAllDevices":true,"EnabledChannels":[],' + '"EnableAllChannels":true,"EnabledFolders":[],' + '"EnableAllFolders":true,"InvalidLoginAttemptCount":0,' + '"EnablePublicSharing":true}},' + '"SessionInfo":{"SupportedCommands":[],' + '"QueueableMediaTypes":[],"PlayableMediaTypes":[],' + '"Id":"89f3b33f8b3a56af22088733ad1d76b3",' + '"UserId":"2ec276a2642e54a19b612b9418a8bd3b",' + '"UserName":"username","AdditionalUsers":[],' + '"ApplicationVersion":"Unknown version",' + '"Client":"Unknown app",' + '"LastActivityDate":"2015-11-09T08:35:03.6665060Z",' + '"DeviceName":"Unknown device","DeviceId":"Unknown device id",' + '"SupportsRemoteControl":false,"PlayState":{"CanSeek":false,' + '"IsPaused":false,"IsMuted":false,"RepeatMode":"RepeatNone"}},' + '"AccessToken":"4b19180cf02748f7b95c7e8e76562fc8",' + '"ServerId":"1efa5077976bfa92bc71652404f646ec"}') + + responses.add(responses.POST, + ('http://localhost:8096' + '/Users/AuthenticateByName'), + body=body, + status=200, + content_type='application/json') + + headers = { + 'Authorization': 'MediaBrowser', + 'UserId': 'e8837bc1-ad67-520e-8cd2-f629e3155721', + 'Client': 'other', + 'Device': 'empy', + 'DeviceId': 'beets', + 'Version': '0.0.0' + } + + auth_data = { + 'username': 'username', + 'password': '5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8', + 'passwordMd5': '5f4dcc3b5aa765d61d8327deb882cf99' + } + + self.assertEqual( + embyupdate.get_token('localhost', 8096, headers, auth_data), + '4b19180cf02748f7b95c7e8e76562fc8') + + @responses.activate + def test_get_user(self): + body = 
('[{"Name":"username",' + '"ServerId":"1efa5077976bfa92bc71652404f646ec",' + '"Id":"2ec276a2642e54a19b612b9418a8bd3b","HasPassword":true,' + '"HasConfiguredPassword":true,' + '"HasConfiguredEasyPassword":false,' + '"LastLoginDate":"2015-11-09T08:35:03.6357440Z",' + '"LastActivityDate":"2015-11-09T08:42:39.3693220Z",' + '"Configuration":{"AudioLanguagePreference":"",' + '"PlayDefaultAudioTrack":true,"SubtitleLanguagePreference":"",' + '"DisplayMissingEpisodes":false,' + '"DisplayUnairedEpisodes":false,' + '"GroupMoviesIntoBoxSets":false,' + '"DisplayChannelsWithinViews":[],' + '"ExcludeFoldersFromGrouping":[],"GroupedFolders":[],' + '"SubtitleMode":"Default","DisplayCollectionsView":true,' + '"DisplayFoldersView":false,"EnableLocalPassword":false,' + '"OrderedViews":[],"IncludeTrailersInSuggestions":true,' + '"EnableCinemaMode":true,"LatestItemsExcludes":[],' + '"PlainFolderViews":[],"HidePlayedInLatest":true,' + '"DisplayChannelsInline":false},' + '"Policy":{"IsAdministrator":true,"IsHidden":false,' + '"IsDisabled":false,"BlockedTags":[],' + '"EnableUserPreferenceAccess":true,"AccessSchedules":[],' + '"BlockUnratedItems":[],' + '"EnableRemoteControlOfOtherUsers":false,' + '"EnableSharedDeviceControl":true,' + '"EnableLiveTvManagement":true,"EnableLiveTvAccess":true,' + '"EnableMediaPlayback":true,' + '"EnableAudioPlaybackTranscoding":true,' + '"EnableVideoPlaybackTranscoding":true,' + '"EnableContentDeletion":false,' + '"EnableContentDownloading":true,' + '"EnableSync":true,"EnableSyncTranscoding":true,' + '"EnabledDevices":[],"EnableAllDevices":true,' + '"EnabledChannels":[],"EnableAllChannels":true,' + '"EnabledFolders":[],"EnableAllFolders":true,' + '"InvalidLoginAttemptCount":0,"EnablePublicSharing":true}}]') + + responses.add(responses.GET, + 'http://localhost:8096/Users/Public', + body=body, + status=200, + content_type='application/json') + + response = embyupdate.get_user('localhost', 8096, 'username') + + self.assertEqual(response[0]['Id'], + '2ec276a2642e54a19b612b9418a8bd3b') + + self.assertEqual(response[0]['Name'], + 'username') + + +def suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_fetchart.py beets-1.3.19/test/test_fetchart.py --- beets-1.3.8+dfsg/test/test_fetchart.py 2014-09-14 00:19:04.000000000 +0000 +++ beets-1.3.19/test/test_fetchart.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Thomas Scholtes. +# Copyright 2016, Thomas Scholtes. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,9 +13,12 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. 
-import os.path -from _common import unittest -from helper import TestHelper +from __future__ import division, absolute_import, print_function + +import os +from test._common import unittest +from test.helper import TestHelper +from beets import util class FetchartCliTest(unittest.TestCase, TestHelper): @@ -22,25 +26,31 @@ def setUp(self): self.setup_beets() self.load_plugins('fetchart') + self.config['fetchart']['cover_names'] = 'c\xc3\xb6ver.jpg' + self.config['art_filename'] = 'mycover' + self.album = self.add_album() def tearDown(self): self.unload_plugins() self.teardown_beets() def test_set_art_from_folder(self): - self.config['fetchart']['cover_names'] = 'c\xc3\xb6ver.jpg' - self.config['art_filename'] = 'mycover' - album = self.add_album() - self.touch('c\xc3\xb6ver.jpg', dir=album.path, content='IMAGE') + self.touch(b'c\xc3\xb6ver.jpg', dir=self.album.path, content='IMAGE') self.run_command('fetchart') - cover_path = os.path.join(album.path, 'mycover.jpg') + cover_path = os.path.join(self.album.path, b'mycover.jpg') - album.load() - self.assertEqual(album['artpath'], cover_path) - with open(cover_path, 'r') as f: + self.album.load() + self.assertEqual(self.album['artpath'], cover_path) + with open(util.syspath(cover_path), 'r') as f: self.assertEqual(f.read(), 'IMAGE') + def test_filesystem_does_not_pick_up_folder(self): + os.makedirs(os.path.join(self.album.path, b'mycover.jpg')) + self.run_command('fetchart') + self.album.load() + self.assertEqual(self.album['artpath'], None) + def suite(): return unittest.TestLoader().loadTestsFromName(__name__) diff -Nru beets-1.3.8+dfsg/test/test_filefilter.py beets-1.3.19/test/test_filefilter.py --- beets-1.3.8+dfsg/test/test_filefilter.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/test_filefilter.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,209 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Malte Ried. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Tests for the `filefilter` plugin. 
+""" + +from __future__ import division, absolute_import, print_function + +import os +import shutil + +from test import _common +from test._common import unittest +from test.helper import capture_log +from test.test_importer import ImportHelper +from beets import config +from beets.mediafile import MediaFile +from beets.util import displayable_path, bytestring_path +from beetsplug.filefilter import FileFilterPlugin + + +class FileFilterPluginTest(unittest.TestCase, ImportHelper): + def setUp(self): + self.setup_beets() + self.__create_import_dir(2) + self._setup_import_session() + config['import']['pretend'] = True + + def tearDown(self): + self.teardown_beets() + + def __copy_file(self, dest_path, metadata): + # Copy files + resource_path = os.path.join(_common.RSRC, b'full.mp3') + shutil.copy(resource_path, dest_path) + medium = MediaFile(dest_path) + # Set metadata + for attr in metadata: + setattr(medium, attr, metadata[attr]) + medium.save() + + def __create_import_dir(self, count): + self.import_dir = os.path.join(self.temp_dir, b'testsrcdir') + if os.path.isdir(self.import_dir): + shutil.rmtree(self.import_dir) + + self.artist_path = os.path.join(self.import_dir, b'artist') + self.album_path = os.path.join(self.artist_path, b'album') + self.misc_path = os.path.join(self.import_dir, b'misc') + os.makedirs(self.album_path) + os.makedirs(self.misc_path) + + metadata = { + 'artist': 'Tag Artist', + 'album': 'Tag Album', + 'albumartist': None, + 'mb_trackid': None, + 'mb_albumid': None, + 'comp': None, + } + self.album_paths = [] + for i in range(count): + metadata['track'] = i + 1 + metadata['title'] = 'Tag Title Album %d' % (i + 1) + track_file = bytestring_path('%02d - track.mp3' % (i + 1)) + dest_path = os.path.join(self.album_path, track_file) + self.__copy_file(dest_path, metadata) + self.album_paths.append(dest_path) + + self.artist_paths = [] + metadata['album'] = None + for i in range(count): + metadata['track'] = i + 10 + metadata['title'] = 'Tag Title Artist %d' % (i + 1) + track_file = bytestring_path('track_%d.mp3' % (i + 1)) + dest_path = os.path.join(self.artist_path, track_file) + self.__copy_file(dest_path, metadata) + self.artist_paths.append(dest_path) + + self.misc_paths = [] + for i in range(count): + metadata['artist'] = 'Artist %d' % (i + 42) + metadata['track'] = i + 5 + metadata['title'] = 'Tag Title Misc %d' % (i + 1) + track_file = bytestring_path('track_%d.mp3' % (i + 1)) + dest_path = os.path.join(self.misc_path, track_file) + self.__copy_file(dest_path, metadata) + self.misc_paths.append(dest_path) + + def __run(self, expected_lines, singletons=False): + self.load_plugins('filefilter') + + import_files = [self.import_dir] + self._setup_import_session(singletons=singletons) + self.importer.paths = import_files + + with capture_log() as logs: + self.importer.run() + self.unload_plugins() + FileFilterPlugin.listeners = None + + logs = [line for line in logs if not line.startswith('Sending event:')] + + self.assertEqual(logs, expected_lines) + + def test_import_default(self): + """ The default configuration should import everything. 
+ """ + self.__run([ + 'Album: %s' % displayable_path(self.artist_path), + ' %s' % displayable_path(self.artist_paths[0]), + ' %s' % displayable_path(self.artist_paths[1]), + 'Album: %s' % displayable_path(self.album_path), + ' %s' % displayable_path(self.album_paths[0]), + ' %s' % displayable_path(self.album_paths[1]), + 'Album: %s' % displayable_path(self.misc_path), + ' %s' % displayable_path(self.misc_paths[0]), + ' %s' % displayable_path(self.misc_paths[1]) + ]) + + def test_import_nothing(self): + config['filefilter']['path'] = 'not_there' + self.__run(['No files imported from %s' % displayable_path( + self.import_dir)]) + + # Global options + def test_import_global(self): + config['filefilter']['path'] = '.*track_1.*\.mp3' + self.__run([ + 'Album: %s' % displayable_path(self.artist_path), + ' %s' % displayable_path(self.artist_paths[0]), + 'Album: %s' % displayable_path(self.misc_path), + ' %s' % displayable_path(self.misc_paths[0]), + ]) + self.__run([ + 'Singleton: %s' % displayable_path(self.artist_paths[0]), + 'Singleton: %s' % displayable_path(self.misc_paths[0]) + ], singletons=True) + + # Album options + def test_import_album(self): + config['filefilter']['album_path'] = '.*track_1.*\.mp3' + self.__run([ + 'Album: %s' % displayable_path(self.artist_path), + ' %s' % displayable_path(self.artist_paths[0]), + 'Album: %s' % displayable_path(self.misc_path), + ' %s' % displayable_path(self.misc_paths[0]), + ]) + self.__run([ + 'Singleton: %s' % displayable_path(self.artist_paths[0]), + 'Singleton: %s' % displayable_path(self.artist_paths[1]), + 'Singleton: %s' % displayable_path(self.album_paths[0]), + 'Singleton: %s' % displayable_path(self.album_paths[1]), + 'Singleton: %s' % displayable_path(self.misc_paths[0]), + 'Singleton: %s' % displayable_path(self.misc_paths[1]) + ], singletons=True) + + # Singleton options + def test_import_singleton(self): + config['filefilter']['singleton_path'] = '.*track_1.*\.mp3' + self.__run([ + 'Singleton: %s' % displayable_path(self.artist_paths[0]), + 'Singleton: %s' % displayable_path(self.misc_paths[0]) + ], singletons=True) + self.__run([ + 'Album: %s' % displayable_path(self.artist_path), + ' %s' % displayable_path(self.artist_paths[0]), + ' %s' % displayable_path(self.artist_paths[1]), + 'Album: %s' % displayable_path(self.album_path), + ' %s' % displayable_path(self.album_paths[0]), + ' %s' % displayable_path(self.album_paths[1]), + 'Album: %s' % displayable_path(self.misc_path), + ' %s' % displayable_path(self.misc_paths[0]), + ' %s' % displayable_path(self.misc_paths[1]) + ]) + + # Album and singleton options + def test_import_both(self): + config['filefilter']['album_path'] = '.*track_1.*\.mp3' + config['filefilter']['singleton_path'] = '.*track_2.*\.mp3' + self.__run([ + 'Album: %s' % displayable_path(self.artist_path), + ' %s' % displayable_path(self.artist_paths[0]), + 'Album: %s' % displayable_path(self.misc_path), + ' %s' % displayable_path(self.misc_paths[0]), + ]) + self.__run([ + 'Singleton: %s' % displayable_path(self.artist_paths[1]), + 'Singleton: %s' % displayable_path(self.misc_paths[1]) + ], singletons=True) + + +def suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_files.py beets-1.3.19/test/test_files.py --- beets-1.3.8+dfsg/test/test_files.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_files.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file 
is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,14 +15,16 @@ """Test file manipulation functionality of Item. """ +from __future__ import division, absolute_import, print_function + import shutil import os import stat from os.path import join -import _common -from _common import unittest -from _common import item, touch +from test import _common +from test._common import unittest +from test._common import item, touch import beets.library from beets import util @@ -31,8 +34,8 @@ super(MoveTest, self).setUp() # make a temporary file - self.path = join(self.temp_dir, 'temp.mp3') - shutil.copy(join(_common.RSRC, 'full.mp3'), self.path) + self.path = join(self.temp_dir, b'temp.mp3') + shutil.copy(join(_common.RSRC, b'full.mp3'), self.path) # add it to a temporary library self.lib = beets.library.Library(':memory:') @@ -40,7 +43,7 @@ self.lib.add(self.i) # set up the destination - self.libdir = join(self.temp_dir, 'testlibdir') + self.libdir = join(self.temp_dir, b'testlibdir') os.mkdir(self.libdir) self.lib.directory = self.libdir self.lib.path_formats = [('default', @@ -48,9 +51,9 @@ self.i.artist = 'one' self.i.album = 'two' self.i.title = 'three' - self.dest = join(self.libdir, 'one', 'two', 'three.mp3') + self.dest = join(self.libdir, b'one', b'two', b'three.mp3') - self.otherdir = join(self.temp_dir, 'testotherdir') + self.otherdir = join(self.temp_dir, b'testotherdir') def test_move_arrives(self): self.i.move() @@ -58,7 +61,7 @@ def test_move_to_custom_dir(self): self.i.move(basedir=self.otherdir) - self.assertExists(join(self.otherdir, 'one', 'two', 'three.mp3')) + self.assertExists(join(self.otherdir, b'one', b'two', b'three.mp3')) def test_move_departs(self): self.i.move() @@ -69,7 +72,7 @@ old_path = self.i.path self.assertExists(old_path) - self.i.artist = 'newArtist' + self.i.artist = u'newArtist' self.i.move() self.assertNotExists(old_path) self.assertNotExists(os.path.dirname(old_path)) @@ -100,15 +103,15 @@ def test_read_only_file_copied_writable(self): # Make the source file read-only. - os.chmod(self.path, 0444) + os.chmod(self.path, 0o444) try: self.i.move(copy=True) self.assertTrue(os.access(self.i.path, os.W_OK)) finally: # Make everything writable so it can be cleaned up. - os.chmod(self.path, 0777) - os.chmod(self.i.path, 0777) + os.chmod(self.path, 0o777) + os.chmod(self.i.path, 0o777) def test_move_avoids_collision_with_existing_file(self): # Make a conflicting file at the destination. 
@@ -121,6 +124,23 @@ self.assertEqual(os.path.dirname(self.i.path), os.path.dirname(dest)) + @unittest.skipUnless(_common.HAVE_SYMLINK, "need symlinks") + def test_link_arrives(self): + self.i.move(link=True) + self.assertExists(self.dest) + self.assertTrue(os.path.islink(self.dest)) + self.assertEqual(os.readlink(self.dest), self.path) + + @unittest.skipUnless(_common.HAVE_SYMLINK, "need symlinks") + def test_link_does_not_depart(self): + self.i.move(link=True) + self.assertExists(self.path) + + @unittest.skipUnless(_common.HAVE_SYMLINK, "need symlinks") + def test_link_changes_path(self): + self.i.move(link=True) + self.assertEqual(self.i.path, util.normpath(self.dest)) + class HelperTest(_common.TestCase): def test_ancestry_works_on_file(self): @@ -162,7 +182,7 @@ self.lib = beets.library.Library(':memory:') self.lib.path_formats = \ [('default', join('$albumartist', '$album', '$title'))] - self.libdir = os.path.join(self.temp_dir, 'testlibdir') + self.libdir = os.path.join(self.temp_dir, b'testlibdir') self.lib.directory = self.libdir self.i = item(self.lib) # Make a file for the item. @@ -172,19 +192,19 @@ # Make an album. self.ai = self.lib.add_album((self.i,)) # Alternate destination dir. - self.otherdir = os.path.join(self.temp_dir, 'testotherdir') + self.otherdir = os.path.join(self.temp_dir, b'testotherdir') def test_albuminfo_move_changes_paths(self): - self.ai.album = 'newAlbumName' + self.ai.album = u'newAlbumName' self.ai.move() self.ai.store() self.i.load() - self.assert_('newAlbumName' in self.i.path) + self.assertTrue(b'newAlbumName' in self.i.path) def test_albuminfo_move_moves_file(self): oldpath = self.i.path - self.ai.album = 'newAlbumName' + self.ai.album = u'newAlbumName' self.ai.move() self.ai.store() self.i.load() @@ -194,7 +214,7 @@ def test_albuminfo_move_copies_file(self): oldpath = self.i.path - self.ai.album = 'newAlbumName' + self.ai.album = u'newAlbumName' self.ai.move(True) self.ai.store() self.i.load() @@ -206,7 +226,7 @@ self.ai.move(basedir=self.otherdir) self.i.load() self.ai.store() - self.assertTrue('testotherdir' in self.i.path) + self.assertTrue(b'testotherdir' in self.i.path) class ArtFileTest(_common.TestCase): @@ -215,7 +235,7 @@ # Make library and item. self.lib = beets.library.Library(':memory:') - self.libdir = os.path.join(self.temp_dir, 'testlibdir') + self.libdir = os.path.join(self.temp_dir, b'testlibdir') self.lib.directory = self.libdir self.i = item(self.lib) self.i.path = self.i.destination() @@ -230,7 +250,7 @@ self.ai.artpath = self.art self.ai.store() # Alternate destination dir. 
- self.otherdir = os.path.join(self.temp_dir, 'testotherdir') + self.otherdir = os.path.join(self.temp_dir, b'testotherdir') def test_art_deleted_when_items_deleted(self): self.assertTrue(os.path.exists(self.art)) @@ -240,7 +260,7 @@ def test_art_moves_with_album(self): self.assertTrue(os.path.exists(self.art)) oldpath = self.i.path - self.ai.album = 'newAlbum' + self.ai.album = u'newAlbum' self.ai.move() self.i.load() @@ -259,16 +279,16 @@ self.assertNotExists(self.art) newart = self.lib.get_album(self.i).artpath self.assertExists(newart) - self.assertTrue('testotherdir' in newart) + self.assertTrue(b'testotherdir' in newart) def test_setart_copies_image(self): os.remove(self.art) - newart = os.path.join(self.libdir, 'newart.jpg') + newart = os.path.join(self.libdir, b'newart.jpg') touch(newart) i2 = item() i2.path = self.i.path - i2.artist = 'someArtist' + i2.artist = u'someArtist' ai = self.lib.add_album((i2,)) i2.move(True) @@ -280,11 +300,11 @@ os.remove(self.art) # Original art. - newart = os.path.join(self.libdir, 'newart.jpg') + newart = os.path.join(self.libdir, b'newart.jpg') touch(newart) i2 = item() i2.path = self.i.path - i2.artist = 'someArtist' + i2.artist = u'someArtist' ai = self.lib.add_album((i2,)) i2.move(True) ai.set_art(newart) @@ -294,11 +314,11 @@ self.assertTrue(os.path.exists(ai.artpath)) def test_setart_to_existing_but_unset_art_works(self): - newart = os.path.join(self.libdir, 'newart.jpg') + newart = os.path.join(self.libdir, b'newart.jpg') touch(newart) i2 = item() i2.path = self.i.path - i2.artist = 'someArtist' + i2.artist = u'someArtist' ai = self.lib.add_album((i2,)) i2.move(True) @@ -311,11 +331,11 @@ self.assertTrue(os.path.exists(ai.artpath)) def test_setart_to_conflicting_file_gets_new_path(self): - newart = os.path.join(self.libdir, 'newart.jpg') + newart = os.path.join(self.libdir, b'newart.jpg') touch(newart) i2 = item() i2.path = self.i.path - i2.artist = 'someArtist' + i2.artist = u'someArtist' ai = self.lib.add_album((i2,)) i2.move(True) @@ -332,14 +352,14 @@ def test_setart_sets_permissions(self): os.remove(self.art) - newart = os.path.join(self.libdir, 'newart.jpg') + newart = os.path.join(self.libdir, b'newart.jpg') touch(newart) - os.chmod(newart, 0400) # read-only + os.chmod(newart, 0o400) # read-only try: i2 = item() i2.path = self.i.path - i2.artist = 'someArtist' + i2.artist = u'someArtist' ai = self.lib.add_album((i2,)) i2.move(True) ai.set_art(newart) @@ -350,19 +370,19 @@ finally: # Make everything writable so it can be cleaned up. - os.chmod(newart, 0777) - os.chmod(ai.artpath, 0777) + os.chmod(newart, 0o777) + os.chmod(ai.artpath, 0o777) def test_move_last_file_moves_albumart(self): oldartpath = self.lib.albums()[0].artpath self.assertExists(oldartpath) - self.ai.album = 'different_album' + self.ai.album = u'different_album' self.ai.store() self.ai.items()[0].move() artpath = self.lib.albums()[0].artpath - self.assertTrue('different_album' in artpath) + self.assertTrue(b'different_album' in artpath) self.assertExists(artpath) self.assertNotExists(oldartpath) @@ -374,12 +394,12 @@ oldartpath = self.lib.albums()[0].artpath self.assertExists(oldartpath) - self.i.album = 'different_album' + self.i.album = u'different_album' self.i.album_id = None # detach from album self.i.move() artpath = self.lib.albums()[0].artpath - self.assertFalse('different_album' in artpath) + self.assertFalse(b'different_album' in artpath) self.assertEqual(artpath, oldartpath) self.assertExists(oldartpath) @@ -390,7 +410,7 @@ # Make library and item. 
self.lib = beets.library.Library(':memory:') - self.libdir = os.path.join(self.temp_dir, 'testlibdir') + self.libdir = os.path.join(self.temp_dir, b'testlibdir') self.lib.directory = self.libdir self.i = item(self.lib) self.i.path = self.i.destination() @@ -408,13 +428,13 @@ def test_removing_last_item_preserves_nonempty_dir(self): parent = os.path.dirname(self.i.path) - touch(os.path.join(parent, 'dummy.txt')) + touch(os.path.join(parent, b'dummy.txt')) self.i.remove(True) self.assertExists(parent) def test_removing_last_item_prunes_dir_with_blacklisted_file(self): parent = os.path.dirname(self.i.path) - touch(os.path.join(parent, '.DS_Store')) + touch(os.path.join(parent, b'.DS_Store')) self.i.remove(True) self.assertNotExists(parent) @@ -428,13 +448,13 @@ self.assertExists(self.libdir) def test_removing_item_outside_of_library_deletes_nothing(self): - self.lib.directory = os.path.join(self.temp_dir, 'xxx') + self.lib.directory = os.path.join(self.temp_dir, b'xxx') parent = os.path.dirname(self.i.path) self.i.remove(True) self.assertExists(parent) def test_removing_last_item_in_album_with_albumart_prunes_dir(self): - artfile = os.path.join(self.temp_dir, 'testart.jpg') + artfile = os.path.join(self.temp_dir, b'testart.jpg') touch(artfile) self.ai.set_art(artfile) self.ai.store() @@ -449,7 +469,7 @@ def setUp(self): super(SoftRemoveTest, self).setUp() - self.path = os.path.join(self.temp_dir, 'testfile') + self.path = os.path.join(self.temp_dir, b'testfile') touch(self.path) def test_soft_remove_deletes_file(self): @@ -458,20 +478,20 @@ def test_soft_remove_silent_on_no_file(self): try: - util.remove(self.path + 'XXX', True) + util.remove(self.path + b'XXX', True) except OSError: - self.fail('OSError when removing path') + self.fail(u'OSError when removing path') class SafeMoveCopyTest(_common.TestCase): def setUp(self): super(SafeMoveCopyTest, self).setUp() - self.path = os.path.join(self.temp_dir, 'testfile') + self.path = os.path.join(self.temp_dir, b'testfile') touch(self.path) - self.otherpath = os.path.join(self.temp_dir, 'testfile2') + self.otherpath = os.path.join(self.temp_dir, b'testfile2') touch(self.otherpath) - self.dest = self.path + '.dest' + self.dest = self.path + b'.dest' def test_successful_move(self): util.move(self.path, self.dest) @@ -504,9 +524,9 @@ def setUp(self): super(PruneTest, self).setUp() - self.base = os.path.join(self.temp_dir, 'testdir') + self.base = os.path.join(self.temp_dir, b'testdir') os.mkdir(self.base) - self.sub = os.path.join(self.base, 'subdir') + self.sub = os.path.join(self.base, b'subdir') os.mkdir(self.sub) def test_prune_existent_directory(self): @@ -515,7 +535,7 @@ self.assertNotExists(self.sub) def test_prune_nonexistent_directory(self): - util.prune_dirs(os.path.join(self.sub, 'another'), self.base) + util.prune_dirs(os.path.join(self.sub, b'another'), self.base) self.assertExists(self.base) self.assertNotExists(self.sub) @@ -524,37 +544,37 @@ def setUp(self): super(WalkTest, self).setUp() - self.base = os.path.join(self.temp_dir, 'testdir') + self.base = os.path.join(self.temp_dir, b'testdir') os.mkdir(self.base) - touch(os.path.join(self.base, 'y')) - touch(os.path.join(self.base, 'x')) - os.mkdir(os.path.join(self.base, 'd')) - touch(os.path.join(self.base, 'd', 'z')) + touch(os.path.join(self.base, b'y')) + touch(os.path.join(self.base, b'x')) + os.mkdir(os.path.join(self.base, b'd')) + touch(os.path.join(self.base, b'd', b'z')) def test_sorted_files(self): res = list(util.sorted_walk(self.base)) self.assertEqual(len(res), 2) 
self.assertEqual(res[0], - (self.base, ['d'], ['x', 'y'])) + (self.base, [b'd'], [b'x', b'y'])) self.assertEqual(res[1], - (os.path.join(self.base, 'd'), [], ['z'])) + (os.path.join(self.base, b'd'), [], [b'z'])) def test_ignore_file(self): - res = list(util.sorted_walk(self.base, ('x',))) + res = list(util.sorted_walk(self.base, (b'x',))) self.assertEqual(len(res), 2) self.assertEqual(res[0], - (self.base, ['d'], ['y'])) + (self.base, [b'd'], [b'y'])) self.assertEqual(res[1], - (os.path.join(self.base, 'd'), [], ['z'])) + (os.path.join(self.base, b'd'), [], [b'z'])) def test_ignore_directory(self): - res = list(util.sorted_walk(self.base, ('d',))) + res = list(util.sorted_walk(self.base, (b'd',))) self.assertEqual(len(res), 1) self.assertEqual(res[0], - (self.base, [], ['x', 'y'])) + (self.base, [], [b'x', b'y'])) def test_ignore_everything(self): - res = list(util.sorted_walk(self.base, ('*',))) + res = list(util.sorted_walk(self.base, (b'*',))) self.assertEqual(len(res), 1) self.assertEqual(res[0], (self.base, [], [])) @@ -564,28 +584,44 @@ def setUp(self): super(UniquePathTest, self).setUp() - self.base = os.path.join(self.temp_dir, 'testdir') + self.base = os.path.join(self.temp_dir, b'testdir') os.mkdir(self.base) - touch(os.path.join(self.base, 'x.mp3')) - touch(os.path.join(self.base, 'x.1.mp3')) - touch(os.path.join(self.base, 'x.2.mp3')) - touch(os.path.join(self.base, 'y.mp3')) + touch(os.path.join(self.base, b'x.mp3')) + touch(os.path.join(self.base, b'x.1.mp3')) + touch(os.path.join(self.base, b'x.2.mp3')) + touch(os.path.join(self.base, b'y.mp3')) def test_new_file_unchanged(self): - path = util.unique_path(os.path.join(self.base, 'z.mp3')) - self.assertEqual(path, os.path.join(self.base, 'z.mp3')) + path = util.unique_path(os.path.join(self.base, b'z.mp3')) + self.assertEqual(path, os.path.join(self.base, b'z.mp3')) def test_conflicting_file_appends_1(self): - path = util.unique_path(os.path.join(self.base, 'y.mp3')) - self.assertEqual(path, os.path.join(self.base, 'y.1.mp3')) + path = util.unique_path(os.path.join(self.base, b'y.mp3')) + self.assertEqual(path, os.path.join(self.base, b'y.1.mp3')) def test_conflicting_file_appends_higher_number(self): - path = util.unique_path(os.path.join(self.base, 'x.mp3')) - self.assertEqual(path, os.path.join(self.base, 'x.3.mp3')) + path = util.unique_path(os.path.join(self.base, b'x.mp3')) + self.assertEqual(path, os.path.join(self.base, b'x.3.mp3')) def test_conflicting_file_with_number_increases_number(self): - path = util.unique_path(os.path.join(self.base, 'x.1.mp3')) - self.assertEqual(path, os.path.join(self.base, 'x.3.mp3')) + path = util.unique_path(os.path.join(self.base, b'x.1.mp3')) + self.assertEqual(path, os.path.join(self.base, b'x.3.mp3')) + + +class MkDirAllTest(_common.TestCase): + def test_parent_exists(self): + path = os.path.join(self.temp_dir, b'foo', b'bar', b'baz', b'qux.mp3') + util.mkdirall(path) + self.assertTrue(os.path.isdir( + os.path.join(self.temp_dir, b'foo', b'bar', b'baz') + )) + + def test_child_does_not_exist(self): + path = os.path.join(self.temp_dir, b'foo', b'bar', b'baz', b'qux.mp3') + util.mkdirall(path) + self.assertTrue(not os.path.exists( + os.path.join(self.temp_dir, b'foo', b'bar', b'baz', b'qux.mp3') + )) def suite(): diff -Nru beets-1.3.8+dfsg/test/test_ftintitle.py beets-1.3.19/test/test_ftintitle.py --- beets-1.3.8+dfsg/test/test_ftintitle.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/test_ftintitle.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,185 @@ +# -*- coding: 
utf-8 -*- +# This file is part of beets. +# Copyright 2016, Fabrice Laporte. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Tests for the 'ftintitle' plugin.""" + +from __future__ import division, absolute_import, print_function + +from test._common import unittest +from test.helper import TestHelper +from beetsplug import ftintitle + + +class FtInTitlePluginFunctional(unittest.TestCase, TestHelper): + def setUp(self): + """Set up configuration""" + self.setup_beets() + self.load_plugins('ftintitle') + + def tearDown(self): + self.unload_plugins() + self.teardown_beets() + + def _ft_add_item(self, path, artist, title, aartist): + return self.add_item(path=path, + artist=artist, + artist_sort=artist, + title=title, + albumartist=aartist) + + def _ft_set_config(self, ftformat, drop=False, auto=True): + self.config['ftintitle']['format'] = ftformat + self.config['ftintitle']['drop'] = drop + self.config['ftintitle']['auto'] = auto + + def test_functional_drop(self): + item = self._ft_add_item('/', u'Alice ft Bob', u'Song 1', u'Alice') + self.run_command('ftintitle', '-d') + item.load() + self.assertEqual(item['artist'], u'Alice') + self.assertEqual(item['title'], u'Song 1') + + def test_functional_not_found(self): + item = self._ft_add_item('/', u'Alice ft Bob', u'Song 1', u'George') + self.run_command('ftintitle', '-d') + item.load() + # item should be unchanged + self.assertEqual(item['artist'], u'Alice ft Bob') + self.assertEqual(item['title'], u'Song 1') + + def test_functional_custom_format(self): + self._ft_set_config('feat. {0}') + item = self._ft_add_item('/', u'Alice ft Bob', u'Song 1', u'Alice') + self.run_command('ftintitle') + item.load() + self.assertEqual(item['artist'], u'Alice') + self.assertEqual(item['title'], u'Song 1 feat. Bob') + + self._ft_set_config('featuring {0}') + item = self._ft_add_item('/', u'Alice feat. Bob', u'Song 1', u'Alice') + self.run_command('ftintitle') + item.load() + self.assertEqual(item['artist'], u'Alice') + self.assertEqual(item['title'], u'Song 1 featuring Bob') + + self._ft_set_config('with {0}') + item = self._ft_add_item('/', u'Alice feat Bob', u'Song 1', u'Alice') + self.run_command('ftintitle') + item.load() + self.assertEqual(item['artist'], u'Alice') + self.assertEqual(item['title'], u'Song 1 with Bob') + + +class FtInTitlePluginTest(unittest.TestCase): + def setUp(self): + """Set up configuration""" + ftintitle.FtInTitlePlugin() + + def test_find_feat_part(self): + test_cases = [ + { + 'artist': 'Alice ft. 
Bob', + 'album_artist': 'Alice', + 'feat_part': 'Bob' + }, + { + 'artist': 'Alice feat Bob', + 'album_artist': 'Alice', + 'feat_part': 'Bob' + }, + { + 'artist': 'Alice featuring Bob', + 'album_artist': 'Alice', + 'feat_part': 'Bob' + }, + { + 'artist': 'Alice & Bob', + 'album_artist': 'Alice', + 'feat_part': 'Bob' + }, + { + 'artist': 'Alice and Bob', + 'album_artist': 'Alice', + 'feat_part': 'Bob' + }, + { + 'artist': 'Alice With Bob', + 'album_artist': 'Alice', + 'feat_part': 'Bob' + }, + { + 'artist': 'Alice defeat Bob', + 'album_artist': 'Alice', + 'feat_part': None + }, + { + 'artist': 'Alice & Bob', + 'album_artist': 'Bob', + 'feat_part': 'Alice' + }, + { + 'artist': 'Alice ft. Bob', + 'album_artist': 'Bob', + 'feat_part': 'Alice' + }, + { + 'artist': 'Alice ft. Carol', + 'album_artist': 'Bob', + 'feat_part': None + }, + ] + + for test_case in test_cases: + feat_part = ftintitle.find_feat_part( + test_case['artist'], + test_case['album_artist'] + ) + self.assertEqual(feat_part, test_case['feat_part']) + + def test_split_on_feat(self): + parts = ftintitle.split_on_feat(u'Alice ft. Bob') + self.assertEqual(parts, (u'Alice', u'Bob')) + parts = ftintitle.split_on_feat(u'Alice feat Bob') + self.assertEqual(parts, (u'Alice', u'Bob')) + parts = ftintitle.split_on_feat(u'Alice feat. Bob') + self.assertEqual(parts, (u'Alice', u'Bob')) + parts = ftintitle.split_on_feat(u'Alice featuring Bob') + self.assertEqual(parts, (u'Alice', u'Bob')) + parts = ftintitle.split_on_feat(u'Alice & Bob') + self.assertEqual(parts, (u'Alice', u'Bob')) + parts = ftintitle.split_on_feat(u'Alice and Bob') + self.assertEqual(parts, (u'Alice', u'Bob')) + parts = ftintitle.split_on_feat(u'Alice With Bob') + self.assertEqual(parts, (u'Alice', u'Bob')) + parts = ftintitle.split_on_feat(u'Alice defeat Bob') + self.assertEqual(parts, (u'Alice defeat Bob', None)) + + def test_contains_feat(self): + self.assertTrue(ftintitle.contains_feat(u'Alice ft. Bob')) + self.assertTrue(ftintitle.contains_feat(u'Alice feat. Bob')) + self.assertTrue(ftintitle.contains_feat(u'Alice feat Bob')) + self.assertTrue(ftintitle.contains_feat(u'Alice featuring Bob')) + self.assertTrue(ftintitle.contains_feat(u'Alice & Bob')) + self.assertTrue(ftintitle.contains_feat(u'Alice and Bob')) + self.assertTrue(ftintitle.contains_feat(u'Alice With Bob')) + self.assertFalse(ftintitle.contains_feat(u'Alice defeat Bob')) + self.assertFalse(ftintitle.contains_feat(u'Aliceft.Bob')) + + +def suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_hidden.py beets-1.3.19/test/test_hidden.py --- beets-1.3.8+dfsg/test/test_hidden.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/test_hidden.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Fabrice Laporte. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+ +"""Tests for the 'hidden' utility.""" + +from __future__ import division, absolute_import, print_function + +from test._common import unittest +import sys +import tempfile +from beets.util import hidden +import subprocess +import errno +import ctypes + + +class HiddenFileTest(unittest.TestCase): + def setUp(self): + pass + + def test_osx_hidden(self): + if not sys.platform == 'darwin': + self.skipTest('sys.platform is not darwin') + return + + with tempfile.NamedTemporaryFile(delete=False) as f: + try: + command = ["chflags", "hidden", f.name] + subprocess.Popen(command).wait() + except OSError as e: + if e.errno == errno.ENOENT: + self.skipTest("unable to find chflags") + else: + raise e + + self.assertTrue(hidden.is_hidden(f.name)) + + def test_windows_hidden(self): + if not sys.platform == 'win32': + self.skipTest('sys.platform is not windows') + return + + # FILE_ATTRIBUTE_HIDDEN = 2 (0x2) from GetFileAttributes documentation. + hidden_mask = 2 + + with tempfile.NamedTemporaryFile() as f: + # Hide the file using + success = ctypes.windll.kernel32.SetFileAttributesW(f.name, + hidden_mask) + + if not success: + self.skipTest("unable to set file attributes") + + self.assertTrue(hidden.is_hidden(f.name)) + + def test_other_hidden(self): + if sys.platform == 'darwin' or sys.platform == 'win32': + self.skipTest('sys.platform is known') + return + + with tempfile.NamedTemporaryFile(prefix='.tmp') as f: + self.assertTrue(hidden.is_hidden(f.name)) diff -Nru beets-1.3.8+dfsg/test/test_hook.py beets-1.3.19/test/test_hook.py --- beets-1.3.8+dfsg/test/test_hook.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/test_hook.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,117 @@ +# This file is part of beets. +# Copyright 2015, Thomas Scholtes. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+ +from __future__ import division, absolute_import, print_function + +import os.path +import tempfile + +from test import _common +from test._common import unittest +from test.helper import TestHelper + +from beets import config +from beets import plugins + + +def get_temporary_path(): + temporary_directory = tempfile._get_default_tempdir() + temporary_name = next(tempfile._get_candidate_names()) + + return os.path.join(temporary_directory, temporary_name) + + +class HookTest(_common.TestCase, TestHelper): + TEST_HOOK_COUNT = 5 + + def setUp(self): + self.setup_beets() # Converter is threaded + + def tearDown(self): + self.unload_plugins() + self.teardown_beets() + + def _add_hook(self, event, command): + hook = { + 'event': event, + 'command': command + } + + hooks = config['hook']['hooks'].get(list) if 'hook' in config else [] + hooks.append(hook) + + config['hook']['hooks'] = hooks + + def test_hook_no_arguments(self): + temporary_paths = [ + get_temporary_path() for i in range(self.TEST_HOOK_COUNT) + ] + + for index, path in enumerate(temporary_paths): + self._add_hook('test_no_argument_event_{0}'.format(index), + 'touch "{0}"'.format(path)) + + self.load_plugins('hook') + + for index in range(len(temporary_paths)): + plugins.send('test_no_argument_event_{0}'.format(index)) + + for path in temporary_paths: + self.assertTrue(os.path.isfile(path)) + os.remove(path) + + def test_hook_event_substitution(self): + temporary_directory = tempfile._get_default_tempdir() + event_names = ['test_event_event_{0}'.format(i) for i in + range(self.TEST_HOOK_COUNT)] + + for event in event_names: + self._add_hook(event, + 'touch "{0}/{{event}}"'.format(temporary_directory)) + + self.load_plugins('hook') + + for event in event_names: + plugins.send(event) + + for event in event_names: + path = os.path.join(temporary_directory, event) + + self.assertTrue(os.path.isfile(path)) + os.remove(path) + + def test_hook_argument_substitution(self): + temporary_paths = [ + get_temporary_path() for i in range(self.TEST_HOOK_COUNT) + ] + + for index, path in enumerate(temporary_paths): + self._add_hook('test_argument_event_{0}'.format(index), + 'touch "{path}"') + + self.load_plugins('hook') + + for index, path in enumerate(temporary_paths): + plugins.send('test_argument_event_{0}'.format(index), path=path) + + for path in temporary_paths: + self.assertTrue(os.path.isfile(path)) + os.remove(path) + + +def suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_ihate.py beets-1.3.19/test/test_ihate.py --- beets-1.3.8+dfsg/test/test_ihate.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_ihate.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,6 +1,10 @@ +# -*- coding: utf-8 -*- + """Tests for the 'ihate' plugin""" -from _common import unittest +from __future__ import division, absolute_import, print_function + +from test._common import unittest from beets import importer from beets.library import Item from beetsplug.ihate import IHatePlugin @@ -12,7 +16,7 @@ match_pattern = {} test_item = Item( - genre='TestGenre', + genre=u'TestGenre', album=u'TestAlbum', artist=u'TestArtist') task = importer.SingletonImportTask(None, test_item) @@ -21,25 +25,25 @@ self.assertFalse(IHatePlugin.do_i_hate_this(task, match_pattern)) # 1 query match. 
- match_pattern = ["artist:bad_artist", "artist:TestArtist"] + match_pattern = [u"artist:bad_artist", u"artist:TestArtist"] self.assertTrue(IHatePlugin.do_i_hate_this(task, match_pattern)) # 2 query matches, either should trigger. - match_pattern = ["album:test", "artist:testartist"] + match_pattern = [u"album:test", u"artist:testartist"] self.assertTrue(IHatePlugin.do_i_hate_this(task, match_pattern)) # Query is blocked by AND clause. - match_pattern = ["album:notthis genre:testgenre"] + match_pattern = [u"album:notthis genre:testgenre"] self.assertFalse(IHatePlugin.do_i_hate_this(task, match_pattern)) # Both queries are blocked by AND clause with unmatched condition. - match_pattern = ["album:notthis genre:testgenre", - "artist:testartist album:notthis"] + match_pattern = [u"album:notthis genre:testgenre", + u"artist:testartist album:notthis"] self.assertFalse(IHatePlugin.do_i_hate_this(task, match_pattern)) # Only one query should fire. - match_pattern = ["album:testalbum genre:testgenre", - "artist:testartist album:notthis"] + match_pattern = [u"album:testalbum genre:testgenre", + u"artist:testartist album:notthis"] self.assertTrue(IHatePlugin.do_i_hate_this(task, match_pattern)) diff -Nru beets-1.3.8+dfsg/test/test_importadded.py beets-1.3.19/test/test_importadded.py --- beets-1.3.8+dfsg/test/test_importadded.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/test_importadded.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,175 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Stig Inge Lea Bjornsen. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +from __future__ import division, absolute_import, print_function + +"""Tests for the `importadded` plugin.""" + +import os + +from test._common import unittest +from test.test_importer import ImportHelper, AutotagStub +from beets import importer +from beets import util +from beetsplug.importadded import ImportAddedPlugin + +_listeners = ImportAddedPlugin.listeners + + +def preserve_plugin_listeners(): + """Preserve the initial plugin listeners as they would otherwise be + deleted after the first setup / tear down cycle. 
+ """ + if not ImportAddedPlugin.listeners: + ImportAddedPlugin.listeners = _listeners + + +def modify_mtimes(paths, offset=-60000): + for i, path in enumerate(paths, start=1): + mstat = os.stat(path) + os.utime(path, (mstat.st_atime, mstat.st_mtime + offset * i)) + + +class ImportAddedTest(unittest.TestCase, ImportHelper): + + # The minimum mtime of the files to be imported + min_mtime = None + + def setUp(self): + preserve_plugin_listeners() + self.setup_beets() + self.load_plugins('importadded') + self._create_import_dir(2) + # Different mtimes on the files to be imported in order to test the + # plugin + modify_mtimes((mfile.path for mfile in self.media_files)) + self.min_mtime = min(os.path.getmtime(mfile.path) + for mfile in self.media_files) + self.matcher = AutotagStub().install() + self.matcher.macthin = AutotagStub.GOOD + self._setup_import_session() + self.importer.add_choice(importer.action.APPLY) + + def tearDown(self): + self.unload_plugins() + self.teardown_beets() + self.matcher.restore() + + def find_media_file(self, item): + """Find the pre-import MediaFile for an Item""" + for m in self.media_files: + if m.title.replace('Tag', 'Applied') == item.title: + return m + raise AssertionError(u"No MediaFile found for Item " + + util.displayable_path(item.path)) + + def assertEqualTimes(self, first, second, msg=None): # noqa + """For comparing file modification times at a sufficient precision""" + self.assertAlmostEqual(first, second, places=4, msg=msg) + + def assertAlbumImport(self): # noqa + self.importer.run() + album = self.lib.albums().get() + self.assertEqual(album.added, self.min_mtime) + for item in album.items(): + self.assertEqual(item.added, self.min_mtime) + + def test_import_album_with_added_dates(self): + self.assertAlbumImport() + + def test_import_album_inplace_with_added_dates(self): + self.config['import']['copy'] = False + self.config['import']['move'] = False + self.config['import']['link'] = False + self.assertAlbumImport() + + def test_import_album_with_preserved_mtimes(self): + self.config['importadded']['preserve_mtimes'] = True + self.importer.run() + album = self.lib.albums().get() + self.assertEqual(album.added, self.min_mtime) + for item in album.items(): + self.assertEqualTimes(item.added, self.min_mtime) + mediafile_mtime = os.path.getmtime(self.find_media_file(item).path) + self.assertEqualTimes(item.mtime, mediafile_mtime) + self.assertEqualTimes(os.path.getmtime(item.path), + mediafile_mtime) + + def test_reimported_album_skipped(self): + # Import and record the original added dates + self.importer.run() + album = self.lib.albums().get() + album_added_before = album.added + items_added_before = dict((item.path, item.added) + for item in album.items()) + # Newer Item path mtimes as if Beets had modified them + modify_mtimes(items_added_before.keys(), offset=10000) + # Reimport + self._setup_import_session(import_dir=album.path) + self.importer.run() + # Verify the reimported items + album = self.lib.albums().get() + self.assertEqualTimes(album.added, album_added_before) + items_added_after = dict((item.path, item.added) + for item in album.items()) + for item_path, added_after in items_added_after.iteritems(): + self.assertEqualTimes(items_added_before[item_path], added_after, + u"reimport modified Item.added for " + + util.displayable_path(item_path)) + + def test_import_singletons_with_added_dates(self): + self.config['import']['singletons'] = True + self.importer.run() + for item in self.lib.items(): + mfile = self.find_media_file(item) + 
self.assertEqualTimes(item.added, os.path.getmtime(mfile.path)) + + def test_import_singletons_with_preserved_mtimes(self): + self.config['import']['singletons'] = True + self.config['importadded']['preserve_mtimes'] = True + self.importer.run() + for item in self.lib.items(): + mediafile_mtime = os.path.getmtime(self.find_media_file(item).path) + self.assertEqualTimes(item.added, mediafile_mtime) + self.assertEqualTimes(item.mtime, mediafile_mtime) + self.assertEqualTimes(os.path.getmtime(item.path), + mediafile_mtime) + + def test_reimported_singletons_skipped(self): + self.config['import']['singletons'] = True + # Import and record the original added dates + self.importer.run() + items_added_before = dict((item.path, item.added) + for item in self.lib.items()) + # Newer Item path mtimes as if Beets had modified them + modify_mtimes(items_added_before.keys(), offset=10000) + # Reimport + import_dir = os.path.dirname(list(items_added_before.keys())[0]) + self._setup_import_session(import_dir=import_dir, singletons=True) + self.importer.run() + # Verify the reimported items + items_added_after = dict((item.path, item.added) + for item in self.lib.items()) + for item_path, added_after in items_added_after.iteritems(): + self.assertEqualTimes(items_added_before[item_path], added_after, + u"reimport modified Item.added for " + + util.displayable_path(item_path)) + + +def suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_importer.py beets-1.3.19/test/test_importer.py --- beets-1.3.8+dfsg/test/test_importer.py 2014-09-16 17:25:24.000000000 +0000 +++ beets-1.3.19/test/test_importer.py 2016-06-26 00:42:09.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,26 +13,33 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. +from __future__ import division, absolute_import, print_function + """Tests for the general importer functionality. """ import os import re import shutil import StringIO +import unicodedata +import sys from tempfile import mkstemp from zipfile import ZipFile from tarfile import TarFile from mock import patch -import _common -from _common import unittest -from helper import TestImportSession, TestHelper, has_program +from test import _common +from test._common import unittest +from beets.util import displayable_path, bytestring_path +from test.helper import TestImportSession, TestHelper, has_program, capture_log from beets import importer from beets.importer import albums_in_dir from beets.mediafile import MediaFile from beets import autotag from beets.autotag import AlbumInfo, TrackInfo, AlbumMatch from beets import config +from beets import logging +from beets import util class AutotagStub(object): @@ -39,10 +47,10 @@ autotagger returns. 
""" - NONE = 'NONE' - IDENT = 'IDENT' - GOOD = 'GOOD' - BAD = 'BAD' + NONE = 'NONE' + IDENT = 'IDENT' + GOOD = 'GOOD' + BAD = 'BAD' MISSING = 'MISSING' """Generate an album match for all but one track """ @@ -53,15 +61,21 @@ def install(self): self.mb_match_album = autotag.mb.match_album self.mb_match_track = autotag.mb.match_track + self.mb_album_for_id = autotag.mb.album_for_id + self.mb_track_for_id = autotag.mb.track_for_id autotag.mb.match_album = self.match_album autotag.mb.match_track = self.match_track + autotag.mb.album_for_id = self.album_for_id + autotag.mb.track_for_id = self.track_for_id return self def restore(self): autotag.mb.match_album = self.mb_match_album - autotag.mb.match_track = self.mb_match_album + autotag.mb.match_track = self.mb_match_track + autotag.mb.album_for_id = self.mb_album_for_id + autotag.mb.track_for_id = self.mb_track_for_id def match_album(self, albumartist, album, tracks): if self.matching == self.IDENT: @@ -84,15 +98,23 @@ track_id=u'trackid', artist=artist.replace('Tag', 'Applied'), artist_id=u'artistid', - length=1 + length=1, + index=0, ) + def album_for_id(self, mbid): + return None + + def track_for_id(self, mbid): + return None + def _make_track_match(self, artist, album, number): return TrackInfo( title=u'Applied Title %d' % number, track_id=u'match %d' % number, artist=artist, - length=1 + length=1, + index=0, ) def _make_album_match(self, artist, album, tracks, distance=0, missing=0): @@ -101,19 +123,19 @@ else: id = '' if artist is None: - artist = "Various Artists" + artist = u"Various Artists" else: artist = artist.replace('Tag', 'Applied') + id album = album.replace('Tag', 'Applied') + id - trackInfos = [] + track_infos = [] for i in range(tracks - missing): - trackInfos.append(self._make_track_match(artist, album, i + 1)) + track_infos.append(self._make_track_match(artist, album, i + 1)) return AlbumInfo( artist=artist, album=album, - tracks=trackInfos, + tracks=track_infos, va=False, album_id=u'albumid' + id, artist_id=u'artistid' + id, @@ -130,9 +152,9 @@ def setup_beets(self, disk=False): super(ImportHelper, self).setup_beets(disk) self.lib.path_formats = [ - ('default', os.path.join('$artist', '$album', '$title')), - ('singleton:true', os.path.join('singletons', '$title')), - ('comp:true', os.path.join('compilations', '$album', '$title')), + (u'default', os.path.join('$artist', '$album', '$title')), + (u'singleton:true', os.path.join('singletons', '$title')), + (u'comp:true', os.path.join('compilations', '$album', '$title')), ] def _create_import_dir(self, count=3): @@ -149,18 +171,18 @@ :param count: Number of files to create """ - self.import_dir = os.path.join(self.temp_dir, 'testsrcdir') + self.import_dir = os.path.join(self.temp_dir, b'testsrcdir') if os.path.isdir(self.import_dir): shutil.rmtree(self.import_dir) - album_path = os.path.join(self.import_dir, 'the_album') + album_path = os.path.join(self.import_dir, b'the_album') os.makedirs(album_path) - resource_path = os.path.join(_common.RSRC, 'full.mp3') + resource_path = os.path.join(_common.RSRC, b'full.mp3') metadata = { - 'artist': 'Tag Artist', - 'album': 'Tag Album', + 'artist': u'Tag Artist', + 'album': u'Tag Album', 'albumartist': None, 'mb_trackid': None, 'mb_albumid': None, @@ -169,13 +191,16 @@ self.media_files = [] for i in range(count): # Copy files - medium_path = os.path.join(album_path, 'track_%d.mp3' % (i + 1)) + medium_path = os.path.join( + album_path, + bytestring_path('track_%d.mp3' % (i + 1)) + ) shutil.copy(resource_path, medium_path) medium = 
MediaFile(medium_path) # Set metadata metadata['track'] = i + 1 - metadata['title'] = 'Tag Title %d' % (i + 1) + metadata['title'] = u'Tag Title %d' % (i + 1) for attr in metadata: setattr(medium, attr, metadata[attr]) medium.save() @@ -184,7 +209,7 @@ def _setup_import_session(self, import_dir=None, delete=False, threaded=False, copy=True, singletons=False, - move=False, autotag=True): + move=False, autotag=True, link=False): config['import']['copy'] = copy config['import']['delete'] = delete config['import']['timid'] = True @@ -193,9 +218,10 @@ config['import']['move'] = move config['import']['autotag'] = autotag config['import']['resume'] = False + config['import']['link'] = link self.importer = TestImportSession( - self.lib, logfile=None, query=None, + self.lib, loghandler=None, query=None, paths=[import_dir or self.import_dir] ) @@ -215,6 +241,7 @@ self.assertEqual(len(os.listdir(self.libdir)), 0) +@_common.slow_test() class NonAutotaggedImportTest(_common.TestCase, ImportHelper): def setUp(self): self.setup_beets(disk=True) @@ -228,14 +255,14 @@ self.importer.run() albums = self.lib.albums() self.assertEqual(len(albums), 1) - self.assertEqual(albums[0].albumartist, 'Tag Artist') + self.assertEqual(albums[0].albumartist, u'Tag Artist') def test_import_copy_arrives(self): self.importer.run() for mediafile in self.import_media: self.assert_file_in_lib( - 'Tag Artist', 'Tag Album', '%s.mp3' % mediafile.title - ) + b'Tag Artist', b'Tag Album', + util.bytestring_path('{0}.mp3'.format(mediafile.title))) def test_threaded_import_copy_arrives(self): config['threaded'] = True @@ -243,8 +270,8 @@ self.importer.run() for mediafile in self.import_media: self.assert_file_in_lib( - 'Tag Artist', 'Tag Album', '%s.mp3' % mediafile.title - ) + b'Tag Artist', b'Tag Album', + util.bytestring_path('{0}.mp3'.format(mediafile.title))) def test_import_with_move_deletes_import_files(self): config['import']['move'] = True @@ -258,19 +285,19 @@ def test_import_with_move_prunes_directory_empty(self): config['import']['move'] = True - self.assertExists(os.path.join(self.import_dir, 'the_album')) + self.assertExists(os.path.join(self.import_dir, b'the_album')) self.importer.run() - self.assertNotExists(os.path.join(self.import_dir, 'the_album')) + self.assertNotExists(os.path.join(self.import_dir, b'the_album')) def test_import_with_move_prunes_with_extra_clutter(self): - f = open(os.path.join(self.import_dir, 'the_album', 'alog.log'), 'w') + f = open(os.path.join(self.import_dir, b'the_album', b'alog.log'), 'w') f.close() config['clutter'] = ['*.log'] config['import']['move'] = True - self.assertExists(os.path.join(self.import_dir, 'the_album')) + self.assertExists(os.path.join(self.import_dir, b'the_album')) self.importer.run() - self.assertNotExists(os.path.join(self.import_dir, 'the_album')) + self.assertNotExists(os.path.join(self.import_dir, b'the_album')) def test_threaded_import_move_arrives(self): config['import']['move'] = True @@ -279,8 +306,8 @@ self.importer.run() for mediafile in self.import_media: self.assert_file_in_lib( - 'Tag Artist', 'Tag Album', '%s.mp3' % mediafile.title - ) + b'Tag Artist', b'Tag Album', + util.bytestring_path('{0}.mp3'.format(mediafile.title))) def test_threaded_import_move_deletes_import(self): config['import']['move'] = True @@ -305,9 +332,58 @@ def test_import_with_delete_prunes_directory_empty(self): config['import']['delete'] = True - self.assertExists(os.path.join(self.import_dir, 'the_album')) + self.assertExists(os.path.join(self.import_dir, b'the_album')) + 
self.importer.run() + self.assertNotExists(os.path.join(self.import_dir, b'the_album')) + + @unittest.skipUnless(_common.HAVE_SYMLINK, "need symlinks") + def test_import_link_arrives(self): + config['import']['link'] = True self.importer.run() - self.assertNotExists(os.path.join(self.import_dir, 'the_album')) + for mediafile in self.import_media: + filename = os.path.join( + self.libdir, + b'Tag Artist', b'Tag Album', + util.bytestring_path('{0}.mp3'.format(mediafile.title)) + ) + self.assertExists(filename) + self.assertTrue(os.path.islink(filename)) + self.assert_equal_path(os.readlink(filename), mediafile.path) + + +def create_archive(session): + (handle, path) = mkstemp(dir=session.temp_dir) + os.close(handle) + archive = ZipFile(path, mode='w') + archive.write(os.path.join(_common.RSRC, 'full.mp3'), + 'full.mp3') + archive.close() + return path + + +class RmTempTest(unittest.TestCase, ImportHelper, _common.Assertions): + """Tests that temporarily extracted archives are properly removed + after usage. + """ + + def setUp(self): + self.setup_beets() + self.want_resume = False + self.config['incremental'] = False + self._old_home = None + + def tearDown(self): + self.teardown_beets() + + def test_rm(self): + zip_path = create_archive(self) + archive_task = importer.ArchiveImportTask(zip_path) + archive_task.extract() + tmp_path = archive_task.toppath + self._setup_import_session(autotag=False, import_dir=tmp_path) + self.assertExists(tmp_path) + archive_task.finalize(self) + self.assertNotExists(tmp_path) class ImportZipTest(unittest.TestCase, ImportHelper): @@ -319,7 +395,7 @@ self.teardown_beets() def test_import_zip(self): - zip_path = self.create_archive() + zip_path = create_archive(self) self.assertEqual(len(self.lib.items()), 0) self.assertEqual(len(self.lib.albums()), 0) @@ -328,15 +404,6 @@ self.assertEqual(len(self.lib.items()), 1) self.assertEqual(len(self.lib.albums()), 1) - def create_archive(self): - (handle, path) = mkstemp(dir=self.temp_dir) - os.close(handle) - archive = ZipFile(path, mode='w') - archive.write(os.path.join(_common.RSRC, 'full.mp3'), - 'full.mp3') - archive.close() - return path - class ImportTarTest(ImportZipTest): @@ -350,14 +417,14 @@ return path -@unittest.skipIf(not has_program('unrar'), 'unrar program not found') +@unittest.skipIf(not has_program('unrar'), u'unrar program not found') class ImportRarTest(ImportZipTest): def create_archive(self): return os.path.join(_common.RSRC, 'archive.rar') -@unittest.skip('Implment me!') +@unittest.skip('Implement me!') class ImportPasswordRarTest(ImportZipTest): def create_archive(self): @@ -385,7 +452,7 @@ self.importer.add_choice(importer.action.ASIS) self.importer.run() - self.assertEqual(self.lib.items().get().title, 'Tag Title 1') + self.assertEqual(self.lib.items().get().title, u'Tag Title 1') def test_apply_asis_does_not_add_album(self): self.assertEqual(self.lib.albums().get(), None) @@ -399,14 +466,14 @@ self.importer.add_choice(importer.action.ASIS) self.importer.run() - self.assert_file_in_lib('singletons', 'Tag Title 1.mp3') + self.assert_file_in_lib(b'singletons', b'Tag Title 1.mp3') def test_apply_candidate_adds_track(self): self.assertEqual(self.lib.items().get(), None) self.importer.add_choice(importer.action.APPLY) self.importer.run() - self.assertEqual(self.lib.items().get().title, 'Applied Title 1') + self.assertEqual(self.lib.items().get().title, u'Applied Title 1') def test_apply_candidate_does_not_add_album(self): self.importer.add_choice(importer.action.APPLY) @@ -418,7 +485,7 @@ 
self.importer.add_choice(importer.action.APPLY) self.importer.run() - self.assert_file_in_lib('singletons', 'Applied Title 1.mp3') + self.assert_file_in_lib(b'singletons', b'Applied Title 1.mp3') def test_skip_does_not_add_first_track(self): self.importer.add_choice(importer.action.SKIP) @@ -433,12 +500,12 @@ self.assertEqual(len(self.lib.items()), 1) def test_import_single_files(self): - resource_path = os.path.join(_common.RSRC, u'empty.mp3') - single_path = os.path.join(self.import_dir, u'track_2.mp3') + resource_path = os.path.join(_common.RSRC, b'empty.mp3') + single_path = os.path.join(self.import_dir, b'track_2.mp3') shutil.copy(resource_path, single_path) import_files = [ - os.path.join(self.import_dir, u'the_album'), + os.path.join(self.import_dir, b'the_album'), single_path ] self._setup_import_session(singletons=False) @@ -471,34 +538,35 @@ self.importer.add_choice(importer.action.ASIS) self.importer.run() - self.assertEqual(self.lib.albums().get().album, 'Tag Album') + self.assertEqual(self.lib.albums().get().album, u'Tag Album') def test_apply_asis_adds_tracks(self): self.assertEqual(self.lib.items().get(), None) self.importer.add_choice(importer.action.ASIS) self.importer.run() - self.assertEqual(self.lib.items().get().title, 'Tag Title 1') + self.assertEqual(self.lib.items().get().title, u'Tag Title 1') def test_apply_asis_adds_album_path(self): self.assert_lib_dir_empty() self.importer.add_choice(importer.action.ASIS) self.importer.run() - self.assert_file_in_lib('Tag Artist', 'Tag Album', 'Tag Title 1.mp3') + self.assert_file_in_lib( + b'Tag Artist', b'Tag Album', b'Tag Title 1.mp3') def test_apply_candidate_adds_album(self): self.assertEqual(self.lib.albums().get(), None) self.importer.add_choice(importer.action.APPLY) self.importer.run() - self.assertEqual(self.lib.albums().get().album, 'Applied Album') + self.assertEqual(self.lib.albums().get().album, u'Applied Album') def test_apply_candidate_adds_tracks(self): self.assertEqual(self.lib.items().get(), None) self.importer.add_choice(importer.action.APPLY) self.importer.run() - self.assertEqual(self.lib.items().get().title, 'Applied Title 1') + self.assertEqual(self.lib.items().get().title, u'Applied Title 1') def test_apply_candidate_adds_album_path(self): self.assert_lib_dir_empty() @@ -506,13 +574,13 @@ self.importer.add_choice(importer.action.APPLY) self.importer.run() self.assert_file_in_lib( - 'Applied Artist', 'Applied Album', 'Applied Title 1.mp3' - ) + b'Applied Artist', b'Applied Album', b'Applied Title 1.mp3') def test_apply_with_move_deletes_import(self): config['import']['move'] = True - import_file = os.path.join(self.import_dir, 'the_album', 'track_1.mp3') + import_file = os.path.join( + self.import_dir, b'the_album', b'track_1.mp3') self.assertExists(import_file) self.importer.add_choice(importer.action.APPLY) @@ -522,7 +590,8 @@ def test_apply_with_delete_deletes_import(self): config['import']['delete'] = True - import_file = os.path.join(self.import_dir, 'the_album', 'track_1.mp3') + import_file = os.path.join(self.import_dir, + b'the_album', b'track_1.mp3') self.assertExists(import_file) self.importer.add_choice(importer.action.APPLY) @@ -536,8 +605,8 @@ def test_skip_non_album_dirs(self): self.assertTrue(os.path.isdir( - os.path.join(self.import_dir, 'the_album'))) - self.touch('cruft', dir=self.import_dir) + os.path.join(self.import_dir, b'the_album'))) + self.touch(b'cruft', dir=self.import_dir) self.importer.add_choice(importer.action.APPLY) self.importer.run() 
self.assertEqual(len(self.lib.albums()), 1) @@ -549,6 +618,35 @@ self.importer.run() self.assertEqual(len(self.lib.items()), 1) + def test_empty_directory_warning(self): + import_dir = os.path.join(self.temp_dir, b'empty') + self.touch(b'non-audio', dir=import_dir) + self._setup_import_session(import_dir=import_dir) + with capture_log() as logs: + self.importer.run() + + import_dir = displayable_path(import_dir) + self.assertIn(u'No files imported from {0}'.format(import_dir), logs) + + def test_empty_directory_singleton_warning(self): + import_dir = os.path.join(self.temp_dir, b'empty') + self.touch(b'non-audio', dir=import_dir) + self._setup_import_session(import_dir=import_dir, singletons=True) + with capture_log() as logs: + self.importer.run() + + import_dir = displayable_path(import_dir) + self.assertIn(u'No files imported from {0}'.format(import_dir), logs) + + def test_asis_no_data_source(self): + self.assertEqual(self.lib.items().get(), None) + + self.importer.add_choice(importer.action.ASIS) + self.importer.run() + + with self.assertRaises(AttributeError): + self.lib.items().get().data_source + class ImportTracksTest(_common.TestCase, ImportHelper): """Test TRACKS and APPLY choice. @@ -571,7 +669,7 @@ self.importer.add_choice(importer.action.APPLY) self.importer.add_choice(importer.action.APPLY) self.importer.run() - self.assertEqual(self.lib.items().get().title, 'Applied Title 1') + self.assertEqual(self.lib.items().get().title, u'Applied Title 1') self.assertEqual(self.lib.albums().get(), None) def test_apply_tracks_adds_singleton_path(self): @@ -581,7 +679,7 @@ self.importer.add_choice(importer.action.APPLY) self.importer.add_choice(importer.action.APPLY) self.importer.run() - self.assert_file_in_lib('singletons', 'Applied Title 1.mp3') + self.assert_file_in_lib(b'singletons', b'Applied Title 1.mp3') class ImportCompilationTest(_common.TestCase, ImportHelper): @@ -600,27 +698,27 @@ def test_asis_homogenous_sets_albumartist(self): self.importer.add_choice(importer.action.ASIS) self.importer.run() - self.assertEqual(self.lib.albums().get().albumartist, 'Tag Artist') + self.assertEqual(self.lib.albums().get().albumartist, u'Tag Artist') for item in self.lib.items(): - self.assertEqual(item.albumartist, 'Tag Artist') + self.assertEqual(item.albumartist, u'Tag Artist') def test_asis_heterogenous_sets_various_albumartist(self): - self.import_media[0].artist = 'Other Artist' + self.import_media[0].artist = u'Other Artist' self.import_media[0].save() - self.import_media[1].artist = 'Another Artist' + self.import_media[1].artist = u'Another Artist' self.import_media[1].save() self.importer.add_choice(importer.action.ASIS) self.importer.run() self.assertEqual(self.lib.albums().get().albumartist, - 'Various Artists') + u'Various Artists') for item in self.lib.items(): - self.assertEqual(item.albumartist, 'Various Artists') + self.assertEqual(item.albumartist, u'Various Artists') def test_asis_heterogenous_sets_sompilation(self): - self.import_media[0].artist = 'Other Artist' + self.import_media[0].artist = u'Other Artist' self.import_media[0].save() - self.import_media[1].artist = 'Another Artist' + self.import_media[1].artist = u'Another Artist' self.import_media[1].save() self.importer.add_choice(importer.action.ASIS) @@ -629,33 +727,33 @@ self.assertTrue(item.comp) def test_asis_sets_majority_albumartist(self): - self.import_media[0].artist = 'Other Artist' + self.import_media[0].artist = u'Other Artist' self.import_media[0].save() - self.import_media[1].artist = 'Other Artist' + 
self.import_media[1].artist = u'Other Artist' self.import_media[1].save() self.importer.add_choice(importer.action.ASIS) self.importer.run() - self.assertEqual(self.lib.albums().get().albumartist, 'Other Artist') + self.assertEqual(self.lib.albums().get().albumartist, u'Other Artist') for item in self.lib.items(): - self.assertEqual(item.albumartist, 'Other Artist') + self.assertEqual(item.albumartist, u'Other Artist') def test_asis_albumartist_tag_sets_albumartist(self): - self.import_media[0].artist = 'Other Artist' - self.import_media[1].artist = 'Another Artist' + self.import_media[0].artist = u'Other Artist' + self.import_media[1].artist = u'Another Artist' for mediafile in self.import_media: - mediafile.albumartist = 'Album Artist' - mediafile.mb_albumartistid = 'Album Artist ID' + mediafile.albumartist = u'Album Artist' + mediafile.mb_albumartistid = u'Album Artist ID' mediafile.save() self.importer.add_choice(importer.action.ASIS) self.importer.run() - self.assertEqual(self.lib.albums().get().albumartist, 'Album Artist') + self.assertEqual(self.lib.albums().get().albumartist, u'Album Artist') self.assertEqual(self.lib.albums().get().mb_albumartistid, - 'Album Artist ID') + u'Album Artist ID') for item in self.lib.items(): - self.assertEqual(item.albumartist, 'Album Artist') - self.assertEqual(item.mb_albumartistid, 'Album Artist ID') + self.assertEqual(item.albumartist, u'Album Artist') + self.assertEqual(item.mb_albumartistid, u'Album Artist ID') class ImportExistingTest(_common.TestCase, ImportHelper): @@ -706,68 +804,68 @@ def test_asis_updates_metadata(self): self.setup_importer.run() medium = MediaFile(self.lib.items().get().path) - medium.title = 'New Title' + medium.title = u'New Title' medium.save() self.importer.add_choice(importer.action.ASIS) self.importer.run() - self.assertEqual(self.lib.items().get().title, 'New Title') + self.assertEqual(self.lib.items().get().title, u'New Title') def test_asis_updated_moves_file(self): self.setup_importer.run() medium = MediaFile(self.lib.items().get().path) - medium.title = 'New Title' + medium.title = u'New Title' medium.save() - old_path = os.path.join('Applied Artist', 'Applied Album', - 'Applied Title 1.mp3') + old_path = os.path.join(b'Applied Artist', b'Applied Album', + b'Applied Title 1.mp3') self.assert_file_in_lib(old_path) self.importer.add_choice(importer.action.ASIS) self.importer.run() - self.assert_file_in_lib('Applied Artist', 'Applied Album', - 'New Title.mp3') + self.assert_file_in_lib(b'Applied Artist', b'Applied Album', + b'New Title.mp3') self.assert_file_not_in_lib(old_path) def test_asis_updated_without_copy_does_not_move_file(self): self.setup_importer.run() medium = MediaFile(self.lib.items().get().path) - medium.title = 'New Title' + medium.title = u'New Title' medium.save() - old_path = os.path.join('Applied Artist', 'Applied Album', - 'Applied Title 1.mp3') + old_path = os.path.join(b'Applied Artist', b'Applied Album', + b'Applied Title 1.mp3') self.assert_file_in_lib(old_path) config['import']['copy'] = False self.importer.add_choice(importer.action.ASIS) self.importer.run() - self.assert_file_not_in_lib('Applied Artist', 'Applied Album', - 'New Title.mp3') + self.assert_file_not_in_lib(b'Applied Artist', b'Applied Album', + b'New Title.mp3') self.assert_file_in_lib(old_path) def test_outside_file_is_copied(self): config['import']['copy'] = False self.setup_importer.run() - self.assertEqual(self.lib.items().get().path, - self.import_media[0].path) + self.assert_equal_path(self.lib.items().get().path, + 
self.import_media[0].path) config['import']['copy'] = True self._setup_import_session() self.importer.add_choice(importer.action.APPLY) self.importer.run() - new_path = os.path.join('Applied Artist', 'Applied Album', - 'Applied Title 1.mp3') + new_path = os.path.join(b'Applied Artist', b'Applied Album', + b'Applied Title 1.mp3') self.assert_file_in_lib(new_path) - self.assertEqual(self.lib.items().get().path, - os.path.join(self.libdir, new_path)) + self.assert_equal_path(self.lib.items().get().path, + os.path.join(self.libdir, new_path)) def test_outside_file_is_moved(self): config['import']['copy'] = False self.setup_importer.run() - self.assertEqual(self.lib.items().get().path, - self.import_media[0].path) + self.assert_equal_path(self.lib.items().get().path, + self.import_media[0].path) self._setup_import_session(move=True) self.importer.add_choice(importer.action.APPLY) @@ -793,8 +891,8 @@ self.matcher.restore() def test_add_album_for_different_artist_and_different_album(self): - self.import_media[0].artist = "Artist B" - self.import_media[0].album = "Album B" + self.import_media[0].artist = u"Artist B" + self.import_media[0].album = u"Album B" self.import_media[0].save() self.importer.run() @@ -802,11 +900,11 @@ self.assertEqual(albums, set(['Album B', 'Tag Album'])) def test_add_album_for_different_artist_and_same_albumartist(self): - self.import_media[0].artist = "Artist B" - self.import_media[0].albumartist = "Album Artist" + self.import_media[0].artist = u"Artist B" + self.import_media[0].albumartist = u"Album Artist" self.import_media[0].save() - self.import_media[1].artist = "Artist C" - self.import_media[1].albumartist = "Album Artist" + self.import_media[1].artist = u"Artist C" + self.import_media[1].albumartist = u"Album Artist" self.import_media[1].save() self.importer.run() @@ -814,7 +912,7 @@ self.assertEqual(artists, set(['Album Artist', 'Tag Artist'])) def test_add_album_for_same_artist_and_different_album(self): - self.import_media[0].album = "Album B" + self.import_media[0].album = u"Album B" self.import_media[0].save() self.importer.run() @@ -822,7 +920,7 @@ self.assertEqual(albums, set(['Album B', 'Tag Album'])) def test_add_album_for_same_album_and_different_artist(self): - self.import_media[0].artist = "Artist B" + self.import_media[0].artist = u"Artist B" self.import_media[0].save() self.importer.run() @@ -831,7 +929,7 @@ def test_incremental(self): config['import']['incremental'] = True - self.import_media[0].album = "Album B" + self.import_media[0].album = u"Album B" self.import_media[0].save() self.importer.run() @@ -863,12 +961,12 @@ def test_choose_first_candidate(self): self.importer.add_choice(1) self.importer.run() - self.assertEqual(self.lib.albums().get().album, 'Applied Album M') + self.assertEqual(self.lib.albums().get().album, u'Applied Album M') def test_choose_second_candidate(self): self.importer.add_choice(2) self.importer.run() - self.assertEqual(self.lib.albums().get().album, 'Applied Album MM') + self.assertEqual(self.lib.albums().get().album, u'Applied Album MM') class InferAlbumDataTest(_common.TestCase): @@ -878,9 +976,9 @@ i1 = _common.item() i2 = _common.item() i3 = _common.item() - i1.title = 'first item' - i2.title = 'second item' - i3.title = 'third item' + i1.title = u'first item' + i2.title = u'second item' + i3.title = u'third item' i1.comp = i2.comp = i3.comp = False i1.albumartist = i2.albumartist = i3.albumartist = '' i1.mb_albumartistid = i2.mb_albumartistid = i3.mb_albumartistid = '' @@ -888,7 +986,6 @@ self.task = 
importer.ImportTask(paths=['a path'], toppath='top path', items=self.items) - self.task.set_null_candidates() def test_asis_homogenous_single_artist(self): self.task.set_choice(importer.action.ASIS) @@ -897,28 +994,28 @@ self.assertEqual(self.items[0].albumartist, self.items[2].artist) def test_asis_heterogenous_va(self): - self.items[0].artist = 'another artist' - self.items[1].artist = 'some other artist' + self.items[0].artist = u'another artist' + self.items[1].artist = u'some other artist' self.task.set_choice(importer.action.ASIS) self.task.align_album_level_fields() self.assertTrue(self.items[0].comp) - self.assertEqual(self.items[0].albumartist, 'Various Artists') + self.assertEqual(self.items[0].albumartist, u'Various Artists') def test_asis_comp_applied_to_all_items(self): - self.items[0].artist = 'another artist' - self.items[1].artist = 'some other artist' + self.items[0].artist = u'another artist' + self.items[1].artist = u'some other artist' self.task.set_choice(importer.action.ASIS) self.task.align_album_level_fields() for item in self.items: self.assertTrue(item.comp) - self.assertEqual(item.albumartist, 'Various Artists') + self.assertEqual(item.albumartist, u'Various Artists') def test_asis_majority_artist_single_artist(self): - self.items[0].artist = 'another artist' + self.items[0].artist = u'another artist' self.task.set_choice(importer.action.ASIS) self.task.align_album_level_fields() @@ -927,19 +1024,19 @@ self.assertEqual(self.items[0].albumartist, self.items[2].artist) def test_asis_track_albumartist_override(self): - self.items[0].artist = 'another artist' - self.items[1].artist = 'some other artist' + self.items[0].artist = u'another artist' + self.items[1].artist = u'some other artist' for item in self.items: - item.albumartist = 'some album artist' - item.mb_albumartistid = 'some album artist id' + item.albumartist = u'some album artist' + item.mb_albumartistid = u'some album artist id' self.task.set_choice(importer.action.ASIS) self.task.align_album_level_fields() self.assertEqual(self.items[0].albumartist, - 'some album artist') + u'some album artist') self.assertEqual(self.items[0].mb_albumartistid, - 'some album artist id') + u'some album artist id') def test_apply_gets_artist_and_id(self): self.task.set_choice(AlbumMatch(0, None, {}, set(), set())) # APPLY @@ -952,16 +1049,16 @@ def test_apply_lets_album_values_override(self): for item in self.items: - item.albumartist = 'some album artist' - item.mb_albumartistid = 'some album artist id' + item.albumartist = u'some album artist' + item.mb_albumartistid = u'some album artist id' self.task.set_choice(AlbumMatch(0, None, {}, set(), set())) # APPLY self.task.align_album_level_fields() self.assertEqual(self.items[0].albumartist, - 'some album artist') + u'some album artist') self.assertEqual(self.items[0].mb_albumartistid, - 'some album artist id') + u'some album artist id') def test_small_single_artist_album(self): self.items = [self.items[0]] @@ -971,7 +1068,26 @@ self.assertFalse(self.items[0].comp) -class ImportDuplicateAlbumTest(unittest.TestCase, TestHelper): +def test_album_info(): + """Create an AlbumInfo object for testing. 
+ """ + track_info = TrackInfo( + title=u'new title', + track_id=u'trackid', + index=0, + ) + album_info = AlbumInfo( + artist=u'artist', + album=u'album', + tracks=[track_info], + album_id=u'albumid', + artist_id=u'artistid', + ) + return album_info + + +class ImportDuplicateAlbumTest(unittest.TestCase, TestHelper, + _common.Assertions): def setUp(self): self.setup_beets() @@ -982,18 +1098,7 @@ # Create duplicate through autotagger self.match_album_patcher = patch('beets.autotag.mb.match_album') self.match_album = self.match_album_patcher.start() - track_info = TrackInfo( - title=u'new title', - track_id=u'trackid', - ) - album_info = AlbumInfo( - artist=u'artist', - album=u'album', - tracks=[track_info], - album_id=u'albumid', - artist_id=u'artistid', - ) - self.match_album.return_value = iter([album_info]) + self.match_album.return_value = iter([test_album_info()]) # Create import session self.importer = self.create_importer() @@ -1006,17 +1111,40 @@ def test_remove_duplicate_album(self): item = self.lib.items().get() self.assertEqual(item.title, u't\xeftle 0') - self.assertTrue(os.path.isfile(item.path)) + self.assertExists(item.path) self.importer.default_resolution = self.importer.Resolution.REMOVE self.importer.run() - self.assertFalse(os.path.isfile(item.path)) + self.assertNotExists(item.path) self.assertEqual(len(self.lib.albums()), 1) self.assertEqual(len(self.lib.items()), 1) item = self.lib.items().get() self.assertEqual(item.title, u'new title') + def test_no_autotag_keeps_duplicate_album(self): + config['import']['autotag'] = False + item = self.lib.items().get() + self.assertEqual(item.title, u't\xeftle 0') + self.assertExists(item.path) + + # Imported item has the same artist and album as the one in the + # library. + import_file = os.path.join(self.importer.paths[0], + b'album 0', b'track 0.mp3') + import_file = MediaFile(import_file) + import_file.artist = item['artist'] + import_file.albumartist = item['artist'] + import_file.album = item['album'] + import_file.title = 'new title' + + self.importer.default_resolution = self.importer.Resolution.REMOVE + self.importer.run() + + self.assertExists(item.path) + self.assertEqual(len(self.lib.albums()), 2) + self.assertEqual(len(self.lib.items()), 2) + def test_keep_duplicate_album(self): self.importer.default_resolution = self.importer.Resolution.KEEPBOTH self.importer.run() @@ -1047,7 +1175,8 @@ return album -class ImportDuplicateSingletonTest(unittest.TestCase, TestHelper): +class ImportDuplicateSingletonTest(unittest.TestCase, TestHelper, + _common.Assertions): def setUp(self): self.setup_beets() @@ -1063,6 +1192,7 @@ artist=u'artist', title=u'title', track_id=u'new trackid', + index=0, ) self.match_track.return_value = iter([track_info]) @@ -1078,12 +1208,12 @@ def test_remove_duplicate(self): item = self.lib.items().get() self.assertEqual(item.mb_trackid, u'old trackid') - self.assertTrue(os.path.isfile(item.path)) + self.assertExists(item.path) self.importer.default_resolution = self.importer.Resolution.REMOVE self.importer.run() - self.assertFalse(os.path.isfile(item.path)) + self.assertNotExists(item.path) self.assertEqual(len(self.lib.items()), 1) item = self.lib.items().get() self.assertEqual(item.mb_trackid, u'new trackid') @@ -1121,15 +1251,17 @@ class TagLogTest(_common.TestCase): def test_tag_log_line(self): sio = StringIO.StringIO() - session = _common.import_session(logfile=sio) + handler = logging.StreamHandler(sio) + session = _common.import_session(loghandler=handler) session.tag_log('status', 'path') - 
assert 'status path' in sio.getvalue() + self.assertIn('status path', sio.getvalue()) def test_tag_log_unicode(self): sio = StringIO.StringIO() - session = _common.import_session(logfile=sio) - session.tag_log('status', 'caf\xc3\xa9') - assert 'status caf' in sio.getvalue() + handler = logging.StreamHandler(sio) + session = _common.import_session(loghandler=handler) + session.tag_log('status', u'caf\xe9') # send unicode + self.assertIn(u'status caf\xe9', sio.getvalue()) class ResumeImportTest(unittest.TestCase, TestHelper): @@ -1154,11 +1286,11 @@ self.importer.run() self.assertEqual(len(self.lib.albums()), 1) - self.assertIsNotNone(self.lib.albums('album:album 0').get()) + self.assertIsNotNone(self.lib.albums(u'album:album 0').get()) self.importer.run() self.assertEqual(len(self.lib.albums()), 2) - self.assertIsNotNone(self.lib.albums('album:album 1').get()) + self.assertIsNotNone(self.lib.albums(u'album:album 1').get()) @patch('beets.plugins.send') def test_resume_singleton(self, plugins_send): @@ -1175,11 +1307,11 @@ self.importer.run() self.assertEqual(len(self.lib.items()), 1) - self.assertIsNotNone(self.lib.items('title:track 0').get()) + self.assertIsNotNone(self.lib.items(u'title:track 0').get()) self.importer.run() self.assertEqual(len(self.lib.items()), 2) - self.assertIsNotNone(self.lib.items('title:track 1').get()) + self.assertIsNotNone(self.lib.items(u'title:track 1').get()) class IncrementalImportTest(unittest.TestCase, TestHelper): @@ -1222,14 +1354,14 @@ def test_invalid_state_file(self): importer = self.create_importer() - with open(self.config['statefile'].as_filename(), 'w') as f: - f.write('000') + with open(self.config['statefile'].as_filename(), 'wb') as f: + f.write(b'000') importer.run() self.assertEqual(len(self.lib.albums()), 1) def _mkmp3(path): - shutil.copyfile(os.path.join(_common.RSRC, 'min.mp3'), path) + shutil.copyfile(os.path.join(_common.RSRC, b'min.mp3'), path) class AlbumsInDirTest(_common.TestCase): @@ -1237,20 +1369,20 @@ super(AlbumsInDirTest, self).setUp() # create a directory structure for testing - self.base = os.path.abspath(os.path.join(self.temp_dir, 'tempdir')) + self.base = os.path.abspath(os.path.join(self.temp_dir, b'tempdir')) os.mkdir(self.base) - os.mkdir(os.path.join(self.base, 'album1')) - os.mkdir(os.path.join(self.base, 'album2')) - os.mkdir(os.path.join(self.base, 'more')) - os.mkdir(os.path.join(self.base, 'more', 'album3')) - os.mkdir(os.path.join(self.base, 'more', 'album4')) - - _mkmp3(os.path.join(self.base, 'album1', 'album1song1.mp3')) - _mkmp3(os.path.join(self.base, 'album1', 'album1song2.mp3')) - _mkmp3(os.path.join(self.base, 'album2', 'album2song.mp3')) - _mkmp3(os.path.join(self.base, 'more', 'album3', 'album3song.mp3')) - _mkmp3(os.path.join(self.base, 'more', 'album4', 'album4song.mp3')) + os.mkdir(os.path.join(self.base, b'album1')) + os.mkdir(os.path.join(self.base, b'album2')) + os.mkdir(os.path.join(self.base, b'more')) + os.mkdir(os.path.join(self.base, b'more', b'album3')) + os.mkdir(os.path.join(self.base, b'more', b'album4')) + + _mkmp3(os.path.join(self.base, b'album1', b'album1song1.mp3')) + _mkmp3(os.path.join(self.base, b'album1', b'album1song2.mp3')) + _mkmp3(os.path.join(self.base, b'album2', b'album2song.mp3')) + _mkmp3(os.path.join(self.base, b'more', b'album3', b'album3song.mp3')) + _mkmp3(os.path.join(self.base, b'more', b'album4', b'album4song.mp3')) def test_finds_all_albums(self): albums = list(albums_in_dir(self.base)) @@ -1259,95 +1391,479 @@ def test_separates_contents(self): found = [] for _, 
album in albums_in_dir(self.base): - found.append(re.search(r'album(.)song', album[0]).group(1)) - self.assertTrue('1' in found) - self.assertTrue('2' in found) - self.assertTrue('3' in found) - self.assertTrue('4' in found) + found.append(re.search(br'album(.)song', album[0]).group(1)) + self.assertTrue(b'1' in found) + self.assertTrue(b'2' in found) + self.assertTrue(b'3' in found) + self.assertTrue(b'4' in found) def test_finds_multiple_songs(self): for _, album in albums_in_dir(self.base): - n = re.search(r'album(.)song', album[0]).group(1) - if n == '1': + n = re.search(br'album(.)song', album[0]).group(1) + if n == b'1': self.assertEqual(len(album), 2) else: self.assertEqual(len(album), 1) class MultiDiscAlbumsInDirTest(_common.TestCase): - def setUp(self): - super(MultiDiscAlbumsInDirTest, self).setUp() + def create_music(self, files=True, ascii=True): + """Create some music in multiple album directories. - self.base = os.path.abspath(os.path.join(self.temp_dir, 'tempdir')) + `files` indicates whether to create the files (otherwise, only + directories are made). `ascii` indicates ASCII-only filenames; + otherwise, we use Unicode names. + """ + self.base = os.path.abspath(os.path.join(self.temp_dir, b'tempdir')) os.mkdir(self.base) + name = b'CAT' if ascii else util.bytestring_path(u'C\xc1T') + name_alt_case = b'CAt' if ascii else util.bytestring_path(u'C\xc1t') + self.dirs = [ # Nested album, multiple subdirs. # Also, false positive marker in root dir, and subtitle for disc 3. - os.path.join(self.base, 'ABCD1234'), - os.path.join(self.base, 'ABCD1234', 'cd 1'), - os.path.join(self.base, 'ABCD1234', 'cd 3 - bonus'), + os.path.join(self.base, b'ABCD1234'), + os.path.join(self.base, b'ABCD1234', b'cd 1'), + os.path.join(self.base, b'ABCD1234', b'cd 3 - bonus'), # Nested album, single subdir. # Also, punctuation between marker and disc number. - os.path.join(self.base, 'album'), - os.path.join(self.base, 'album', 'cd _ 1'), + os.path.join(self.base, b'album'), + os.path.join(self.base, b'album', b'cd _ 1'), # Flattened album, case typo. # Also, false positive marker in parent dir. - os.path.join(self.base, 'artist [CD5]'), - os.path.join(self.base, 'artist [CD5]', 'CAT disc 1'), - os.path.join(self.base, 'artist [CD5]', 'CAt disc 2'), + os.path.join(self.base, b'artist [CD5]'), + os.path.join(self.base, b'artist [CD5]', name + b' disc 1'), + os.path.join(self.base, b'artist [CD5]', + name_alt_case + b' disc 2'), # Single disc album, sorted between CAT discs.
- os.path.join(self.base, 'artist [CD5]', 'CATS'), + os.path.join(self.base, b'artist [CD5]', name + b'S'), ] self.files = [ - os.path.join(self.base, 'ABCD1234', 'cd 1', 'song1.mp3'), - os.path.join(self.base, 'ABCD1234', 'cd 3 - bonus', 'song2.mp3'), - os.path.join(self.base, 'ABCD1234', 'cd 3 - bonus', 'song3.mp3'), - os.path.join(self.base, 'album', 'cd _ 1', 'song4.mp3'), - os.path.join(self.base, 'artist [CD5]', 'CAT disc 1', 'song5.mp3'), - os.path.join(self.base, 'artist [CD5]', 'CAt disc 2', 'song6.mp3'), - os.path.join(self.base, 'artist [CD5]', 'CATS', 'song7.mp3'), + os.path.join(self.base, b'ABCD1234', b'cd 1', b'song1.mp3'), + os.path.join(self.base, b'ABCD1234', + b'cd 3 - bonus', b'song2.mp3'), + os.path.join(self.base, b'ABCD1234', + b'cd 3 - bonus', b'song3.mp3'), + os.path.join(self.base, b'album', b'cd _ 1', b'song4.mp3'), + os.path.join(self.base, b'artist [CD5]', name + b' disc 1', + b'song5.mp3'), + os.path.join(self.base, b'artist [CD5]', + name_alt_case + b' disc 2', b'song6.mp3'), + os.path.join(self.base, b'artist [CD5]', name + b'S', + b'song7.mp3'), ] + if not ascii: + self.dirs = [self._normalize_path(p) for p in self.dirs] + self.files = [self._normalize_path(p) for p in self.files] + for path in self.dirs: - os.mkdir(path) - for path in self.files: - _mkmp3(path) + os.mkdir(util.syspath(path)) + if files: + for path in self.files: + _mkmp3(util.syspath(path)) + + def _normalize_path(self, path): + """Normalize a path's Unicode combining form according to the + platform. + """ + path = path.decode('utf8') + norm_form = 'NFD' if sys.platform == 'darwin' else 'NFC' + path = unicodedata.normalize(norm_form, path) + return path.encode('utf8') def test_coalesce_nested_album_multiple_subdirs(self): + self.create_music() albums = list(albums_in_dir(self.base)) - self.assertEquals(len(albums), 4) + self.assertEqual(len(albums), 4) root, items = albums[0] - self.assertEquals(root, self.dirs[0:3]) - self.assertEquals(len(items), 3) + self.assertEqual(root, self.dirs[0:3]) + self.assertEqual(len(items), 3) def test_coalesce_nested_album_single_subdir(self): + self.create_music() albums = list(albums_in_dir(self.base)) root, items = albums[1] - self.assertEquals(root, self.dirs[3:5]) - self.assertEquals(len(items), 1) + self.assertEqual(root, self.dirs[3:5]) + self.assertEqual(len(items), 1) def test_coalesce_flattened_album_case_typo(self): + self.create_music() albums = list(albums_in_dir(self.base)) root, items = albums[2] - self.assertEquals(root, self.dirs[6:8]) - self.assertEquals(len(items), 2) + self.assertEqual(root, self.dirs[6:8]) + self.assertEqual(len(items), 2) def test_single_disc_album(self): + self.create_music() albums = list(albums_in_dir(self.base)) root, items = albums[3] - self.assertEquals(root, self.dirs[8:]) - self.assertEquals(len(items), 1) + self.assertEqual(root, self.dirs[8:]) + self.assertEqual(len(items), 1) def test_do_not_yield_empty_album(self): - # Remove all the MP3s. 
- for path in self.files: - os.remove(path) + self.create_music(files=False) + albums = list(albums_in_dir(self.base)) + self.assertEqual(len(albums), 0) + + def test_single_disc_unicode(self): + self.create_music(ascii=False) + albums = list(albums_in_dir(self.base)) + root, items = albums[3] + self.assertEqual(root, self.dirs[8:]) + self.assertEqual(len(items), 1) + + def test_coalesce_multiple_unicode(self): + self.create_music(ascii=False) albums = list(albums_in_dir(self.base)) - self.assertEquals(len(albums), 0) + self.assertEqual(len(albums), 4) + root, items = albums[0] + self.assertEqual(root, self.dirs[0:3]) + self.assertEqual(len(items), 3) + + +class ReimportTest(unittest.TestCase, ImportHelper, _common.Assertions): + """Test "re-imports", in which the autotagging machinery is used for + music that's already in the library. + + This works by importing new database entries for the same files and + replacing the old data with the new data. We also copy over flexible + attributes and the added date. + """ + + def setUp(self): + self.setup_beets() + + # The existing album. + album = self.add_album_fixture() + album.added = 4242.0 + album.foo = u'bar' # Some flexible attribute. + album.store() + item = album.items().get() + item.baz = u'qux' + item.added = 4747.0 + item.store() + + # Set up an import pipeline with a "good" match. + self.matcher = AutotagStub().install() + self.matcher.matching = AutotagStub.GOOD + + def tearDown(self): + self.teardown_beets() + self.matcher.restore() + + def _setup_session(self, singletons=False): + self._setup_import_session(self._album().path, singletons=singletons) + self.importer.add_choice(importer.action.APPLY) + + def _album(self): + return self.lib.albums().get() + + def _item(self): + return self.lib.items().get() + + def test_reimported_album_gets_new_metadata(self): + self._setup_session() + self.assertEqual(self._album().album, u'\xe4lbum') + self.importer.run() + self.assertEqual(self._album().album, u'the album') + + def test_reimported_album_preserves_flexattr(self): + self._setup_session() + self.importer.run() + self.assertEqual(self._album().foo, u'bar') + + def test_reimported_album_preserves_added(self): + self._setup_session() + self.importer.run() + self.assertEqual(self._album().added, 4242.0) + + def test_reimported_album_preserves_item_flexattr(self): + self._setup_session() + self.importer.run() + self.assertEqual(self._item().baz, u'qux') + + def test_reimported_album_preserves_item_added(self): + self._setup_session() + self.importer.run() + self.assertEqual(self._item().added, 4747.0) + + def test_reimported_item_gets_new_metadata(self): + self._setup_session(True) + self.assertEqual(self._item().title, u't\xeftle 0') + self.importer.run() + self.assertEqual(self._item().title, u'full') + + def test_reimported_item_preserves_flexattr(self): + self._setup_session(True) + self.importer.run() + self.assertEqual(self._item().baz, u'qux') + + def test_reimported_item_preserves_added(self): + self._setup_session(True) + self.importer.run() + self.assertEqual(self._item().added, 4747.0) + + def test_reimported_item_preserves_art(self): + self._setup_session() + art_source = os.path.join(_common.RSRC, b'abbey.jpg') + replaced_album = self._album() + replaced_album.set_art(art_source) + replaced_album.store() + old_artpath = replaced_album.artpath + self.importer.run() + new_album = self._album() + new_artpath = new_album.art_destination(art_source) + self.assertEqual(new_album.artpath, new_artpath) + 
self.assertExists(new_artpath) + if new_artpath != old_artpath: + self.assertNotExists(old_artpath) + + +class ImportPretendTest(_common.TestCase, ImportHelper): + """ Test the pretend commandline option + """ + + def __init__(self, method_name='runTest'): + super(ImportPretendTest, self).__init__(method_name) + self.matcher = None + + def setUp(self): + super(ImportPretendTest, self).setUp() + self.setup_beets() + self.__create_import_dir() + self.__create_empty_import_dir() + self._setup_import_session() + config['import']['pretend'] = True + self.matcher = AutotagStub().install() + self.io.install() + + def tearDown(self): + self.teardown_beets() + self.matcher.restore() + + def __create_import_dir(self): + self._create_import_dir(1) + resource_path = os.path.join(_common.RSRC, b'empty.mp3') + single_path = os.path.join(self.import_dir, b'track_2.mp3') + shutil.copy(resource_path, single_path) + self.import_paths = [ + os.path.join(self.import_dir, b'the_album'), + single_path + ] + self.import_files = [ + displayable_path( + os.path.join(self.import_paths[0], b'track_1.mp3')), + displayable_path(single_path) + ] + + def __create_empty_import_dir(self): + path = os.path.join(self.temp_dir, b'empty') + os.makedirs(path) + self.empty_path = path + + def __run(self, import_paths, singletons=True): + self._setup_import_session(singletons=singletons) + self.importer.paths = import_paths + + with capture_log() as logs: + self.importer.run() + + logs = [line for line in logs if not line.startswith('Sending event:')] + + self.assertEqual(len(self.lib.items()), 0) + self.assertEqual(len(self.lib.albums()), 0) + + return logs + + def test_import_singletons_pretend(self): + logs = self.__run(self.import_paths) + + self.assertEqual(logs, [ + 'Singleton: %s' % displayable_path(self.import_files[0]), + 'Singleton: %s' % displayable_path(self.import_paths[1])]) + + def test_import_album_pretend(self): + logs = self.__run(self.import_paths, singletons=False) + + self.assertEqual(logs, [ + 'Album: %s' % displayable_path(self.import_paths[0]), + ' %s' % displayable_path(self.import_files[0]), + 'Album: %s' % displayable_path(self.import_paths[1]), + ' %s' % displayable_path(self.import_paths[1])]) + + def test_import_pretend_empty(self): + logs = self.__run([self.empty_path]) + + self.assertEqual(logs, [u'No files imported from {0}' + .format(displayable_path(self.empty_path))]) + + +class ImportMusicBrainzIdTest(_common.TestCase, ImportHelper): + """Test the --musicbrainzid argument.""" + + MB_RELEASE_PREFIX = 'https://musicbrainz.org/release/' + MB_RECORDING_PREFIX = 'https://musicbrainz.org/recording/' + ID_RELEASE_0 = '00000000-0000-0000-0000-000000000000' + ID_RELEASE_1 = '11111111-1111-1111-1111-111111111111' + ID_RECORDING_0 = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' + ID_RECORDING_1 = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb' + + def setUp(self): + self.setup_beets() + self._create_import_dir(1) + + # Patch calls to musicbrainzngs. 
+ self.release_patcher = patch('musicbrainzngs.get_release_by_id', + side_effect=mocked_get_release_by_id) + self.recording_patcher = patch('musicbrainzngs.get_recording_by_id', + side_effect=mocked_get_recording_by_id) + self.release_patcher.start() + self.recording_patcher.start() + + def tearDown(self): + self.recording_patcher.stop() + self.release_patcher.stop() + self.teardown_beets() + + def test_one_mbid_one_album(self): + self.config['import']['search_ids'] = \ + [self.MB_RELEASE_PREFIX + self.ID_RELEASE_0] + self._setup_import_session() + + self.importer.add_choice(importer.action.APPLY) + self.importer.run() + self.assertEqual(self.lib.albums().get().album, 'VALID_RELEASE_0') + + def test_several_mbid_one_album(self): + self.config['import']['search_ids'] = \ + [self.MB_RELEASE_PREFIX + self.ID_RELEASE_0, + self.MB_RELEASE_PREFIX + self.ID_RELEASE_1] + self._setup_import_session() + + self.importer.add_choice(2) # Pick the 2nd best match (release 1). + self.importer.add_choice(importer.action.APPLY) + self.importer.run() + self.assertEqual(self.lib.albums().get().album, 'VALID_RELEASE_1') + + def test_one_mbid_one_singleton(self): + self.config['import']['search_ids'] = \ + [self.MB_RECORDING_PREFIX + self.ID_RECORDING_0] + self._setup_import_session(singletons=True) + + self.importer.add_choice(importer.action.APPLY) + self.importer.run() + self.assertEqual(self.lib.items().get().title, 'VALID_RECORDING_0') + + def test_several_mbid_one_singleton(self): + self.config['import']['search_ids'] = \ + [self.MB_RECORDING_PREFIX + self.ID_RECORDING_0, + self.MB_RECORDING_PREFIX + self.ID_RECORDING_1] + self._setup_import_session(singletons=True) + + self.importer.add_choice(2) # Pick the 2nd best match (recording 1). + self.importer.add_choice(importer.action.APPLY) + self.importer.run() + self.assertEqual(self.lib.items().get().title, 'VALID_RECORDING_1') + + def test_candidates_album(self): + """Test directly ImportTask.lookup_candidates().""" + task = importer.ImportTask(paths=self.import_dir, + toppath='top path', + items=[_common.item()]) + task.search_ids = [self.MB_RELEASE_PREFIX + self.ID_RELEASE_0, + self.MB_RELEASE_PREFIX + self.ID_RELEASE_1, + 'an invalid and discarded id'] + + task.lookup_candidates() + self.assertEqual(set(['VALID_RELEASE_0', 'VALID_RELEASE_1']), + set([c.info.album for c in task.candidates])) + + def test_candidates_singleton(self): + """Test directly SingletonImportTask.lookup_candidates().""" + task = importer.SingletonImportTask(toppath='top path', + item=_common.item()) + task.search_ids = [self.MB_RECORDING_PREFIX + self.ID_RECORDING_0, + self.MB_RECORDING_PREFIX + self.ID_RECORDING_1, + 'an invalid and discarded id'] + + task.lookup_candidates() + self.assertEqual(set(['VALID_RECORDING_0', 'VALID_RECORDING_1']), + set([c.info.title for c in task.candidates])) + + +# Helpers for ImportMusicBrainzIdTest. + + +def mocked_get_release_by_id(id_, includes=[], release_status=[], + release_type=[]): + """Mimic musicbrainzngs.get_release_by_id, accepting only a restricted list + of MB ids (ID_RELEASE_0, ID_RELEASE_1). The returned dict differs only in + the release title and artist name, so that ID_RELEASE_0 is a closer match + to the items created by ImportHelper._create_import_dir().""" + # Map IDs to (release title, artist), so the distances are different. 
+ releases = {ImportMusicBrainzIdTest.ID_RELEASE_0: ('VALID_RELEASE_0', + 'TAG ARTIST'), + ImportMusicBrainzIdTest.ID_RELEASE_1: ('VALID_RELEASE_1', + 'DISTANT_MATCH')} + + return { + 'release': { + 'title': releases[id_][0], + 'id': id_, + 'medium-list': [{ + 'track-list': [{ + 'recording': { + 'title': 'foo', + 'id': 'bar', + 'length': 59, + }, + 'position': 9, + }], + 'position': 5, + }], + 'artist-credit': [{ + 'artist': { + 'name': releases[id_][1], + 'id': 'some-id', + }, + }], + 'release-group': { + 'id': 'another-id', + } + } + } + + +def mocked_get_recording_by_id(id_, includes=[], release_status=[], + release_type=[]): + """Mimic musicbrainzngs.get_recording_by_id, accepting only a restricted + list of MB ids (ID_RECORDING_0, ID_RECORDING_1). The returned dict differs + only in the recording title and artist name, so that ID_RECORDING_0 is a + closer match to the items created by ImportHelper._create_import_dir().""" + # Map IDs to (recording title, artist), so the distances are different. + releases = {ImportMusicBrainzIdTest.ID_RECORDING_0: ('VALID_RECORDING_0', + 'TAG ARTIST'), + ImportMusicBrainzIdTest.ID_RECORDING_1: ('VALID_RECORDING_1', + 'DISTANT_MATCH')} + + return { + 'recording': { + 'title': releases[id_][0], + 'id': id_, + 'length': 59, + 'artist-credit': [{ + 'artist': { + 'name': releases[id_][1], + 'id': 'some-id', + }, + }], + } + } def suite(): diff -Nru beets-1.3.8+dfsg/test/test_importfeeds.py beets-1.3.19/test/test_importfeeds.py --- beets-1.3.8+dfsg/test/test_importfeeds.py 2014-04-27 03:49:51.000000000 +0000 +++ beets-1.3.19/test/test_importfeeds.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,12 +1,16 @@ +# -*- coding: utf-8 -*- + +from __future__ import division, absolute_import, print_function + import os import os.path import tempfile import shutil -from _common import unittest +from test._common import unittest from beets import config from beets.library import Item, Album, Library -from beetsplug.importfeeds import album_imported, ImportFeedsPlugin +from beetsplug.importfeeds import ImportFeedsPlugin class ImportfeedsTestTest(unittest.TestCase): @@ -30,13 +34,30 @@ self.lib.add(album) self.lib.add(item) - album_imported(self.lib, album) + self.importfeeds.album_imported(self.lib, album) playlist_path = os.path.join(self.feeds_dir, os.listdir(self.feeds_dir)[0]) self.assertTrue(playlist_path.endswith('album_name.m3u')) with open(playlist_path) as playlist: self.assertIn(item_path, playlist.read()) + def test_playlist_in_subdir(self): + config['importfeeds']['formats'] = 'm3u' + config['importfeeds']['m3u_name'] = \ + os.path.join('subdir', 'imported.m3u') + album = Album(album='album/name', id=1) + item_path = os.path.join('path', 'to', 'item') + item = Item(title='song', album_id=1, path=item_path) + self.lib.add(album) + self.lib.add(item) + + self.importfeeds.album_imported(self.lib, album) + playlist = os.path.join(self.feeds_dir, + config['importfeeds']['m3u_name'].get()) + playlist_subdir = os.path.dirname(playlist) + self.assertTrue(os.path.isdir(playlist_subdir)) + self.assertTrue(os.path.isfile(playlist)) + def suite(): return unittest.TestLoader().loadTestsFromName(__name__) diff -Nru beets-1.3.8+dfsg/test/test_info.py beets-1.3.19/test/test_info.py --- beets-1.3.8+dfsg/test/test_info.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_info.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Thomas Scholtes. +# Copyright 2016, Thomas Scholtes. 
# # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,10 +13,13 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. -from _common import unittest -from helper import TestHelper +from __future__ import division, absolute_import, print_function + +from test._common import unittest +from test.helper import TestHelper from beets.mediafile import MediaFile +from beets.util import displayable_path class InfoTest(unittest.TestCase, TestHelper): @@ -47,19 +51,20 @@ self.assertIn('disctitle: DDD', out) self.assertIn('genres: a; b; c', out) self.assertNotIn('composer:', out) + self.remove_mediafile_fixtures() def test_item_query(self): - items = self.add_item_fixtures(count=2) - items[0].album = 'xxxx' - items[0].write() - items[0].album = 'yyyy' - items[0].store() + item1, item2 = self.add_item_fixtures(count=2) + item1.album = 'xxxx' + item1.write() + item1.album = 'yyyy' + item1.store() out = self.run_with_output('album:yyyy') - self.assertIn(items[0].path, out) - self.assertIn('album: xxxx', out) + self.assertIn(displayable_path(item1.path), out) + self.assertIn(u'album: xxxx', out) - self.assertNotIn(items[1].path, out) + self.assertNotIn(displayable_path(item2.path), out) def test_item_library_query(self): item, = self.add_item_fixtures() @@ -67,8 +72,8 @@ item.store() out = self.run_with_output('--library', 'album:xxxx') - self.assertIn(item.path, out) - self.assertIn('album: xxxx', out) + self.assertIn(displayable_path(item.path), out) + self.assertIn(u'album: xxxx', out) def test_collect_item_and_path(self): path = self.create_mediafile_fixture() @@ -85,9 +90,27 @@ mediafile.save() out = self.run_with_output('--summarize', 'album:AAA', path) - self.assertIn('album: AAA', out) - self.assertIn('tracktotal: 5', out) - self.assertIn('title: [various]', out) + self.assertIn(u'album: AAA', out) + self.assertIn(u'tracktotal: 5', out) + self.assertIn(u'title: [various]', out) + self.remove_mediafile_fixtures() + + def test_include_pattern(self): + item, = self.add_item_fixtures() + item.album = 'xxxx' + item.store() + + out = self.run_with_output('--library', 'album:xxxx', + '--include-keys', '*lbu*') + self.assertIn(displayable_path(item.path), out) + self.assertNotIn(u'title:', out) + self.assertIn(u'album: xxxx', out) + + def test_custom_format(self): + self.add_item_fixtures() + out = self.run_with_output('--library', '--format', + '$track. $title - $artist ($length)') + self.assertEqual(u'02. tïtle 0 - the artist (0:01)\n', out) def suite(): diff -Nru beets-1.3.8+dfsg/test/test_ipfs.py beets-1.3.19/test/test_ipfs.py --- beets-1.3.8+dfsg/test/test_ipfs.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/test_ipfs.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+ +from __future__ import division, absolute_import, print_function + +from mock import patch + +from beets import library +from beets.util import bytestring_path +from beetsplug.ipfs import IPFSPlugin + +from test import _common +from test._common import unittest +from test.helper import TestHelper +import os + + +class IPFSPluginTest(unittest.TestCase, TestHelper): + + def setUp(self): + self.setup_beets() + self.load_plugins('ipfs') + self.patcher = patch('beets.util.command_output') + self.command_output = self.patcher.start() + self.lib = library.Library(":memory:") + + def tearDown(self): + self.unload_plugins() + self.teardown_beets() + self.patcher.stop() + + def test_stored_hashes(self): + test_album = self.mk_test_album() + ipfs = IPFSPlugin() + added_albums = ipfs.ipfs_added_albums(self.lib, self.lib.path) + added_album = added_albums.get_album(1) + self.assertEqual(added_album.ipfs, test_album.ipfs) + found = False + want_item = test_album.items()[2] + for check_item in added_album.items(): + try: + if check_item.ipfs: + want_path = '/ipfs/{0}/{1}'.format(test_album.ipfs, + os.path.basename( + want_item.path)) + want_path = bytestring_path(want_path) + self.assertEqual(check_item.path, want_path) + self.assertEqual(check_item.ipfs, want_item.ipfs) + self.assertEqual(check_item.title, want_item.title) + found = True + except AttributeError: + pass + self.assertTrue(found) + + def mk_test_album(self): + items = [_common.item() for _ in range(3)] + items[0].title = 'foo bar' + items[0].artist = 'one' + items[0].album = 'baz' + items[0].year = 2001 + items[0].comp = True + items[1].title = 'baz qux' + items[1].artist = 'two' + items[1].album = 'baz' + items[1].year = 2002 + items[1].comp = True + items[2].title = 'beets 4 eva' + items[2].artist = 'three' + items[2].album = 'foo' + items[2].year = 2003 + items[2].comp = False + items[2].ipfs = 'QmfM9ic5LJj7V6ecozFx1MkSoaaiq3PXfhJoFvyqzpLXSk' + + for item in items: + self.lib.add(item) + + album = self.lib.add_album(items) + album.ipfs = "QmfM9ic5LJj7V6ecozFx1MkSoaaiq3PXfhJoFvyqzpLXSf" + album.store() + + return album + + +def suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_keyfinder.py beets-1.3.19/test/test_keyfinder.py --- beets-1.3.8+dfsg/test/test_keyfinder.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_keyfinder.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Thomas Scholtes. +# Copyright 2016, Thomas Scholtes. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,11 +13,14 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. 
+from __future__ import division, absolute_import, print_function + from mock import patch -from _common import unittest -from helper import TestHelper +from test._common import unittest +from test.helper import TestHelper from beets.library import Item +from beets import util class KeyFinderTest(unittest.TestCase, TestHelper): @@ -42,7 +46,7 @@ item.load() self.assertEqual(item['initial_key'], 'C#m') self.command_output.assert_called_with( - ['KeyFinder', '-f', item.path]) + ['KeyFinder', '-f', util.syspath(item.path)]) def test_add_key_on_import(self): self.command_output.return_value = 'dbm' diff -Nru beets-1.3.8+dfsg/test/test_lastgenre.py beets-1.3.19/test/test_lastgenre.py --- beets-1.3.8+dfsg/test/test_lastgenre.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_lastgenre.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Fabrice Laporte. +# Copyright 2016, Fabrice Laporte. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,11 +15,16 @@ """Tests for the 'lastgenre' plugin.""" -from _common import unittest +from __future__ import division, absolute_import, print_function + +from mock import Mock + +from test import _common +from test._common import unittest from beetsplug import lastgenre from beets import config -from helper import TestHelper +from test.helper import TestHelper class LastGenrePluginTest(unittest.TestCase, TestHelper): @@ -45,7 +51,7 @@ """ self._setup_config() self.assertEqual(self.plugin._resolve_genres(['delta blues']), - 'Delta Blues') + u'Delta Blues') def test_c14n_only(self): """Default c14n tree funnels up to most common genre except for *wrong* @@ -53,16 +59,16 @@ """ self._setup_config(canonical=True, count=99) self.assertEqual(self.plugin._resolve_genres(['delta blues']), - 'Blues') + u'Blues') self.assertEqual(self.plugin._resolve_genres(['iota blues']), - 'Iota Blues') + u'Iota Blues') def test_whitelist_only(self): """Default whitelist rejects *wrong* (non existing) genres. """ self._setup_config(whitelist=True) self.assertEqual(self.plugin._resolve_genres(['iota blues']), - '') + u'') def test_whitelist_c14n(self): """Default whitelist and c14n both activated result in all parents @@ -70,7 +76,7 @@ """ self._setup_config(canonical=True, whitelist=True, count=99) self.assertEqual(self.plugin._resolve_genres(['delta blues']), - 'Delta Blues, Blues') + u'Delta Blues, Blues') def test_whitelist_custom(self): """Keep only genres that are in the whitelist. 
@@ -78,11 +84,11 @@ self._setup_config(whitelist=set(['blues', 'rock', 'jazz']), count=2) self.assertEqual(self.plugin._resolve_genres(['pop', 'blues']), - 'Blues') + u'Blues') self._setup_config(canonical='', whitelist=set(['rock'])) self.assertEqual(self.plugin._resolve_genres(['delta blues']), - '') + u'') def test_count(self): """Keep the n first genres, as we expect them to be sorted from more to @@ -92,7 +98,7 @@ count=2) self.assertEqual(self.plugin._resolve_genres( ['jazz', 'pop', 'rock', 'blues']), - 'Jazz, Rock') + u'Jazz, Rock') def test_count_c14n(self): """Keep the n first genres, after having applied c14n when necessary @@ -104,14 +110,14 @@ # second slot self.assertEqual(self.plugin._resolve_genres( ['jazz', 'pop', 'country blues', 'rock']), - 'Jazz, Blues') + u'Jazz, Blues') def test_c14n_whitelist(self): """Genres first pass through c14n and are then filtered """ self._setup_config(canonical=True, whitelist=set(['rock'])) self.assertEqual(self.plugin._resolve_genres(['delta blues']), - '') + u'') def test_empty_string_enables_canonical(self): """For backwards compatibility, setting the `canonical` option @@ -119,7 +125,7 @@ """ self._setup_config(canonical='', count=99) self.assertEqual(self.plugin._resolve_genres(['delta blues']), - 'Blues') + u'Blues') def test_empty_string_enables_whitelist(self): """Again for backwards compatibility, setting the `whitelist` @@ -127,14 +133,84 @@ """ self._setup_config(whitelist='') self.assertEqual(self.plugin._resolve_genres(['iota blues']), - '') + u'') def test_no_duplicate(self): """Remove duplicated genres. """ self._setup_config(count=99) self.assertEqual(self.plugin._resolve_genres(['blues', 'blues']), - 'Blues') + u'Blues') + + def test_tags_for(self): + class MockPylastElem(object): + def __init__(self, name): + self.name = name + + def get_name(self): + return self.name + + class MockPylastObj(object): + def get_top_tags(self): + tag1 = Mock() + tag1.weight = 90 + tag1.item = MockPylastElem(u'Pop') + tag2 = Mock() + tag2.weight = 40 + tag2.item = MockPylastElem(u'Rap') + return [tag1, tag2] + + plugin = lastgenre.LastGenrePlugin() + res = plugin._tags_for(MockPylastObj()) + self.assertEqual(res, [u'pop', u'rap']) + res = plugin._tags_for(MockPylastObj(), min_weight=50) + self.assertEqual(res, [u'pop']) + + def test_get_genre(self): + mock_genres = {'track': u'1', 'album': u'2', 'artist': u'3'} + + def mock_fetch_track_genre(self, obj=None): + return mock_genres['track'] + + def mock_fetch_album_genre(self, obj): + return mock_genres['album'] + + def mock_fetch_artist_genre(self, obj): + return mock_genres['artist'] + + lastgenre.LastGenrePlugin.fetch_track_genre = mock_fetch_track_genre + lastgenre.LastGenrePlugin.fetch_album_genre = mock_fetch_album_genre + lastgenre.LastGenrePlugin.fetch_artist_genre = mock_fetch_artist_genre + + self._setup_config(whitelist=False) + item = _common.item() + item.genre = mock_genres['track'] + + config['lastgenre'] = {'force': False} + res = self.plugin._get_genre(item) + self.assertEqual(res, (item.genre, u'keep')) + + config['lastgenre'] = {'force': True, 'source': u'track'} + res = self.plugin._get_genre(item) + self.assertEqual(res, (mock_genres['track'], u'track')) + + config['lastgenre'] = {'source': u'album'} + res = self.plugin._get_genre(item) + self.assertEqual(res, (mock_genres['album'], u'album')) + + config['lastgenre'] = {'source': u'artist'} + res = self.plugin._get_genre(item) + self.assertEqual(res, (mock_genres['artist'], u'artist')) + + mock_genres['artist'] = None + res = 
self.plugin._get_genre(item) + self.assertEqual(res, (item.genre, u'original')) + + config['lastgenre'] = {'fallback': u'rap'} + item.genre = None + res = self.plugin._get_genre(item) + self.assertEqual(res, (config['lastgenre']['fallback'].get(), + u'fallback')) def suite(): diff -Nru beets-1.3.8+dfsg/test/test_library.py beets-1.3.19/test/test_library.py --- beets-1.3.8+dfsg/test/test_library.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_library.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,6 +15,8 @@ """Tests for non-query database functions of Item. """ +from __future__ import division, absolute_import, print_function + import os import os.path import stat @@ -21,16 +24,20 @@ import re import unicodedata import sys +import time -import _common -from _common import unittest -from _common import item +from test import _common +from test._common import unittest +from test._common import item import beets.library import beets.mediafile +import beets.dbcore.query from beets import util from beets import plugins from beets import config from beets.mediafile import MediaFile +from beets.util import syspath, bytestring_path +from test.helper import TestHelper # Shortcut to path normalization. np = util.normpath @@ -39,12 +46,12 @@ class LoadTest(_common.LibTestCase): def test_load_restores_data_from_db(self): original_title = self.i.title - self.i.title = 'something' + self.i.title = u'something' self.i.load() self.assertEqual(original_title, self.i.title) def test_load_clears_dirty_flags(self): - self.i.artist = 'something' + self.i.artist = u'something' self.assertTrue('artist' in self.i._dirty) self.i.load() self.assertTrue('artist' not in self.i._dirty) @@ -61,7 +68,7 @@ def test_store_only_writes_dirty_fields(self): original_genre = self.i.genre - self.i._values_fixed['genre'] = 'beatboxing' # change w/o dirtying + self.i._values_fixed['genre'] = u'beatboxing' # change w/o dirtying self.i.store() new_genre = self.lib._connection().execute( 'select genre from items where ' @@ -69,7 +76,7 @@ self.assertEqual(new_genre, original_genre) def test_store_clears_dirty_flags(self): - self.i.composer = 'tvp' + self.i.composer = u'tvp' self.i.store() self.assertTrue('composer' not in self.i._dirty) @@ -89,7 +96,7 @@ def test_library_add_path_inserts_row(self): i = beets.library.Item.from_path( - os.path.join(_common.RSRC, 'full.mp3') + os.path.join(_common.RSRC, b'full.mp3') ) self.lib.add(i) new_grouping = self.lib._connection().execute( @@ -123,7 +130,7 @@ self.assertTrue('title' not in self.i._dirty) def test_invalid_field_raises_attributeerror(self): - self.assertRaises(AttributeError, getattr, self.i, 'xyzzy') + self.assertRaises(AttributeError, getattr, self.i, u'xyzzy') class DestinationTest(_common.TestCase): @@ -141,18 +148,18 @@ config.read(user=False, defaults=True) def test_directory_works_with_trailing_slash(self): - self.lib.directory = 'one/' - self.lib.path_formats = [('default', 'two')] + self.lib.directory = b'one/' + self.lib.path_formats = [(u'default', u'two')] self.assertEqual(self.i.destination(), np('one/two')) def test_directory_works_without_trailing_slash(self): - self.lib.directory = 'one' - self.lib.path_formats = [('default', 'two')] + self.lib.directory = b'one' + 
self.lib.path_formats = [(u'default', u'two')] self.assertEqual(self.i.destination(), np('one/two')) def test_destination_substitues_metadata_values(self): - self.lib.directory = 'base' - self.lib.path_formats = [('default', '$album/$artist $title')] + self.lib.directory = b'base' + self.lib.path_formats = [(u'default', u'$album/$artist $title')] self.i.title = 'three' self.i.artist = 'two' self.i.album = 'one' @@ -160,23 +167,23 @@ np('base/one/two three')) def test_destination_preserves_extension(self): - self.lib.directory = 'base' - self.lib.path_formats = [('default', '$title')] + self.lib.directory = b'base' + self.lib.path_formats = [(u'default', u'$title')] self.i.path = 'hey.audioformat' self.assertEqual(self.i.destination(), np('base/the title.audioformat')) def test_lower_case_extension(self): - self.lib.directory = 'base' - self.lib.path_formats = [('default', '$title')] + self.lib.directory = b'base' + self.lib.path_formats = [(u'default', u'$title')] self.i.path = 'hey.MP3' self.assertEqual(self.i.destination(), np('base/the title.mp3')) def test_destination_pads_some_indices(self): - self.lib.directory = 'base' - self.lib.path_formats = [('default', - '$track $tracktotal $disc $disctotal $bpm')] + self.lib.directory = b'base' + self.lib.path_formats = [(u'default', + u'$track $tracktotal $disc $disctotal $bpm')] self.i.track = 1 self.i.tracktotal = 2 self.i.disc = 3 @@ -186,8 +193,8 @@ np('base/01 02 03 04 5')) def test_destination_pads_date_values(self): - self.lib.directory = 'base' - self.lib.path_formats = [('default', '$year-$month-$day')] + self.lib.directory = b'base' + self.lib.path_formats = [(u'default', u'$year-$month-$day')] self.i.year = 1 self.i.month = 2 self.i.day = 3 @@ -197,145 +204,119 @@ def test_destination_escapes_slashes(self): self.i.album = 'one/two' dest = self.i.destination() - self.assertTrue('one' in dest) - self.assertTrue('two' in dest) - self.assertFalse('one/two' in dest) + self.assertTrue(b'one' in dest) + self.assertTrue(b'two' in dest) + self.assertFalse(b'one/two' in dest) def test_destination_escapes_leading_dot(self): self.i.album = '.something' dest = self.i.destination() - self.assertTrue('something' in dest) - self.assertFalse('/.' in dest) + self.assertTrue(b'something' in dest) + self.assertFalse(b'/.' 
in dest) def test_destination_preserves_legitimate_slashes(self): self.i.artist = 'one' self.i.album = 'two' dest = self.i.destination() - self.assertTrue(os.path.join('one', 'two') in dest) + self.assertTrue(os.path.join(b'one', b'two') in dest) def test_destination_long_names_truncated(self): - self.i.title = 'X' * 300 - self.i.artist = 'Y' * 300 - for c in self.i.destination().split(os.path.sep): + self.i.title = u'X' * 300 + self.i.artist = u'Y' * 300 + for c in self.i.destination().split(util.PATH_SEP): self.assertTrue(len(c) <= 255) def test_destination_long_names_keep_extension(self): - self.i.title = 'X' * 300 - self.i.path = 'something.extn' + self.i.title = u'X' * 300 + self.i.path = b'something.extn' dest = self.i.destination() - self.assertEqual(dest[-5:], '.extn') + self.assertEqual(dest[-5:], b'.extn') def test_distination_windows_removes_both_separators(self): self.i.title = 'one \\ two / three.mp3' with _common.platform_windows(): p = self.i.destination() - self.assertFalse('one \\ two' in p) - self.assertFalse('one / two' in p) - self.assertFalse('two \\ three' in p) - self.assertFalse('two / three' in p) - - def test_sanitize_unix_replaces_leading_dot(self): - with _common.platform_posix(): - p = util.sanitize_path(u'one/.two/three') - self.assertFalse('.' in p) - - def test_sanitize_windows_replaces_trailing_dot(self): - with _common.platform_windows(): - p = util.sanitize_path(u'one/two./three') - self.assertFalse('.' in p) - - def test_sanitize_windows_replaces_illegal_chars(self): - with _common.platform_windows(): - p = util.sanitize_path(u':*?"<>|') - self.assertFalse(':' in p) - self.assertFalse('*' in p) - self.assertFalse('?' in p) - self.assertFalse('"' in p) - self.assertFalse('<' in p) - self.assertFalse('>' in p) - self.assertFalse('|' in p) + self.assertFalse(b'one \\ two' in p) + self.assertFalse(b'one / two' in p) + self.assertFalse(b'two \\ three' in p) + self.assertFalse(b'two / three' in p) def test_path_with_format(self): - self.lib.path_formats = [('default', '$artist/$album ($format)')] + self.lib.path_formats = [(u'default', u'$artist/$album ($format)')] p = self.i.destination() - self.assert_('(FLAC)' in p) + self.assertTrue(b'(FLAC)' in p) def test_heterogeneous_album_gets_single_directory(self): i1, i2 = item(), item() self.lib.add_album([i1, i2]) i1.year, i2.year = 2009, 2010 - self.lib.path_formats = [('default', '$album ($year)/$track $title')] + self.lib.path_formats = [(u'default', u'$album ($year)/$track $title')] dest1, dest2 = i1.destination(), i2.destination() self.assertEqual(os.path.dirname(dest1), os.path.dirname(dest2)) def test_default_path_for_non_compilations(self): self.i.comp = False self.lib.add_album([self.i]) - self.lib.directory = 'one' - self.lib.path_formats = [('default', 'two'), - ('comp:true', 'three')] + self.lib.directory = b'one' + self.lib.path_formats = [(u'default', u'two'), + (u'comp:true', u'three')] self.assertEqual(self.i.destination(), np('one/two')) def test_singleton_path(self): i = item(self.lib) - self.lib.directory = 'one' + self.lib.directory = b'one' self.lib.path_formats = [ - ('default', 'two'), - ('singleton:true', 'four'), - ('comp:true', 'three'), + (u'default', u'two'), + (u'singleton:true', u'four'), + (u'comp:true', u'three'), ] self.assertEqual(i.destination(), np('one/four')) def test_comp_before_singleton_path(self): i = item(self.lib) i.comp = True - self.lib.directory = 'one' + self.lib.directory = b'one' self.lib.path_formats = [ - ('default', 'two'), - ('comp:true', 'three'), - 
('singleton:true', 'four'), + (u'default', u'two'), + (u'comp:true', u'three'), + (u'singleton:true', u'four'), ] self.assertEqual(i.destination(), np('one/three')) def test_comp_path(self): self.i.comp = True self.lib.add_album([self.i]) - self.lib.directory = 'one' + self.lib.directory = b'one' self.lib.path_formats = [ - ('default', 'two'), - ('comp:true', 'three'), + (u'default', u'two'), + (u'comp:true', u'three'), ] self.assertEqual(self.i.destination(), np('one/three')) def test_albumtype_query_path(self): self.i.comp = True self.lib.add_album([self.i]) - self.i.albumtype = 'sometype' - self.lib.directory = 'one' + self.i.albumtype = u'sometype' + self.lib.directory = b'one' self.lib.path_formats = [ - ('default', 'two'), - ('albumtype:sometype', 'four'), - ('comp:true', 'three'), + (u'default', u'two'), + (u'albumtype:sometype', u'four'), + (u'comp:true', u'three'), ] self.assertEqual(self.i.destination(), np('one/four')) def test_albumtype_path_fallback_to_comp(self): self.i.comp = True self.lib.add_album([self.i]) - self.i.albumtype = 'sometype' - self.lib.directory = 'one' + self.i.albumtype = u'sometype' + self.lib.directory = b'one' self.lib.path_formats = [ - ('default', 'two'), - ('albumtype:anothertype', 'four'), - ('comp:true', 'three'), + (u'default', u'two'), + (u'albumtype:anothertype', u'four'), + (u'comp:true', u'three'), ] self.assertEqual(self.i.destination(), np('one/three')) - def test_sanitize_windows_replaces_trailing_space(self): - with _common.platform_windows(): - p = util.sanitize_path(u'one/two /three') - self.assertFalse(' ' in p) - def test_get_formatted_does_not_replace_separators(self): with _common.platform_posix(): name = os.path.join('a', 'b') @@ -374,61 +355,42 @@ self.assertEqual(val, u'') def test_artist_falls_back_to_albumartist(self): - self.i.artist = '' - self.i.albumartist = 'something' - self.lib.path_formats = [('default', '$artist')] + self.i.artist = u'' + self.i.albumartist = u'something' + self.lib.path_formats = [(u'default', u'$artist')] p = self.i.destination() - self.assertEqual(p.rsplit(os.path.sep, 1)[1], 'something') + self.assertEqual(p.rsplit(util.PATH_SEP, 1)[1], b'something') def test_albumartist_falls_back_to_artist(self): - self.i.artist = 'trackartist' - self.i.albumartist = '' - self.lib.path_formats = [('default', '$albumartist')] + self.i.artist = u'trackartist' + self.i.albumartist = u'' + self.lib.path_formats = [(u'default', u'$albumartist')] p = self.i.destination() - self.assertEqual(p.rsplit(os.path.sep, 1)[1], 'trackartist') + self.assertEqual(p.rsplit(util.PATH_SEP, 1)[1], b'trackartist') def test_artist_overrides_albumartist(self): - self.i.artist = 'theartist' - self.i.albumartist = 'something' - self.lib.path_formats = [('default', '$artist')] + self.i.artist = u'theartist' + self.i.albumartist = u'something' + self.lib.path_formats = [(u'default', u'$artist')] p = self.i.destination() - self.assertEqual(p.rsplit(os.path.sep, 1)[1], 'theartist') + self.assertEqual(p.rsplit(util.PATH_SEP, 1)[1], b'theartist') def test_albumartist_overrides_artist(self): - self.i.artist = 'theartist' - self.i.albumartist = 'something' - self.lib.path_formats = [('default', '$albumartist')] + self.i.artist = u'theartist' + self.i.albumartist = u'something' + self.lib.path_formats = [(u'default', u'$albumartist')] p = self.i.destination() - self.assertEqual(p.rsplit(os.path.sep, 1)[1], 'something') - - def test_sanitize_path_works_on_empty_string(self): - with _common.platform_posix(): - p = util.sanitize_path(u'') - 
self.assertEqual(p, u'') - - def test_sanitize_with_custom_replace_overrides_built_in_sub(self): - with _common.platform_posix(): - p = util.sanitize_path(u'a/.?/b', [ - (re.compile(ur'foo'), u'bar'), - ]) - self.assertEqual(p, u'a/.?/b') - - def test_sanitize_with_custom_replace_adds_replacements(self): - with _common.platform_posix(): - p = util.sanitize_path(u'foo/bar', [ - (re.compile(ur'foo'), u'bar'), - ]) - self.assertEqual(p, u'bar/bar') + self.assertEqual(p.rsplit(util.PATH_SEP, 1)[1], b'something') def test_unicode_normalized_nfd_on_mac(self): instr = unicodedata.normalize('NFC', u'caf\xe9') - self.lib.path_formats = [('default', instr)] + self.lib.path_formats = [(u'default', instr)] dest = self.i.destination(platform='darwin', fragment=True) self.assertEqual(dest, unicodedata.normalize('NFD', instr)) def test_unicode_normalized_nfc_on_linux(self): instr = unicodedata.normalize('NFD', u'caf\xe9') - self.lib.path_formats = [('default', instr)] + self.lib.path_formats = [(u'default', instr)] dest = self.i.destination(platform='linux2', fragment=True) self.assertEqual(dest, unicodedata.normalize('NFC', instr)) @@ -437,16 +399,16 @@ sys.getfilesystemencoding = lambda: 'mbcs' try: self.i.title = u'h\u0259d' - self.lib.path_formats = [('default', '$title')] + self.lib.path_formats = [(u'default', u'$title')] p = self.i.destination() - self.assertFalse('?' in p) + self.assertFalse(b'?' in p) # We use UTF-8 to encode Windows paths now. self.assertTrue(u'h\u0259d'.encode('utf8') in p) finally: sys.getfilesystemencoding = oldfunc def test_unicode_extension_in_fragment(self): - self.lib.path_formats = [('default', u'foo')] + self.lib.path_formats = [(u'default', u'foo')] self.i.path = util.bytestring_path(u'bar.caf\xe9') dest = self.i.destination(platform='linux2', fragment=True) self.assertEqual(dest, u'foo.caf\xe9') @@ -454,16 +416,79 @@ def test_asciify_and_replace(self): config['asciify_paths'] = True self.lib.replacements = [(re.compile(u'"'), u'q')] - self.lib.directory = 'lib' - self.lib.path_formats = [('default', '$title')] + self.lib.directory = b'lib' + self.lib.path_formats = [(u'default', u'$title')] self.i.title = u'\u201c\u00f6\u2014\u00cf\u201d' self.assertEqual(self.i.destination(), np('lib/qo--Iq')) + def test_destination_with_replacements(self): + self.lib.directory = b'base' + self.lib.replacements = [(re.compile(r'a'), u'e')] + self.lib.path_formats = [(u'default', u'$album/$title')] + self.i.title = u'foo' + self.i.album = u'bar' + self.assertEqual(self.i.destination(), + np('base/ber/foo')) + + @unittest.skip('unimplemented: #359') + def test_destination_with_empty_component(self): + self.lib.directory = b'base' + self.lib.replacements = [(re.compile(r'^$'), u'_')] + self.lib.path_formats = [(u'default', u'$album/$artist/$title')] + self.i.title = u'three' + self.i.artist = u'' + self.i.albumartist = u'' + self.i.album = u'one' + self.assertEqual(self.i.destination(), + np('base/one/_/three')) + + @unittest.skip('unimplemented: #359') + def test_destination_with_empty_final_component(self): + self.lib.directory = b'base' + self.lib.replacements = [(re.compile(r'^$'), u'_')] + self.lib.path_formats = [(u'default', u'$album/$title')] + self.i.title = u'' + self.i.album = u'one' + self.i.path = 'foo.mp3' + self.assertEqual(self.i.destination(), + np('base/one/_.mp3')) + + def test_legalize_path_one_for_one_replacement(self): + # Use a replacement that should always replace the last X in any + # path component with a Z. 
+ self.lib.replacements = [ + (re.compile(r'X$'), u'Z'), + ] + + # Construct an item whose untruncated path ends with a Y but whose + # truncated version ends with an X. + self.i.title = u'X' * 300 + u'Y' + + # The final path should reflect the replacement. + dest = self.i.destination() + self.assertEqual(dest[-2:], b'XZ') + + def test_legalize_path_one_for_many_replacement(self): + # Use a replacement that should always replace the last X in any + # path component with four Zs. + self.lib.replacements = [ + (re.compile(r'X$'), u'ZZZZ'), + ] + + # Construct an item whose untruncated path ends with a Y but whose + # truncated version ends with an X. + self.i.title = u'X' * 300 + u'Y' + + # The final path should ignore the user replacement and create a path + # of the correct length, containing Xs. + dest = self.i.destination() + self.assertEqual(dest[-2:], b'XX') + class ItemFormattedMappingTest(_common.LibTestCase): def test_formatted_item_value(self): formatted = self.i.formatted() - self.assertEqual(formatted['artist'], 'the artist') + self.assertEqual(formatted['artist'], u'the artist') def test_get_unset_field(self): formatted = self.i.formatted() @@ -476,53 +501,53 @@ def test_get_method_with_specified_default(self): formatted = self.i.formatted() - self.assertEqual(formatted.get('other_field', 'default'), 'default') + self.assertEqual(formatted.get('other_field', u'default'), u'default') def test_item_precedence(self): album = self.lib.add_album([self.i]) - album['artist'] = 'foo' + album['artist'] = u'foo' album.store() - self.assertNotEqual('foo', self.i.formatted().get('artist')) + self.assertNotEqual(u'foo', self.i.formatted().get('artist')) def test_album_flex_field(self): album = self.lib.add_album([self.i]) - album['flex'] = 'foo' + album['flex'] = u'foo' album.store() - self.assertEqual('foo', self.i.formatted().get('flex')) + self.assertEqual(u'foo', self.i.formatted().get('flex')) def test_album_field_overrides_item_field_for_path(self): # Make the album inconsistent with the item. album = self.lib.add_album([self.i]) - album.album = 'foo' + album.album = u'foo' album.store() - self.i.album = 'bar' + self.i.album = u'bar' self.i.store() # Ensure the album takes precedence. 
formatted = self.i.formatted(for_path=True) - self.assertEqual(formatted['album'], 'foo') + self.assertEqual(formatted['album'], u'foo') def test_artist_falls_back_to_albumartist(self): - self.i.artist = '' + self.i.artist = u'' formatted = self.i.formatted() - self.assertEqual(formatted['artist'], 'the album artist') + self.assertEqual(formatted['artist'], u'the album artist') def test_albumartist_falls_back_to_artist(self): - self.i.albumartist = '' + self.i.albumartist = u'' formatted = self.i.formatted() - self.assertEqual(formatted['albumartist'], 'the artist') + self.assertEqual(formatted['albumartist'], u'the artist') def test_both_artist_and_albumartist_empty(self): - self.i.artist = '' - self.i.albumartist = '' + self.i.artist = u'' + self.i.albumartist = u'' formatted = self.i.formatted() - self.assertEqual(formatted['albumartist'], '') + self.assertEqual(formatted['albumartist'], u'') class PathFormattingMixin(object): """Utilities for testing path formatting.""" def _setf(self, fmt): - self.lib.path_formats.insert(0, ('default', fmt)) + self.lib.path_formats.insert(0, (u'default', fmt)) def _assert_dest(self, dest, i=None): if i is None: @@ -536,8 +561,8 @@ def setUp(self): super(DestinationFunctionTest, self).setUp() self.lib = beets.library.Library(':memory:') - self.lib.directory = '/base' - self.lib.path_formats = [('default', u'path')] + self.lib.directory = b'/base' + self.lib.path_formats = [(u'default', u'path')] self.i = item(self.lib) def tearDown(self): @@ -546,59 +571,99 @@ def test_upper_case_literal(self): self._setf(u'%upper{foo}') - self._assert_dest('/base/FOO') + self._assert_dest(b'/base/FOO') def test_upper_case_variable(self): self._setf(u'%upper{$title}') - self._assert_dest('/base/THE TITLE') + self._assert_dest(b'/base/THE TITLE') def test_title_case_variable(self): self._setf(u'%title{$title}') - self._assert_dest('/base/The Title') + self._assert_dest(b'/base/The Title') def test_left_variable(self): self._setf(u'%left{$title, 3}') - self._assert_dest('/base/the') + self._assert_dest(b'/base/the') def test_right_variable(self): self._setf(u'%right{$title,3}') - self._assert_dest('/base/tle') + self._assert_dest(b'/base/tle') def test_if_false(self): self._setf(u'x%if{,foo}') - self._assert_dest('/base/x') + self._assert_dest(b'/base/x') def test_if_false_value(self): self._setf(u'x%if{false,foo}') - self._assert_dest('/base/x') + self._assert_dest(b'/base/x') def test_if_true(self): self._setf(u'%if{bar,foo}') - self._assert_dest('/base/foo') + self._assert_dest(b'/base/foo') def test_if_else_false(self): self._setf(u'%if{,foo,baz}') - self._assert_dest('/base/baz') + self._assert_dest(b'/base/baz') def test_if_else_false_value(self): self._setf(u'%if{false,foo,baz}') - self._assert_dest('/base/baz') + self._assert_dest(b'/base/baz') def test_if_int_value(self): self._setf(u'%if{0,foo,baz}') - self._assert_dest('/base/baz') + self._assert_dest(b'/base/baz') def test_nonexistent_function(self): self._setf(u'%foo{bar}') - self._assert_dest('/base/%foo{bar}') + self._assert_dest(b'/base/%foo{bar}') + + def test_if_def_field_return_self(self): + self.i.bar = 3 + self._setf(u'%ifdef{bar}') + self._assert_dest(b'/base/3') + + def test_if_def_field_not_defined(self): + self._setf(u' %ifdef{bar}/$artist') + self._assert_dest(b'/base/the artist') + + def test_if_def_field_not_defined_2(self): + self._setf(u'$artist/%ifdef{bar}') + self._assert_dest(b'/base/the artist') + + def test_if_def_true(self): + self._setf(u'%ifdef{artist,cool}') + 
self._assert_dest(b'/base/cool') + + def test_if_def_true_complete(self): + self.i.series = "Now" + self._setf(u'%ifdef{series,$series Series,Albums}/$album') + self._assert_dest(b'/base/Now Series/the album') + + def test_if_def_false_complete(self): + self._setf(u'%ifdef{plays,$plays,not_played}') + self._assert_dest(b'/base/not_played') + + def test_first(self): + self.i.genres = "Pop; Rock; Classical Crossover" + self._setf(u'%first{$genres}') + self._assert_dest(b'/base/Pop') + + def test_first_skip(self): + self.i.genres = "Pop; Rock; Classical Crossover" + self._setf(u'%first{$genres,1,2}') + self._assert_dest(b'/base/Classical Crossover') + + def test_first_different_sep(self): + self._setf(u'%first{Alice / Bob / Eve,2,0, / , & }') + self._assert_dest(b'/base/Alice & Bob') class DisambiguationTest(_common.TestCase, PathFormattingMixin): def setUp(self): super(DisambiguationTest, self).setUp() self.lib = beets.library.Library(':memory:') - self.lib.directory = '/base' - self.lib.path_formats = [('default', u'path')] + self.lib.directory = b'/base' + self.lib.path_formats = [(u'default', u'path')] self.i1 = item() self.i1.year = 2001 @@ -615,86 +680,43 @@ self.lib._connection().close() def test_unique_expands_to_disambiguating_year(self): - self._assert_dest('/base/foo [2001]/the title', self.i1) + self._assert_dest(b'/base/foo [2001]/the title', self.i1) def test_unique_with_default_arguments_uses_albumtype(self): album2 = self.lib.get_album(self.i1) - album2.albumtype = 'bar' + album2.albumtype = u'bar' album2.store() self._setf(u'foo%aunique{}/$title') - self._assert_dest('/base/foo [bar]/the title', self.i1) + self._assert_dest(b'/base/foo [bar]/the title', self.i1) def test_unique_expands_to_nothing_for_distinct_albums(self): album2 = self.lib.get_album(self.i2) - album2.album = 'different album' + album2.album = u'different album' album2.store() - self._assert_dest('/base/foo/the title', self.i1) + self._assert_dest(b'/base/foo/the title', self.i1) def test_use_fallback_numbers_when_identical(self): album2 = self.lib.get_album(self.i2) album2.year = 2001 album2.store() - self._assert_dest('/base/foo 1/the title', self.i1) - self._assert_dest('/base/foo 2/the title', self.i2) + self._assert_dest(b'/base/foo 1/the title', self.i1) + self._assert_dest(b'/base/foo 2/the title', self.i2) def test_unique_falls_back_to_second_distinguishing_field(self): self._setf(u'foo%aunique{albumartist album,month year}/$title') - self._assert_dest('/base/foo [2001]/the title', self.i1) + self._assert_dest(b'/base/foo [2001]/the title', self.i1) def test_unique_sanitized(self): album2 = self.lib.get_album(self.i2) album2.year = 2001 album1 = self.lib.get_album(self.i1) - album1.albumtype = 'foo/bar' + album1.albumtype = u'foo/bar' album2.store() album1.store() self._setf(u'foo%aunique{albumartist album,albumtype}/$title') - self._assert_dest('/base/foo [foo_bar]/the title', self.i1) - - -class PathConversionTest(_common.TestCase): - def test_syspath_windows_format(self): - with _common.platform_windows(): - path = os.path.join('a', 'b', 'c') - outpath = util.syspath(path) - self.assertTrue(isinstance(outpath, unicode)) - self.assertTrue(outpath.startswith(u'\\\\?\\')) - - def test_syspath_windows_format_unc_path(self): - # The \\?\ prefix on Windows behaves differently with UNC - # (network share) paths. 
- path = '\\\\server\\share\\file.mp3' - with _common.platform_windows(): - outpath = util.syspath(path) - self.assertTrue(isinstance(outpath, unicode)) - self.assertEqual(outpath, u'\\\\?\\UNC\\server\\share\\file.mp3') - - def test_syspath_posix_unchanged(self): - with _common.platform_posix(): - path = os.path.join('a', 'b', 'c') - outpath = util.syspath(path) - self.assertEqual(path, outpath) - - def _windows_bytestring_path(self, path): - old_gfse = sys.getfilesystemencoding - sys.getfilesystemencoding = lambda: 'mbcs' - try: - with _common.platform_windows(): - return util.bytestring_path(path) - finally: - sys.getfilesystemencoding = old_gfse - - def test_bytestring_path_windows_encodes_utf8(self): - path = u'caf\xe9' - outpath = self._windows_bytestring_path(path) - self.assertEqual(path, outpath.decode('utf8')) - - def test_bytesting_path_windows_removes_magic_prefix(self): - path = u'\\\\?\\C:\\caf\xe9' - outpath = self._windows_bytestring_path(path) - self.assertEqual(outpath, u'C:\\caf\xe9'.encode('utf8')) + self._assert_dest(b'/base/foo [foo_bar]/the title', self.i1) class PluginDestinationTest(_common.TestCase): @@ -714,8 +736,8 @@ plugins.item_field_getters = field_getters self.lib = beets.library.Library(':memory:') - self.lib.directory = '/base' - self.lib.path_formats = [('default', u'$artist $foo')] + self.lib.directory = b'/base' + self.lib.path_formats = [(u'default', u'$artist $foo')] self.i = item(self.lib) def tearDown(self): @@ -725,28 +747,28 @@ def _assert_dest(self, dest): with _common.platform_posix(): the_dest = self.i.destination() - self.assertEqual(the_dest, '/base/' + dest) + self.assertEqual(the_dest, b'/base/' + dest) def test_undefined_value_not_substituted(self): - self._assert_dest('the artist $foo') + self._assert_dest(b'the artist $foo') def test_plugin_value_not_substituted(self): self._tv_map = { 'foo': 'bar', } - self._assert_dest('the artist bar') + self._assert_dest(b'the artist bar') def test_plugin_value_overrides_attribute(self): self._tv_map = { 'artist': 'bar', } - self._assert_dest('bar $foo') + self._assert_dest(b'bar $foo') def test_plugin_value_sanitized(self): self._tv_map = { 'foo': 'bar/baz', } - self._assert_dest('the artist bar_baz') + self._assert_dest(b'the artist bar_baz') class AlbumInfoTest(_common.TestCase): @@ -768,7 +790,7 @@ ai.artpath = '/my/great/art' ai.store() new_ai = self.lib.get_album(self.i) - self.assertEqual(new_ai.artpath, '/my/great/art') + self.assertEqual(new_ai.artpath, b'/my/great/art') def test_albuminfo_for_two_items_doesnt_duplicate_row(self): i2 = item(self.lib) @@ -783,7 +805,7 @@ def test_individual_tracks_have_no_albuminfo(self): i2 = item() - i2.album = 'aTotallyDifferentAlbum' + i2.album = u'aTotallyDifferentAlbum' self.lib.add(i2) ai = self.lib.get_album(i2) self.assertEqual(ai, None) @@ -799,29 +821,29 @@ if i.id == self.i.id: break else: - self.fail("item not found") + self.fail(u"item not found") def test_albuminfo_changes_affect_items(self): ai = self.lib.get_album(self.i) - ai.album = 'myNewAlbum' + ai.album = u'myNewAlbum' ai.store() i = self.lib.items()[0] - self.assertEqual(i.album, 'myNewAlbum') + self.assertEqual(i.album, u'myNewAlbum') def test_albuminfo_change_albumartist_changes_items(self): ai = self.lib.get_album(self.i) - ai.albumartist = 'myNewArtist' + ai.albumartist = u'myNewArtist' ai.store() i = self.lib.items()[0] - self.assertEqual(i.albumartist, 'myNewArtist') - self.assertNotEqual(i.artist, 'myNewArtist') + self.assertEqual(i.albumartist, u'myNewArtist') + 
self.assertNotEqual(i.artist, u'myNewArtist') def test_albuminfo_change_artist_does_not_change_items(self): ai = self.lib.get_album(self.i) - ai.artist = 'myNewArtist' + ai.artist = u'myNewArtist' ai.store() i = self.lib.items()[0] - self.assertNotEqual(i.artist, 'myNewArtist') + self.assertNotEqual(i.artist, u'myNewArtist') def test_albuminfo_remove_removes_items(self): item_id = self.i.id @@ -836,6 +858,16 @@ self.i.remove() self.assertEqual(len(self.lib.albums()), 0) + def test_noop_albuminfo_changes_affect_items(self): + i = self.lib.items()[0] + i.album = u'foobar' + i.store() + ai = self.lib.get_album(self.i) + ai.album = ai.album + ai.store() + i = self.lib.items()[0] + self.assertEqual(i.album, ai.album) + class ArtDestinationTest(_common.TestCase): def setUp(self): @@ -851,7 +883,8 @@ def test_art_filename_respects_setting(self): art = self.ai.art_destination('something.jpg') - self.assert_('%sartimage.jpg' % os.path.sep in art) + new_art = bytestring_path('%sartimage.jpg' % os.path.sep) + self.assertTrue(new_art in art) def test_art_path_in_item_dir(self): art = self.ai.art_destination('something.jpg') @@ -861,7 +894,7 @@ def test_art_path_sanitized(self): config['art_filename'] = u'artXimage' art = self.ai.art_destination('something.jpg') - self.assert_('artYimage' in art) + self.assertTrue(b'artYimage' in art) class PathStringTest(_common.TestCase): @@ -871,25 +904,25 @@ self.i = item(self.lib) def test_item_path_is_bytestring(self): - self.assert_(isinstance(self.i.path, str)) + self.assertTrue(isinstance(self.i.path, bytes)) def test_fetched_item_path_is_bytestring(self): i = list(self.lib.items())[0] - self.assert_(isinstance(i.path, str)) + self.assertTrue(isinstance(i.path, bytes)) def test_unicode_path_becomes_bytestring(self): self.i.path = u'unicodepath' - self.assert_(isinstance(self.i.path, str)) + self.assertTrue(isinstance(self.i.path, bytes)) def test_unicode_in_database_becomes_bytestring(self): self.lib._connection().execute(""" update items set path=? where id=? """, (self.i.id, u'somepath')) i = list(self.lib.items())[0] - self.assert_(isinstance(i.path, str)) + self.assertTrue(isinstance(i.path, bytes)) def test_special_chars_preserved_in_database(self): - path = 'b\xe1r' + path = u'b\xe1r'.encode('utf8') self.i.path = path self.i.store() i = list(self.lib.items())[0] @@ -897,7 +930,7 @@ def test_special_char_path_added_to_database(self): self.i.remove() - path = 'b\xe1r' + path = u'b\xe1r'.encode('utf8') i = item() i.path = path self.lib.add(i) @@ -907,13 +940,13 @@ def test_destination_returns_bytestring(self): self.i.artist = u'b\xe1r' dest = self.i.destination() - self.assert_(isinstance(dest, str)) + self.assertTrue(isinstance(dest, bytes)) def test_art_destination_returns_bytestring(self): self.i.artist = u'b\xe1r' alb = self.lib.add_album([self.i]) dest = alb.art_destination(u'image.jpg') - self.assert_(isinstance(dest, str)) + self.assertTrue(isinstance(dest, bytes)) def test_artpath_stores_special_chars(self): path = b'b\xe1r' @@ -926,17 +959,17 @@ def test_sanitize_path_with_special_chars(self): path = u'b\xe1r?' new_path = util.sanitize_path(path) - self.assert_(new_path.startswith(u'b\xe1r')) + self.assertTrue(new_path.startswith(u'b\xe1r')) def test_sanitize_path_returns_unicode(self): path = u'b\xe1r?' 
new_path = util.sanitize_path(path) - self.assert_(isinstance(new_path, unicode)) + self.assertTrue(isinstance(new_path, unicode)) def test_unicode_artpath_becomes_bytestring(self): alb = self.lib.add_album([self.i]) alb.artpath = u'somep\xe1th' - self.assert_(isinstance(alb.artpath, str)) + self.assertTrue(isinstance(alb.artpath, bytes)) def test_unicode_artpath_in_database_decoded(self): alb = self.lib.add_album([self.i]) @@ -945,31 +978,14 @@ (u'somep\xe1th', alb.id) ) alb = self.lib.get_album(alb.id) - self.assert_(isinstance(alb.artpath, str)) - - -class PathTruncationTest(_common.TestCase): - def test_truncate_bytestring(self): - with _common.platform_posix(): - p = util.truncate_path('abcde/fgh', 4) - self.assertEqual(p, 'abcd/fgh') - - def test_truncate_unicode(self): - with _common.platform_posix(): - p = util.truncate_path(u'abcde/fgh', 4) - self.assertEqual(p, u'abcd/fgh') - - def test_truncate_preserves_extension(self): - with _common.platform_posix(): - p = util.truncate_path(u'abcde/fgh.ext', 5) - self.assertEqual(p, u'abcde/f.ext') + self.assertTrue(isinstance(alb.artpath, bytes)) class MtimeTest(_common.TestCase): def setUp(self): super(MtimeTest, self).setUp() - self.ipath = os.path.join(self.temp_dir, 'testfile.mp3') - shutil.copy(os.path.join(_common.RSRC, 'full.mp3'), self.ipath) + self.ipath = os.path.join(self.temp_dir, b'testfile.mp3') + shutil.copy(os.path.join(_common.RSRC, b'full.mp3'), self.ipath) self.i = beets.library.Item.from_path(self.ipath) self.lib = beets.library.Library(':memory:') self.lib.add(self.i) @@ -986,16 +1002,16 @@ self.assertGreaterEqual(self.i.mtime, self._mtime()) def test_mtime_reset_on_db_modify(self): - self.i.title = 'something else' + self.i.title = u'something else' self.assertLess(self.i.mtime, self._mtime()) def test_mtime_up_to_date_after_write(self): - self.i.title = 'something else' + self.i.title = u'something else' self.i.write() self.assertGreaterEqual(self.i.mtime, self._mtime()) def test_mtime_up_to_date_after_read(self): - self.i.title = 'something else' + self.i.title = u'something else' self.i.read() self.assertGreaterEqual(self.i.mtime, self._mtime()) @@ -1017,25 +1033,33 @@ class TemplateTest(_common.LibTestCase): - def album_fields_override_item_values(self): - self.album = self.lib.add_album([self.i]) - self.album.albumartist = 'album-level' - self.album.store() - self.i.albumartist = 'track-level' - self.i.store() - self.assertEqual(self.i.evaluate_template('$albumartist'), - 'album-level') - def test_year_formatted_in_template(self): self.i.year = 123 self.i.store() - self.assertEqual(self.i.evaluate_template('$year'), '0123') + self.assertEqual(self.i.evaluate_template('$year'), u'0123') def test_album_flexattr_appears_in_item_template(self): self.album = self.lib.add_album([self.i]) - self.album.foo = 'baz' + self.album.foo = u'baz' self.album.store() - self.assertEqual(self.i.evaluate_template('$foo'), 'baz') + self.assertEqual(self.i.evaluate_template('$foo'), u'baz') + + def test_album_and_item_format(self): + config['format_album'] = u'foö $foo' + album = beets.library.Album() + album.foo = u'bar' + album.tagada = u'togodo' + self.assertEqual(u"{0}".format(album), u"foö bar") + self.assertEqual(u"{0:$tagada}".format(album), u"togodo") + self.assertEqual(unicode(album), u"foö bar") + self.assertEqual(bytes(album), b"fo\xc3\xb6 bar") + + config['format_item'] = 'bar $foo' + item = beets.library.Item() + item.foo = u'bar' + item.tagada = u'togodo' + self.assertEqual("{0}".format(item), u"bar bar") + 
self.assertEqual("{0:$tagada}".format(item), u"togodo") class UnicodePathTest(_common.LibTestCase): @@ -1047,43 +1071,63 @@ self.i.write() -class WriteTest(_common.LibTestCase): +class WriteTest(unittest.TestCase, TestHelper): + def setUp(self): + self.setup_beets() + + def tearDown(self): + self.teardown_beets() + def test_write_nonexistant(self): - self.i.path = '/path/does/not/exist' - self.assertRaises(beets.library.ReadError, self.i.write) + item = self.create_item() + item.path = b'/path/does/not/exist' + with self.assertRaises(beets.library.ReadError): + item.write() def test_no_write_permission(self): - path = os.path.join(self.temp_dir, 'file.mp3') - shutil.copy(os.path.join(_common.RSRC, 'empty.mp3'), path) + item = self.add_item_fixture() + path = syspath(item.path) os.chmod(path, stat.S_IRUSR) try: - self.i.path = path - self.assertRaises(beets.library.WriteError, self.i.write) + self.assertRaises(beets.library.WriteError, item.write) finally: # Restore write permissions so the file can be cleaned up. os.chmod(path, stat.S_IRUSR | stat.S_IWUSR) def test_write_with_custom_path(self): - custom_path = os.path.join(self.temp_dir, 'file.mp3') - self.i.path = os.path.join(self.temp_dir, 'item_file.mp3') - shutil.copy(os.path.join(_common.RSRC, 'empty.mp3'), custom_path) - shutil.copy(os.path.join(_common.RSRC, 'empty.mp3'), self.i.path) + item = self.add_item_fixture() + custom_path = os.path.join(self.temp_dir, b'custom.mp3') + shutil.copy(syspath(item.path), syspath(custom_path)) - self.i['artist'] = 'new artist' + item['artist'] = 'new artist' self.assertNotEqual(MediaFile(custom_path).artist, 'new artist') - self.assertNotEqual(MediaFile(self.i.path).artist, 'new artist') + self.assertNotEqual(MediaFile(item.path).artist, 'new artist') - self.i.write(custom_path) + item.write(custom_path) self.assertEqual(MediaFile(custom_path).artist, 'new artist') - self.assertNotEqual(MediaFile(self.i.path).artist, 'new artist') + self.assertNotEqual(MediaFile(item.path).artist, 'new artist') + + def test_write_custom_tags(self): + item = self.add_item_fixture(artist='old artist') + item.write(tags={'artist': 'new artist'}) + self.assertNotEqual(item.artist, 'new artist') + self.assertEqual(MediaFile(item.path).artist, 'new artist') + + def test_write_date_field(self): + # Since `date` is not a MediaField, this should do nothing. 
+ item = self.add_item_fixture() + clean_year = item.year + item.date = u'foo' + item.write() + self.assertEqual(MediaFile(item.path).year, clean_year) class ItemReadTest(unittest.TestCase): def test_unreadable_raise_read_error(self): - unreadable = os.path.join(_common.RSRC, 'image-2x3.png') + unreadable = os.path.join(_common.RSRC, b'image-2x3.png') item = beets.library.Item() with self.assertRaises(beets.library.ReadError) as cm: item.read(unreadable) @@ -1096,6 +1140,87 @@ item.read('/thisfiledoesnotexist') +class FilesizeTest(unittest.TestCase, TestHelper): + def setUp(self): + self.setup_beets() + + def tearDown(self): + self.teardown_beets() + + def test_filesize(self): + item = self.add_item_fixture() + self.assertNotEqual(item.filesize, 0) + + def test_nonexistent_file(self): + item = beets.library.Item() + self.assertEqual(item.filesize, 0) + + +class ParseQueryTest(unittest.TestCase): + def test_parse_invalid_query_string(self): + with self.assertRaises(beets.dbcore.InvalidQueryError) as raised: + beets.library.parse_query_string(u'foo"', None) + self.assertIsInstance(raised.exception, + beets.dbcore.query.ParsingError) + + def test_parse_bytes(self): + with self.assertRaises(AssertionError): + beets.library.parse_query_string(b"query", None) + + +class LibraryFieldTypesTest(unittest.TestCase): + """Test format() and parse() for library-specific field types""" + def test_datetype(self): + t = beets.library.DateType() + + # format + time_local = time.strftime(beets.config['time_format'].get(unicode), + time.localtime(123456789)) + self.assertEqual(time_local, t.format(123456789)) + # parse + self.assertEqual(123456789.0, t.parse(time_local)) + self.assertEqual(123456789.0, t.parse(u'123456789.0')) + self.assertEqual(t.null, t.parse(u'not123456789.0')) + self.assertEqual(t.null, t.parse(u'1973-11-29')) + + def test_pathtype(self): + t = beets.library.PathType() + + # format + self.assertEqual('/tmp', t.format('/tmp')) + self.assertEqual(u'/tmp/\xe4lbum', t.format(u'/tmp/\u00e4lbum')) + # parse + self.assertEqual(np(b'/tmp'), t.parse('/tmp')) + self.assertEqual(np(b'/tmp/\xc3\xa4lbum'), + t.parse(u'/tmp/\u00e4lbum/')) + + def test_musicalkey(self): + t = beets.library.MusicalKey() + + # parse + self.assertEqual(u'C#m', t.parse(u'c#m')) + self.assertEqual(u'Gm', t.parse(u'g minor')) + self.assertEqual(u'Not c#m', t.parse(u'not C#m')) + + def test_durationtype(self): + t = beets.library.DurationType() + + # format + self.assertEqual(u'1:01', t.format(61.23)) + self.assertEqual(u'60:01', t.format(3601.23)) + self.assertEqual(u'0:00', t.format(None)) + # parse + self.assertEqual(61.0, t.parse(u'1:01')) + self.assertEqual(61.23, t.parse(u'61.23')) + self.assertEqual(3601.0, t.parse(u'60:01')) + self.assertEqual(t.null, t.parse(u'1:00:01')) + self.assertEqual(t.null, t.parse(u'not61.23')) + # config format_raw_length + beets.config['format_raw_length'] = True + self.assertEqual(61.23, t.format(61.23)) + self.assertEqual(3601.23, t.format(3601.23)) + + def suite(): return unittest.TestLoader().loadTestsFromName(__name__) diff -Nru beets-1.3.8+dfsg/test/test_logging.py beets-1.3.19/test/test_logging.py --- beets-1.3.8+dfsg/test/test_logging.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/test_logging.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,300 @@ +# -*- coding: utf-8 -*- + +"""Stupid tests that ensure logging works as expected""" +from __future__ import division, absolute_import, print_function + +import sys +import threading +import logging as log +from StringIO 
import StringIO + +import beets.logging as blog +from beets import plugins, ui +import beetsplug +from test import _common +from test._common import unittest, TestCase +from test import helper + + +class LoggingTest(TestCase): + def test_logging_management(self): + l1 = log.getLogger("foo123") + l2 = blog.getLogger("foo123") + self.assertEqual(l1, l2) + self.assertEqual(l1.__class__, log.Logger) + + l3 = blog.getLogger("bar123") + l4 = log.getLogger("bar123") + self.assertEqual(l3, l4) + self.assertEqual(l3.__class__, blog.BeetsLogger) + self.assertIsInstance(l3, (blog.StrFormatLogger, + blog.ThreadLocalLevelLogger)) + + l5 = l3.getChild("shalala") + self.assertEqual(l5.__class__, blog.BeetsLogger) + + l6 = blog.getLogger() + self.assertNotEqual(l1, l6) + + def test_str_format_logging(self): + l = blog.getLogger("baz123") + stream = StringIO() + handler = log.StreamHandler(stream) + + l.addHandler(handler) + l.propagate = False + + l.warning(u"foo {0} {bar}", "oof", bar=u"baz") + handler.flush() + self.assertTrue(stream.getvalue(), u"foo oof baz") + + +class LoggingLevelTest(unittest.TestCase, helper.TestHelper): + class DummyModule(object): + class DummyPlugin(plugins.BeetsPlugin): + def __init__(self): + plugins.BeetsPlugin.__init__(self, 'dummy') + self.import_stages = [self.import_stage] + self.register_listener('dummy_event', self.listener) + + def log_all(self, name): + self._log.debug(u'debug ' + name) + self._log.info(u'info ' + name) + self._log.warning(u'warning ' + name) + + def commands(self): + cmd = ui.Subcommand('dummy') + cmd.func = lambda _, __, ___: self.log_all('cmd') + return (cmd,) + + def import_stage(self, session, task): + self.log_all('import_stage') + + def listener(self): + self.log_all('listener') + + def setUp(self): + sys.modules['beetsplug.dummy'] = self.DummyModule + beetsplug.dummy = self.DummyModule + self.setup_beets() + self.load_plugins('dummy') + + def tearDown(self): + self.unload_plugins() + self.teardown_beets() + del beetsplug.dummy + sys.modules.pop('beetsplug.dummy') + self.DummyModule.DummyPlugin.listeners = None + self.DummyModule.DummyPlugin._raw_listeners = None + + def test_command_level0(self): + self.config['verbose'] = 0 + with helper.capture_log() as logs: + self.run_command('dummy') + self.assertIn(u'dummy: warning cmd', logs) + self.assertIn(u'dummy: info cmd', logs) + self.assertNotIn(u'dummy: debug cmd', logs) + + def test_command_level1(self): + self.config['verbose'] = 1 + with helper.capture_log() as logs: + self.run_command('dummy') + self.assertIn(u'dummy: warning cmd', logs) + self.assertIn(u'dummy: info cmd', logs) + self.assertIn(u'dummy: debug cmd', logs) + + def test_command_level2(self): + self.config['verbose'] = 2 + with helper.capture_log() as logs: + self.run_command('dummy') + self.assertIn(u'dummy: warning cmd', logs) + self.assertIn(u'dummy: info cmd', logs) + self.assertIn(u'dummy: debug cmd', logs) + + def test_listener_level0(self): + self.config['verbose'] = 0 + with helper.capture_log() as logs: + plugins.send('dummy_event') + self.assertIn(u'dummy: warning listener', logs) + self.assertNotIn(u'dummy: info listener', logs) + self.assertNotIn(u'dummy: debug listener', logs) + + def test_listener_level1(self): + self.config['verbose'] = 1 + with helper.capture_log() as logs: + plugins.send('dummy_event') + self.assertIn(u'dummy: warning listener', logs) + self.assertIn(u'dummy: info listener', logs) + self.assertNotIn(u'dummy: debug listener', logs) + + def test_listener_level2(self): + self.config['verbose'] = 2 
+ with helper.capture_log() as logs: + plugins.send('dummy_event') + self.assertIn(u'dummy: warning listener', logs) + self.assertIn(u'dummy: info listener', logs) + self.assertIn(u'dummy: debug listener', logs) + + def test_import_stage_level0(self): + self.config['verbose'] = 0 + with helper.capture_log() as logs: + importer = self.create_importer() + importer.run() + self.assertIn(u'dummy: warning import_stage', logs) + self.assertNotIn(u'dummy: info import_stage', logs) + self.assertNotIn(u'dummy: debug import_stage', logs) + + def test_import_stage_level1(self): + self.config['verbose'] = 1 + with helper.capture_log() as logs: + importer = self.create_importer() + importer.run() + self.assertIn(u'dummy: warning import_stage', logs) + self.assertIn(u'dummy: info import_stage', logs) + self.assertNotIn(u'dummy: debug import_stage', logs) + + def test_import_stage_level2(self): + self.config['verbose'] = 2 + with helper.capture_log() as logs: + importer = self.create_importer() + importer.run() + self.assertIn(u'dummy: warning import_stage', logs) + self.assertIn(u'dummy: info import_stage', logs) + self.assertIn(u'dummy: debug import_stage', logs) + + +@_common.slow_test() +class ConcurrentEventsTest(TestCase, helper.TestHelper): + """Similar to LoggingLevelTest but lower-level and focused on multiple + events interaction. Since this is a bit heavy we don't do it in + LoggingLevelTest. + """ + class DummyPlugin(plugins.BeetsPlugin): + def __init__(self, test_case): + plugins.BeetsPlugin.__init__(self, 'dummy') + self.register_listener('dummy_event1', self.listener1) + self.register_listener('dummy_event2', self.listener2) + self.lock1 = threading.Lock() + self.lock2 = threading.Lock() + self.test_case = test_case + self.exc_info = None + self.t1_step = self.t2_step = 0 + + def log_all(self, name): + self._log.debug(u'debug ' + name) + self._log.info(u'info ' + name) + self._log.warning(u'warning ' + name) + + def listener1(self): + try: + self.test_case.assertEqual(self._log.level, log.INFO) + self.t1_step = 1 + self.lock1.acquire() + self.test_case.assertEqual(self._log.level, log.INFO) + self.t1_step = 2 + except Exception: + import sys + self.exc_info = sys.exc_info() + + def listener2(self): + try: + self.test_case.assertEqual(self._log.level, log.DEBUG) + self.t2_step = 1 + self.lock2.acquire() + self.test_case.assertEqual(self._log.level, log.DEBUG) + self.t2_step = 2 + except Exception: + import sys + self.exc_info = sys.exc_info() + + def setUp(self): + self.setup_beets(disk=True) + + def tearDown(self): + self.teardown_beets() + + def test_concurrent_events(self): + dp = self.DummyPlugin(self) + + def check_dp_exc(): + if dp.exc_info: + raise dp.exc_info[1], None, dp.exc_info[2] + + try: + dp.lock1.acquire() + dp.lock2.acquire() + self.assertEqual(dp._log.level, log.NOTSET) + + self.config['verbose'] = 1 + t1 = threading.Thread(target=dp.listeners['dummy_event1'][0]) + t1.start() # blocked. t1 tested its log level + while dp.t1_step != 1: + check_dp_exc() + self.assertTrue(t1.is_alive()) + self.assertEqual(dp._log.level, log.NOTSET) + + self.config['verbose'] = 2 + t2 = threading.Thread(target=dp.listeners['dummy_event2'][0]) + t2.start() # blocked. 
t2 tested its log level + while dp.t2_step != 1: + check_dp_exc() + self.assertTrue(t2.is_alive()) + self.assertEqual(dp._log.level, log.NOTSET) + + dp.lock1.release() # dummy_event1 tests its log level + finishes + while dp.t1_step != 2: + check_dp_exc() + t1.join(.1) + self.assertFalse(t1.is_alive()) + self.assertTrue(t2.is_alive()) + self.assertEqual(dp._log.level, log.NOTSET) + + dp.lock2.release() # dummy_event2 tests its log level + finishes + while dp.t2_step != 2: + check_dp_exc() + t2.join(.1) + self.assertFalse(t2.is_alive()) + + except: + print(u"Alive threads:", threading.enumerate()) + if dp.lock1.locked(): + print(u"Releasing lock1 after exception in test") + dp.lock1.release() + if dp.lock2.locked(): + print(u"Releasing lock2 after exception in test") + dp.lock2.release() + print(u"Alive threads:", threading.enumerate()) + raise + + def test_root_logger_levels(self): + """Root logger level should be shared between threads. + """ + self.config['threaded'] = True + + blog.getLogger('beets').set_global_level(blog.WARNING) + with helper.capture_log() as logs: + importer = self.create_importer() + importer.run() + self.assertEqual(logs, []) + + blog.getLogger('beets').set_global_level(blog.INFO) + with helper.capture_log() as logs: + importer = self.create_importer() + importer.run() + for l in logs: + self.assertIn(u"import", l) + self.assertIn(u"album", l) + + blog.getLogger('beets').set_global_level(blog.DEBUG) + with helper.capture_log() as logs: + importer = self.create_importer() + importer.run() + self.assertIn(u"Sending event: database_change", logs) + + +def suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_lyrics.py beets-1.3.19/test/test_lyrics.py --- beets-1.3.8+dfsg/test/test_lyrics.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_lyrics.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Fabrice Laporte. +# Copyright 2016, Fabrice Laporte. 
# # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,9 +15,24 @@ """Tests for the 'lyrics' plugin.""" -from _common import unittest +from __future__ import division, absolute_import, print_function + +import os +from test import _common +import sys +import re + +from mock import MagicMock + +from test._common import unittest from beetsplug import lyrics from beets.library import Item +from beets.util import confit, bytestring_path +from beets import logging + +log = logging.getLogger('beets.test_lyrics') +raw_backend = lyrics.Backend({}, log) +google = lyrics.Google(MagicMock(), log) class LyricsPluginTest(unittest.TestCase): @@ -111,11 +127,267 @@ lyrics.remove_credits("""Lyrics brought by example.com"""), "" ) + + # don't remove 2nd verse for the only reason it contains 'lyrics' word text = """Look at all the shit that i done bought her See lyrics ain't nothin if the beat aint crackin""" self.assertEqual(lyrics.remove_credits(text), text) + def test_is_lyrics(self): + texts = ['LyricsMania.com - Copyright (c) 2013 - All Rights Reserved'] + texts += ["""All material found on this site is property\n + of mywickedsongtext brand"""] + for t in texts: + self.assertFalse(google.is_lyrics(t)) + + def test_slugify(self): + text = u"http://site.com/\xe7afe-au_lait(boisson)" + self.assertEqual(google.slugify(text), + 'http://site.com/cafe_au_lait') + + def test_scrape_strip_cruft(self): + text = u""" +  one +
+ two ! +
+ four""" + self.assertEqual(lyrics._scrape_strip_cruft(text, True), + "one\ntwo !\n\nfour") + + def test_scrape_strip_scripts(self): + text = u"""foobaz""" + self.assertEqual(lyrics._scrape_strip_cruft(text, True), + "foobaz") + + def test_scrape_strip_tag_in_comment(self): + text = u"""fooqux""" + self.assertEqual(lyrics._scrape_strip_cruft(text, True), + "fooqux") + + def test_scrape_merge_paragraphs(self): + text = u"one
+ two
three" + self.assertEqual(lyrics._scrape_merge_paragraphs(text), + "one\ntwo\nthree") + + def test_missing_lyrics(self): + self.assertFalse(google.is_lyrics(LYRICS_TEXTS['missing_texts'])) + + +def url_to_filename(url): + url = re.sub(r'https?://|www.', '', url) + fn = "".join(x for x in url if (x.isalnum() or x == '/')) + fn = fn.split('/') + fn = os.path.join(LYRICS_ROOT_DIR, + bytestring_path(fn[0]), + bytestring_path(fn[-1] + '.txt')) + return fn + + +def check_lyrics_fetched(): + """Return True if lyrics_download_samples.py has been runned and lyrics + pages are present in resources directory""" + lyrics_dirs = len([d for d in os.listdir(LYRICS_ROOT_DIR) if + os.path.isdir(os.path.join(LYRICS_ROOT_DIR, d))]) + # example.com is the only lyrics dir added to repo + return lyrics_dirs > 1 + + +class MockFetchUrl(object): + def __init__(self, pathval='fetched_path'): + self.pathval = pathval + self.fetched = None + + def __call__(self, url, filename=None): + self.fetched = url + fn = url_to_filename(url) + with open(fn, 'r') as f: + content = f.read() + return content + + +def is_lyrics_content_ok(title, text): + """Compare lyrics text to expected lyrics for given title""" + + keywords = LYRICS_TEXTS[google.slugify(title)] + return all(x in text.lower() for x in keywords) + +LYRICS_ROOT_DIR = os.path.join(_common.RSRC, b'lyrics') +LYRICS_TEXTS = confit.load_yaml(os.path.join(_common.RSRC, b'lyricstext.yaml')) +DEFAULT_SONG = dict(artist=u'The Beatles', title=u'Lady Madonna') + +DEFAULT_SOURCES = [ + dict(DEFAULT_SONG, url=u'http://lyrics.wikia.com/', + path=u'The_Beatles:Lady_Madonna'), + dict(artist=u'Santana', title=u'Black magic woman', + url='http://www.lyrics.com/', + path=u'black-magic-woman-lyrics-santana.html'), + dict(DEFAULT_SONG, url='https://www.musixmatch.com/', + path=u'lyrics/The-Beatles/Lady-Madonna'), +] + +# Every source entered in default beets google custom search engine +# must be listed below. +# Use default query when possible, or override artist and title fields +# if website don't have lyrics for default query. 
+GOOGLE_SOURCES = [ + dict(DEFAULT_SONG, + url=u'http://www.absolutelyrics.com', + path=u'/lyrics/view/the_beatles/lady_madonna'), + dict(DEFAULT_SONG, + url=u'http://www.azlyrics.com', + path=u'/lyrics/beatles/ladymadonna.html'), + dict(DEFAULT_SONG, + url=u'http://www.chartlyrics.com', + path=u'/_LsLsZ7P4EK-F-LD4dJgDQ/Lady+Madonna.aspx'), + dict(DEFAULT_SONG, + url=u'http://www.elyricsworld.com', + path=u'/lady_madonna_lyrics_beatles.html'), + dict(url=u'http://www.lacoccinelle.net', + artist=u'Jacques Brel', title=u"Amsterdam", + path=u'/paroles-officielles/275679.html'), + dict(DEFAULT_SONG, + url=u'http://letras.mus.br/', path=u'the-beatles/275/'), + dict(DEFAULT_SONG, + url='http://www.lyricsmania.com/', + path='lady_madonna_lyrics_the_beatles.html'), + dict(artist=u'Santana', title=u'Black magic woman', + url='http://www.lyrics.com/', + path=u'black-magic-woman-lyrics-santana.html'), + dict(DEFAULT_SONG, url=u'http://lyrics.wikia.com/', + path=u'The_Beatles:Lady_Madonna'), + dict(DEFAULT_SONG, + url=u'http://www.lyrics.net', path=u'/lyric/19110224'), + dict(DEFAULT_SONG, + url=u'http://www.lyricsmode.com', + path=u'/lyrics/b/beatles/lady_madonna.html'), + dict(url=u'http://www.lyricsontop.com', + artist=u'Amy Winehouse', title=u"Jazz'n'blues", + path=u'/amy-winehouse-songs/jazz-n-blues-lyrics.html'), + dict(DEFAULT_SONG, + url='http://www.metrolyrics.com/', + path='lady-madonna-lyrics-beatles.html'), + dict(url='http://www.musica.com/', path='letras.asp?letra=2738', + artist=u'Santana', title=u'Black magic woman'), + dict(DEFAULT_SONG, + url=u'http://www.onelyrics.net/', + artist=u'Ben & Ellen Harper', title=u'City of dreams', + path='ben-ellen-harper-city-of-dreams-lyrics'), + dict(url=u'http://www.paroles.net/', + artist=u'Lilly Wood & the prick', title=u"Hey it's ok", + path=u'lilly-wood-the-prick/paroles-hey-it-s-ok'), + dict(DEFAULT_SONG, + url='http://www.releaselyrics.com', + path=u'/346e/the-beatles-lady-madonna-(love-version)/'), + dict(DEFAULT_SONG, + url=u'http://www.smartlyrics.com', + path=u'/Song18148-The-Beatles-Lady-Madonna-lyrics.aspx'), + dict(DEFAULT_SONG, + url='http://www.songlyrics.com', + path=u'/the-beatles/lady-madonna-lyrics'), + dict(DEFAULT_SONG, + url=u'http://www.stlyrics.com', + path=u'/songs/r/richiehavens48961/ladymadonna2069109.html'), + dict(DEFAULT_SONG, + url=u'http://www.sweetslyrics.com', + path=u'/761696.The%20Beatles%20-%20Lady%20Madonna.html') +] + + +class LyricsGooglePluginTest(unittest.TestCase): + """Test scraping heuristics on a fake html page. + Or run lyrics_download_samples.py first to check that beets google + custom search engine sources are correctly scraped. 
+ """ + source = dict(url=u'http://www.example.com', artist=u'John Doe', + title=u'Beets song', path=u'/lyrics/beetssong') + + def setUp(self): + """Set up configuration""" + try: + __import__('bs4') + except ImportError: + self.skipTest('Beautiful Soup 4 not available') + if sys.version_info[:3] < (2, 7, 3): + self.skipTest("Python's built-in HTML parser is not good enough") + lyrics.LyricsPlugin() + raw_backend.fetch_url = MockFetchUrl() + + def test_mocked_source_ok(self): + """Test that lyrics of the mocked page are correctly scraped""" + url = self.source['url'] + self.source['path'] + if os.path.isfile(url_to_filename(url)): + res = lyrics.scrape_lyrics_from_html(raw_backend.fetch_url(url)) + self.assertTrue(google.is_lyrics(res), url) + self.assertTrue(is_lyrics_content_ok(self.source['title'], res), + url) + + def test_google_sources_ok(self): + """Test if lyrics present on websites registered in beets google custom + search engine are correctly scraped.""" + if not check_lyrics_fetched(): + self.skipTest("Run lyrics_download_samples.py script first.") + for s in GOOGLE_SOURCES: + url = s['url'] + s['path'] + if os.path.isfile(url_to_filename(url)): + res = lyrics.scrape_lyrics_from_html( + raw_backend.fetch_url(url)) + self.assertTrue(google.is_lyrics(res), url) + self.assertTrue(is_lyrics_content_ok(s['title'], res), url) + + def test_default_ok(self): + """Test default engines with the default query""" + if not check_lyrics_fetched(): + self.skipTest("Run lyrics_download_samples.py script first.") + for (source, s) in zip([lyrics.LyricsWiki, + lyrics.LyricsCom, + lyrics.MusiXmatch], DEFAULT_SOURCES): + url = s['url'] + s['path'] + if os.path.isfile(url_to_filename(url)): + res = source({}, log).fetch(s['artist'], s['title']) + self.assertTrue(google.is_lyrics(res), url) + self.assertTrue(is_lyrics_content_ok(s['title'], res), url) + + def test_is_page_candidate_exact_match(self): + """Test matching html page title with song infos -- when song infos are + present in the title.""" + from bs4 import SoupStrainer, BeautifulSoup + s = self.source + url = unicode(s['url'] + s['path']) + html = raw_backend.fetch_url(url) + soup = BeautifulSoup(html, "html.parser", + parse_only=SoupStrainer('title')) + self.assertEqual(google.is_page_candidate(url, soup.title.string, + s['title'], s['artist']), + True, url) + + def test_is_page_candidate_fuzzy_match(self): + """Test matching html page title with song infos -- when song infos are + not present in the title.""" + s = self.source + url = s['url'] + s['path'] + url_title = u'example.com | Beats song by John doe' + + # very small diffs (typo) are ok eg 'beats' vs 'beets' with same artist + self.assertEqual(google.is_page_candidate(url, url_title, s['title'], + s['artist']), True, url) + # reject different title + url_title = u'example.com | seets bong lyrics by John doe' + self.assertEqual(google.is_page_candidate(url, url_title, s['title'], + s['artist']), False, url) + + def test_is_page_candidate_special_chars(self): + """Ensure that `is_page_candidate` doesn't crash when the artist + and such contain special regular expression characters. 
+ """ + # https://github.com/beetbox/beets/issues/1673 + s = self.source + url = s['url'] + s['path'] + url_title = u'foo' + + google.is_page_candidate(url, url_title, s['title'], u'Sunn O)))') + def suite(): return unittest.TestLoader().loadTestsFromName(__name__) diff -Nru beets-1.3.8+dfsg/test/test_mb.py beets-1.3.19/test/test_mb.py --- beets-1.3.8+dfsg/test/test_mb.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_mb.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,10 +15,13 @@ """Tests for MusicBrainz API wrapper. """ -import _common -from _common import unittest +from __future__ import division, absolute_import, print_function + +from test import _common +from test._common import unittest from beets.autotag import mb from beets import config +import mock class MBAlbumInfoTest(_common.TestCase): @@ -62,7 +66,7 @@ for i, recording in enumerate(tracks): track = { 'recording': recording, - 'position': str(i + 1), + 'position': i + 1, } if track_length: # Track lengths are distinct from recording lengths. @@ -312,6 +316,11 @@ self.assertEqual(track.artist_sort, 'TRACK ARTIST SORT NAME') self.assertEqual(track.artist_credit, 'TRACK ARTIST CREDIT') + def test_data_source(self): + release = self._make_release() + d = mb.album_info(release) + self.assertEqual(d.data_source, 'MusicBrainz') + class ParseIDTest(_common.TestCase): def test_parse_id_correct(self): @@ -407,6 +416,78 @@ self.assertEqual(flat, ('ALIASfr_P', 'ALIASSORTfr_P', 'CREDIT')) +class MBLibraryTest(unittest.TestCase): + def test_match_track(self): + with mock.patch('musicbrainzngs.search_recordings') as p: + p.return_value = { + 'recording-list': [{ + 'title': 'foo', + 'id': 'bar', + 'length': 42, + }], + } + ti = list(mb.match_track('hello', 'there'))[0] + + p.assert_called_with(artist='hello', recording='there', limit=5) + self.assertEqual(ti.title, 'foo') + self.assertEqual(ti.track_id, 'bar') + + def test_match_album(self): + mbid = 'd2a6f856-b553-40a0-ac54-a321e8e2da99' + with mock.patch('musicbrainzngs.search_releases') as sp: + sp.return_value = { + 'release-list': [{ + 'id': mbid, + }], + } + with mock.patch('musicbrainzngs.get_release_by_id') as gp: + gp.return_value = { + 'release': { + 'title': 'hi', + 'id': mbid, + 'medium-list': [{ + 'track-list': [{ + 'recording': { + 'title': 'foo', + 'id': 'bar', + 'length': 42, + }, + 'position': 9, + }], + 'position': 5, + }], + 'artist-credit': [{ + 'artist': { + 'name': 'some-artist', + 'id': 'some-id', + }, + }], + 'release-group': { + 'id': 'another-id', + } + } + } + + ai = list(mb.match_album('hello', 'there'))[0] + + sp.assert_called_with(artist='hello', release='there', limit=5) + gp.assert_called_with(mbid, mock.ANY) + self.assertEqual(ai.tracks[0].title, 'foo') + self.assertEqual(ai.album, 'hi') + + def test_match_track_empty(self): + with mock.patch('musicbrainzngs.search_recordings') as p: + til = list(mb.match_track(' ', ' ')) + self.assertFalse(p.called) + self.assertEqual(til, []) + + def test_match_album_empty(self): + with mock.patch('musicbrainzngs.search_releases') as p: + ail = list(mb.match_album(' ', ' ')) + self.assertFalse(p.called) + self.assertEqual(ail, []) + + def suite(): return unittest.TestLoader().loadTestsFromName(__name__) diff -Nru 
beets-1.3.8+dfsg/test/test_mbsubmit.py beets-1.3.19/test/test_mbsubmit.py --- beets-1.3.8+dfsg/test/test_mbsubmit.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/test_mbsubmit.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson and Diego Moreda. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +from __future__ import division, absolute_import, print_function + +from test._common import unittest +from test.helper import capture_stdout, control_stdin, TestHelper +from test.test_importer import ImportHelper, AutotagStub +from test.test_ui_importer import TerminalImportSessionSetup + + +class MBSubmitPluginTest(TerminalImportSessionSetup, unittest.TestCase, + ImportHelper, TestHelper): + def setUp(self): + self.setup_beets() + self.load_plugins('mbsubmit') + self._create_import_dir(2) + self._setup_import_session() + self.matcher = AutotagStub().install() + + def tearDown(self): + self.unload_plugins() + self.teardown_beets() + self.matcher.restore() + + def test_print_tracks_output(self): + """Test the output of the "print tracks" choice.""" + self.matcher.matching = AutotagStub.BAD + + with capture_stdout() as output: + with control_stdin('\n'.join(['p', 's'])): + # Print tracks; Skip + self.importer.run() + + # Manually build the string for comparing the output. + tracklist = (u'Print tracks? ' + u'01. Tag Title 1 - Tag Artist (0:01)\n' + u'02. Tag Title 2 - Tag Artist (0:01)') + self.assertIn(tracklist, output.getvalue()) + + def test_print_tracks_output_as_tracks(self): + """Test the output of the "print tracks" choice, as singletons.""" + self.matcher.matching = AutotagStub.BAD + + with capture_stdout() as output: + with control_stdin('\n'.join(['t', 's', 'p', 's'])): + # as Tracks; Skip; Print tracks; Skip + self.importer.run() + + # Manually build the string for comparing the output. + tracklist = (u'Print tracks? ' + u'02. Tag Title 2 - Tag Artist (0:01)') + self.assertIn(tracklist, output.getvalue()) + + +def suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_mbsync.py beets-1.3.19/test/test_mbsync.py --- beets-1.3.8+dfsg/test/test_mbsync.py 2014-04-22 21:47:13.000000000 +0000 +++ beets-1.3.19/test/test_mbsync.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Thomas Scholtes. +# Copyright 2016, Thomas Scholtes. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,13 +13,17 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. 
+from __future__ import division, absolute_import, print_function + from mock import patch -from _common import unittest -from helper import TestHelper,\ +from test._common import unittest +from test.helper import TestHelper,\ generate_album_info, \ - generate_track_info + generate_track_info, \ + capture_log +from beets import config from beets.library import Item @@ -38,20 +43,20 @@ album_for_mbid.return_value = \ generate_album_info('album id', ['track id']) track_for_mbid.return_value = \ - generate_track_info('singleton track id', - {'title': 'singleton info'}) + generate_track_info(u'singleton track id', + {'title': u'singleton info'}) album_item = Item( - title='old title', - mb_albumid='album id', - mb_trackid='track id', + album=u'old title', + mb_albumid=u'album id', + mb_trackid=u'track id', path='' ) album = self.lib.add_album([album_item]) item = Item( - title='old title', - mb_trackid='singleton track id', + title=u'old title', + mb_trackid=u'singleton track id', path='', ) self.lib.add(item) @@ -59,13 +64,68 @@ self.run_command('mbsync') item.load() - self.assertEqual(item.title, 'singleton info') + self.assertEqual(item.title, u'singleton info') album_item.load() - self.assertEqual(album_item.title, 'track info') + self.assertEqual(album_item.title, u'track info') album.load() - self.assertEqual(album.album, 'album info') + self.assertEqual(album.album, u'album info') + + def test_message_when_skipping(self): + config['format_item'] = u'$artist - $album - $title' + config['format_album'] = u'$albumartist - $album' + + # Test album with no mb_albumid. + # The default format for an album include $albumartist so + # set that here, too. + album_invalid = Item( + albumartist=u'album info', + album=u'album info', + path='' + ) + self.lib.add_album([album_invalid]) + + # default format + with capture_log('beets.mbsync') as logs: + self.run_command('mbsync') + e = u'mbsync: Skipping album with no mb_albumid: ' + \ + u'album info - album info' + self.assertEqual(e, logs[0]) + + # custom format + with capture_log('beets.mbsync') as logs: + self.run_command('mbsync', '-f', "'$album'") + e = u"mbsync: Skipping album with no mb_albumid: 'album info'" + self.assertEqual(e, logs[0]) + + # restore the config + config['format_item'] = '$artist - $album - $title' + config['format_album'] = '$albumartist - $album' + + # Test singleton with no mb_trackid. + # The default singleton format includes $artist and $album + # so we need to stub them here + item_invalid = Item( + artist=u'album info', + album=u'album info', + title=u'old title', + path='', + ) + self.lib.add(item_invalid) + + # default format + with capture_log('beets.mbsync') as logs: + self.run_command('mbsync') + e = u'mbsync: Skipping singleton with no mb_trackid: ' + \ + u'album info - album info - old title' + self.assertEqual(e, logs[0]) + + # custom format + with capture_log('beets.mbsync') as logs: + self.run_command('mbsync', '-f', "'$title'") + e = u"mbsync: Skipping singleton with no mb_trackid: 'old title'" + self.assertEqual(e, logs[0]) def suite(): diff -Nru beets-1.3.8+dfsg/test/test_mediafile_edge.py beets-1.3.19/test/test_mediafile_edge.py --- beets-1.3.8+dfsg/test/test_mediafile_edge.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_mediafile_edge.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. 
# # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,12 +15,16 @@ """Specific, edge-case tests for the MediaFile metadata layer. """ +from __future__ import division, absolute_import, print_function + import os import shutil -import _common -from _common import unittest -from helper import TestHelper +from test import _common +from test._common import unittest +from test.helper import TestHelper + +from beets.util import bytestring_path import beets.mediafile @@ -32,7 +37,7 @@ # This is very hard to produce, so this is just the first 8192 # bytes of a file found "in the wild". emptylist = beets.mediafile.MediaFile( - os.path.join(_common.RSRC, 'emptylist.mp3') + os.path.join(_common.RSRC, b'emptylist.mp3') ) genre = emptylist.genre self.assertEqual(genre, None) @@ -41,7 +46,7 @@ # Ensures that release times delimited by spaces are ignored. # Amie Street produces such files. space_time = beets.mediafile.MediaFile( - os.path.join(_common.RSRC, 'space_time.mp3') + os.path.join(_common.RSRC, b'space_time.mp3') ) self.assertEqual(space_time.year, 2009) self.assertEqual(space_time.month, 9) @@ -51,7 +56,7 @@ # Ensures that release times delimited by Ts are ignored. # The iTunes Store produces such files. t_time = beets.mediafile.MediaFile( - os.path.join(_common.RSRC, 't_time.m4a') + os.path.join(_common.RSRC, b't_time.m4a') ) self.assertEqual(t_time.year, 1987) self.assertEqual(t_time.month, 3) @@ -60,47 +65,66 @@ def test_tempo_with_bpm(self): # Some files have a string like "128 BPM" in the tempo field # rather than just a number. - f = beets.mediafile.MediaFile(os.path.join(_common.RSRC, 'bpm.mp3')) + f = beets.mediafile.MediaFile(os.path.join(_common.RSRC, b'bpm.mp3')) self.assertEqual(f.bpm, 128) def test_discc_alternate_field(self): # Different taggers use different vorbis comments to reflect # the disc and disc count fields: ensure that the alternative # style works. - f = beets.mediafile.MediaFile(os.path.join(_common.RSRC, 'discc.ogg')) + f = beets.mediafile.MediaFile(os.path.join(_common.RSRC, b'discc.ogg')) self.assertEqual(f.disc, 4) self.assertEqual(f.disctotal, 5) def test_old_ape_version_bitrate(self): - f = beets.mediafile.MediaFile(os.path.join(_common.RSRC, 'oldape.ape')) + media_file = os.path.join(_common.RSRC, b'oldape.ape') + f = beets.mediafile.MediaFile(media_file) self.assertEqual(f.bitrate, 0) + def test_only_magic_bytes_jpeg(self): + # Some jpeg files can only be recognized by their magic bytes and as + # such aren't recognized by imghdr. Ensure that this still works thanks + # to our own follow up mimetype detection based on + # https://github.com/file/file/blob/master/magic/Magdir/jpeg#L12 + f = open(os.path.join(_common.RSRC, b'only-magic-bytes.jpg'), 'rb') + jpg_data = f.read() + self.assertEqual( + beets.mediafile._image_mime_type(jpg_data), + 'image/jpeg') + + def test_soundcheck_non_ascii(self): + # Make sure we don't crash when the iTunes SoundCheck field contains + # non-ASCII binary data. 
+ f = beets.mediafile.MediaFile(os.path.join(_common.RSRC, + b'soundcheck-nonascii.m4a')) + self.assertEqual(f.rg_track_gain, 0.0) + class InvalidValueToleranceTest(unittest.TestCase): def test_safe_cast_string_to_int(self): - self.assertEqual(_sc(int, 'something'), 0) + self.assertEqual(_sc(int, u'something'), 0) def test_safe_cast_int_string_to_int(self): - self.assertEqual(_sc(int, '20'), 20) + self.assertEqual(_sc(int, u'20'), 20) def test_safe_cast_string_to_bool(self): - self.assertEqual(_sc(bool, 'whatever'), False) + self.assertEqual(_sc(bool, u'whatever'), False) def test_safe_cast_intstring_to_bool(self): - self.assertEqual(_sc(bool, '5'), True) + self.assertEqual(_sc(bool, u'5'), True) def test_safe_cast_string_to_float(self): - self.assertAlmostEqual(_sc(float, '1.234'), 1.234) + self.assertAlmostEqual(_sc(float, u'1.234'), 1.234) def test_safe_cast_int_to_float(self): self.assertAlmostEqual(_sc(float, 2), 2.0) def test_safe_cast_string_with_cruft_to_float(self): - self.assertAlmostEqual(_sc(float, '1.234stuff'), 1.234) + self.assertAlmostEqual(_sc(float, u'1.234stuff'), 1.234) def test_safe_cast_negative_string_to_float(self): - self.assertAlmostEqual(_sc(float, '-1.234'), -1.234) + self.assertAlmostEqual(_sc(float, u'-1.234'), -1.234) def test_safe_cast_special_chars_to_unicode(self): us = _sc(unicode, 'caf\xc3\xa9') @@ -108,9 +132,17 @@ self.assertTrue(us.startswith(u'caf')) def test_safe_cast_float_with_no_numbers(self): - v = _sc(float, '+') + v = _sc(float, u'+') + self.assertEqual(v, 0.0) + + def test_safe_cast_float_with_dot_only(self): + v = _sc(float, u'.') self.assertEqual(v, 0.0) + def test_safe_cast_float_with_multiple_dots(self): + v = _sc(float, u'1.0.0') + self.assertEqual(v, 1.0) + class SafetyTest(unittest.TestCase, TestHelper): def setUp(self): @@ -130,34 +162,34 @@ def test_corrupt_mp3_raises_unreadablefileerror(self): # Make sure we catch Mutagen reading errors appropriately. 
- self._exccheck('corrupt.mp3', beets.mediafile.UnreadableFileError) + self._exccheck(b'corrupt.mp3', beets.mediafile.UnreadableFileError) def test_corrupt_mp4_raises_unreadablefileerror(self): - self._exccheck('corrupt.m4a', beets.mediafile.UnreadableFileError) + self._exccheck(b'corrupt.m4a', beets.mediafile.UnreadableFileError) def test_corrupt_flac_raises_unreadablefileerror(self): - self._exccheck('corrupt.flac', beets.mediafile.UnreadableFileError) + self._exccheck(b'corrupt.flac', beets.mediafile.UnreadableFileError) def test_corrupt_ogg_raises_unreadablefileerror(self): - self._exccheck('corrupt.ogg', beets.mediafile.UnreadableFileError) + self._exccheck(b'corrupt.ogg', beets.mediafile.UnreadableFileError) def test_invalid_ogg_header_raises_unreadablefileerror(self): - self._exccheck('corrupt.ogg', beets.mediafile.UnreadableFileError, + self._exccheck(b'corrupt.ogg', beets.mediafile.UnreadableFileError, 'OggS\x01vorbis') def test_corrupt_monkeys_raises_unreadablefileerror(self): - self._exccheck('corrupt.ape', beets.mediafile.UnreadableFileError) + self._exccheck(b'corrupt.ape', beets.mediafile.UnreadableFileError) def test_invalid_extension_raises_filetypeerror(self): - self._exccheck('something.unknown', beets.mediafile.FileTypeError) + self._exccheck(b'something.unknown', beets.mediafile.FileTypeError) def test_magic_xml_raises_unreadablefileerror(self): - self._exccheck('nothing.xml', beets.mediafile.UnreadableFileError, + self._exccheck(b'nothing.xml', beets.mediafile.UnreadableFileError, "ftyp") - @unittest.skipIf(not hasattr(os, 'symlink'), 'platform lacks symlink') + @unittest.skipIf(not hasattr(os, 'symlink'), u'platform lacks symlink') def test_broken_symlink(self): - fn = os.path.join(_common.RSRC, 'brokenlink') + fn = os.path.join(_common.RSRC, b'brokenlink') os.symlink('does_not_exist', fn) try: self.assertRaises(IOError, @@ -168,7 +200,7 @@ class SideEffectsTest(unittest.TestCase): def setUp(self): - self.empty = os.path.join(_common.RSRC, 'empty.mp3') + self.empty = os.path.join(_common.RSRC, b'empty.mp3') def test_opening_tagless_file_leaves_untouched(self): old_mtime = os.stat(self.empty).st_mtime @@ -177,11 +209,11 @@ self.assertEqual(old_mtime, new_mtime) -class EncodingTest(unittest.TestCase, TestHelper): +class MP4EncodingTest(unittest.TestCase, TestHelper): def setUp(self): self.create_temp_dir() - src = os.path.join(_common.RSRC, 'full.m4a') - self.path = os.path.join(self.temp_dir, 'test.m4a') + src = os.path.join(_common.RSRC, b'full.m4a') + self.path = os.path.join(self.temp_dir, b'test.m4a') shutil.copy(src, self.path) self.mf = beets.mediafile.MediaFile(self.path) @@ -196,6 +228,26 @@ self.assertEqual(new_mf.label, u'foo\xe8bar') +class MP3EncodingTest(unittest.TestCase, TestHelper): + def setUp(self): + self.create_temp_dir() + src = os.path.join(_common.RSRC, b'full.mp3') + self.path = os.path.join(self.temp_dir, b'test.mp3') + shutil.copy(src, self.path) + + self.mf = beets.mediafile.MediaFile(self.path) + + def test_comment_with_latin1_encoding(self): + # Set up the test file with a Latin1-encoded COMM frame. The encoding + # indices defined by MP3 are listed here: + # http://id3.org/id3v2.4.0-structure + self.mf.mgfile['COMM::eng'].encoding = 0 + + # Try to store non-Latin1 text. 
+ self.mf.comments = u'\u2028' + self.mf.save() + + class ZeroLengthMediaFile(beets.mediafile.MediaFile): @property def length(self): @@ -205,7 +257,7 @@ class MissingAudioDataTest(unittest.TestCase): def setUp(self): super(MissingAudioDataTest, self).setUp() - path = os.path.join(_common.RSRC, 'full.mp3') + path = os.path.join(_common.RSRC, b'full.mp3') self.mf = ZeroLengthMediaFile(path) def test_bitrate_with_zero_length(self): @@ -216,11 +268,11 @@ class TypeTest(unittest.TestCase): def setUp(self): super(TypeTest, self).setUp() - path = os.path.join(_common.RSRC, 'full.mp3') + path = os.path.join(_common.RSRC, b'full.mp3') self.mf = beets.mediafile.MediaFile(path) def test_year_integer_in_string(self): - self.mf.year = '2009' + self.mf.year = u'2009' self.assertEqual(self.mf.year, 2009) def test_set_replaygain_gain_to_none(self): @@ -255,18 +307,25 @@ self.assertEqual(peak, 1.0) def test_decode_zero(self): - data = u' 80000000 80000000 00000000 00000000 00000000 00000000 ' \ - u'00000000 00000000 00000000 00000000' + data = b' 80000000 80000000 00000000 00000000 00000000 00000000 ' \ + b'00000000 00000000 00000000 00000000' gain, peak = beets.mediafile._sc_decode(data) self.assertEqual(gain, 0.0) self.assertEqual(peak, 0.0) def test_malformatted(self): - gain, peak = beets.mediafile._sc_decode(u'foo') + gain, peak = beets.mediafile._sc_decode(b'foo') self.assertEqual(gain, 0.0) self.assertEqual(peak, 0.0) def test_special_characters(self): + gain, peak = beets.mediafile._sc_decode(u'caf\xe9'.encode('utf8')) + self.assertEqual(gain, 0.0) + self.assertEqual(peak, 0.0) + + def test_decode_handles_unicode(self): + # Most of the time, we expect to decode the raw bytes. But some formats + # might give us text strings, which we need to handle. gain, peak = beets.mediafile._sc_decode(u'caf\xe9') self.assertEqual(gain, 0.0) self.assertEqual(peak, 0.0) @@ -275,8 +334,10 @@ class ID3v23Test(unittest.TestCase, TestHelper): def _make_test(self, ext='mp3', id3v23=False): self.create_temp_dir() - src = os.path.join(_common.RSRC, 'full.{0}'.format(ext)) - self.path = os.path.join(self.temp_dir, 'test.{0}'.format(ext)) + src = os.path.join(_common.RSRC, + bytestring_path('full.{0}'.format(ext))) + self.path = os.path.join(self.temp_dir, + bytestring_path('test.{0}'.format(ext))) shutil.copy(src, self.path) return beets.mediafile.MediaFile(self.path, id3v23=id3v23) @@ -289,7 +350,7 @@ mf.year = 2013 mf.save() frame = mf.mgfile['TDRC'] - self.assertTrue('2013' in str(frame)) + self.assertTrue('2013' in unicode(frame)) self.assertTrue('TYER' not in mf.mgfile) finally: self._delete_test() @@ -300,7 +361,7 @@ mf.year = 2013 mf.save() frame = mf.mgfile['TYER'] - self.assertTrue('2013' in str(frame)) + self.assertTrue('2013' in unicode(frame)) self.assertTrue('TDRC' not in mf.mgfile) finally: self._delete_test() diff -Nru beets-1.3.8+dfsg/test/test_mediafile.py beets-1.3.19/test/test_mediafile.py --- beets-1.3.8+dfsg/test/test_mediafile.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_mediafile.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,19 +16,22 @@ """Automatically-generated blanket testing for the MediaFile metadata layer. 
""" +from __future__ import division, absolute_import, print_function + import os import shutil import tempfile import datetime import time -import _common -from _common import unittest +from test import _common +from test._common import unittest from beets.mediafile import MediaFile, MediaField, Image, \ MP3DescStorageStyle, StorageStyle, MP4StorageStyle, \ - ASFStorageStyle, ImageType + ASFStorageStyle, ImageType, CoverArtField from beets.library import Item from beets.plugins import BeetsPlugin +from beets.util import bytestring_path class ArtTestMixin(object): @@ -37,7 +41,8 @@ @property def png_data(self): if not self._png_data: - with open(os.path.join(_common.RSRC, 'image-2x3.png'), 'rb') as f: + image_file = os.path.join(_common.RSRC, b'image-2x3.png') + with open(image_file, 'rb') as f: self._png_data = f.read() return self._png_data _png_data = None @@ -45,7 +50,8 @@ @property def jpg_data(self): if not self._jpg_data: - with open(os.path.join(_common.RSRC, 'image-2x3.jpg'), 'rb') as f: + image_file = os.path.join(_common.RSRC, b'image-2x3.jpg') + with open(image_file, 'rb') as f: self._jpg_data = f.read() return self._jpg_data _jpg_data = None @@ -53,7 +59,8 @@ @property def tiff_data(self): if not self._jpg_data: - with open(os.path.join(_common.RSRC, 'image-2x3.tiff'), 'rb') as f: + image_file = os.path.join(_common.RSRC, b'image-2x3.tiff') + with open(image_file, 'rb') as f: self._jpg_data = f.read() return self._jpg_data _jpg_data = None @@ -74,6 +81,20 @@ mediafile = MediaFile(mediafile.path) self.assertEqual(mediafile.art, self.jpg_data) + def test_delete_art(self): + mediafile = self._mediafile_fixture('empty') + mediafile.art = self.jpg_data + mediafile.save() + + mediafile = MediaFile(mediafile.path) + self.assertIsNotNone(mediafile.art) + + del mediafile.art + mediafile.save() + + mediafile = MediaFile(mediafile.path) + self.assertIsNone(mediafile.art) + class ImageStructureTestMixin(ArtTestMixin): """Test reading and writing multiple image tags. 
@@ -91,18 +112,18 @@ image = next(i for i in mediafile.images if i.mime_type == 'image/png') self.assertEqual(image.data, self.png_data) - self.assertExtendedImageAttributes(image, desc='album cover', + self.assertExtendedImageAttributes(image, desc=u'album cover', type=ImageType.front) image = next(i for i in mediafile.images if i.mime_type == 'image/jpeg') self.assertEqual(image.data, self.jpg_data) - self.assertExtendedImageAttributes(image, desc='the artist', + self.assertExtendedImageAttributes(image, desc=u'the artist', type=ImageType.artist) def test_set_image_structure(self): mediafile = self._mediafile_fixture('empty') - image = Image(data=self.png_data, desc='album cover', + image = Image(data=self.png_data, desc=u'album cover', type=ImageType.front) mediafile.images = [image] mediafile.save() @@ -113,14 +134,14 @@ image = mediafile.images[0] self.assertEqual(image.data, self.png_data) self.assertEqual(image.mime_type, 'image/png') - self.assertExtendedImageAttributes(image, desc='album cover', + self.assertExtendedImageAttributes(image, desc=u'album cover', type=ImageType.front) def test_add_image_structure(self): mediafile = self._mediafile_fixture('image') self.assertEqual(len(mediafile.images), 2) - image = Image(data=self.png_data, desc='the composer', + image = Image(data=self.png_data, desc=u'the composer', type=ImageType.composer) mediafile.images += [image] mediafile.save() @@ -128,13 +149,30 @@ mediafile = MediaFile(mediafile.path) self.assertEqual(len(mediafile.images), 3) - images = (i for i in mediafile.images if i.desc == 'the composer') + images = (i for i in mediafile.images if i.desc == u'the composer') image = next(images, None) self.assertExtendedImageAttributes( - image, desc='the composer', type=ImageType.composer + image, desc=u'the composer', type=ImageType.composer ) - def assertExtendedImageAttributes(self, image, **kwargs): + def test_delete_image_structures(self): + mediafile = self._mediafile_fixture('image') + self.assertEqual(len(mediafile.images), 2) + + del mediafile.images + mediafile.save() + + mediafile = MediaFile(mediafile.path) + self.assertEqual(len(mediafile.images), 0) + + def test_guess_cover(self): + mediafile = self._mediafile_fixture('image') + self.assertEqual(len(mediafile.images), 2) + cover = CoverArtField.guess_cover_image(mediafile.images) + self.assertEqual(cover.desc, u'album cover') + self.assertEqual(mediafile.art, cover.data) + + def assertExtendedImageAttributes(self, image, **kwargs): # noqa """Ignore extended image attributes in the base tests. 
""" pass @@ -143,7 +181,7 @@ class ExtendedImageStructureTestMixin(ImageStructureTestMixin): """Checks for additional attributes in the image structure.""" - def assertExtendedImageAttributes(self, image, desc=None, type=None): + def assertExtendedImageAttributes(self, image, desc=None, type=None): # noqa self.assertEqual(image.desc, desc) self.assertEqual(image.type, type) @@ -151,7 +189,7 @@ mediafile = self._mediafile_fixture('image') self.assertEqual(len(mediafile.images), 2) - image = Image(data=self.tiff_data, desc='the composer', + image = Image(data=self.tiff_data, desc=u'the composer', type=ImageType.composer) mediafile.images += [image] mediafile.save() @@ -160,17 +198,17 @@ self.assertEqual(len(mediafile.images), 3) # WMA does not preserve the order, so we have to work around this - image = filter(lambda i: i.mime_type == 'image/tiff', - mediafile.images)[0] + image = list(filter(lambda i: i.mime_type == 'image/tiff', + mediafile.images))[0] self.assertExtendedImageAttributes( - image, desc='the composer', type=ImageType.composer) + image, desc=u'the composer', type=ImageType.composer) class LazySaveTestMixin(object): """Mediafile should only write changes when tags have changed """ - @unittest.skip('not yet implemented') + @unittest.skip(u'not yet implemented') def test_unmodified(self): mediafile = self._mediafile_fixture('full') mtime = self._set_past_mtime(mediafile.path) @@ -179,7 +217,7 @@ mediafile.save() self.assertEqual(os.stat(mediafile.path).st_mtime, mtime) - @unittest.skip('not yet implemented') + @unittest.skip(u'not yet implemented') def test_same_tag_value(self): mediafile = self._mediafile_fixture('full') mtime = self._set_past_mtime(mediafile.path) @@ -198,14 +236,14 @@ mediafile.save() self.assertEqual(os.stat(mediafile.path).st_mtime, mtime) - @unittest.skip('not yet implemented') + @unittest.skip(u'not yet implemented') def test_tag_value_change(self): mediafile = self._mediafile_fixture('full') mtime = self._set_past_mtime(mediafile.path) self.assertEqual(os.stat(mediafile.path).st_mtime, mtime) mediafile.title = mediafile.title - mediafile.album = 'another' + mediafile.album = u'another' mediafile.save() self.assertNotEqual(os.stat(mediafile.path).st_mtime, mtime) @@ -214,7 +252,7 @@ mtime = self._set_past_mtime(mediafile.path) self.assertEqual(os.stat(mediafile.path).st_mtime, mtime) - mediafile.update({'title': mediafile.title, 'album': 'another'}) + mediafile.update({'title': mediafile.title, 'album': u'another'}) mediafile.save() self.assertNotEqual(os.stat(mediafile.path).st_mtime, mtime) @@ -238,7 +276,7 @@ mediafile.save() mediafile = MediaFile(mediafile.path) - self.assertItemsEqual(mediafile.genres, ['one', 'two']) + self.assertItemsEqual(mediafile.genres, [u'one', u'two']) def test_write_genre_list_get_first(self): mediafile = self._mediafile_fixture('empty') @@ -246,11 +284,11 @@ mediafile.save() mediafile = MediaFile(mediafile.path) - self.assertEqual(mediafile.genre, 'one') + self.assertEqual(mediafile.genre, u'one') def test_append_genre_list(self): mediafile = self._mediafile_fixture('full') - self.assertEqual(mediafile.genre, 'the genre') + self.assertEqual(mediafile.genre, u'the genre') mediafile.genres += [u'another'] mediafile.save() @@ -273,11 +311,11 @@ plugin.add_media_field('customtag', field_extension) mediafile = self._mediafile_fixture('empty') - mediafile.customtag = 'F#' + mediafile.customtag = u'F#' mediafile.save() mediafile = MediaFile(mediafile.path) - self.assertEqual(mediafile.customtag, 'F#') + 
self.assertEqual(mediafile.customtag, u'F#') delattr(MediaFile, 'customtag') Item._media_fields.remove('customtag') @@ -288,10 +326,10 @@ mediafile = self._mediafile_fixture('empty') self.assertIsNone(mediafile.customtag) - item = Item(path=mediafile.path, customtag='Gb') + item = Item(path=mediafile.path, customtag=u'Gb') item.write() mediafile = MediaFile(mediafile.path) - self.assertEqual(mediafile.customtag, 'Gb') + self.assertEqual(mediafile.customtag, u'Gb') delattr(MediaFile, 'customtag') Item._media_fields.remove('customtag') @@ -301,11 +339,11 @@ plugin.add_media_field('customtag', field_extension) mediafile = self._mediafile_fixture('empty') - mediafile.update({'customtag': 'F#'}) + mediafile.update({'customtag': u'F#'}) mediafile.save() item = Item.from_path(mediafile.path) - self.assertEqual(item['customtag'], 'F#') + self.assertEqual(item['customtag'], u'F#') delattr(MediaFile, 'customtag') Item._media_fields.remove('customtag') @@ -313,12 +351,14 @@ def test_invalid_descriptor(self): with self.assertRaises(ValueError) as cm: MediaFile.add_field('somekey', True) - self.assertIn('must be an instance of MediaField', str(cm.exception)) + self.assertIn(u'must be an instance of MediaField', + unicode(cm.exception)) def test_overwrite_property(self): with self.assertRaises(ValueError) as cm: MediaFile.add_field('artist', MediaField()) - self.assertIn('property "artist" already exists', str(cm.exception)) + self.assertIn(u'property "artist" already exists', + unicode(cm.exception)) class ReadWriteTestBase(ArtTestMixin, GenreListTestMixin, @@ -624,22 +664,22 @@ self.assertIsNone(mediafile.date) self.assertIsNone(mediafile.year) - def assertTags(self, mediafile, tags): + def assertTags(self, mediafile, tags): # noqa errors = [] for key, value in tags.items(): try: value2 = getattr(mediafile, key) except AttributeError: - errors.append('Tag %s does not exist' % key) + errors.append(u'Tag %s does not exist' % key) else: if value2 != value: - errors.append('Tag %s: %s != %s' % (key, value2, value)) + errors.append(u'Tag %s: %r != %r' % (key, value2, value)) if any(errors): - errors = ['Tags did not match'] + errors + errors = [u'Tags did not match'] + errors self.fail('\n '.join(errors)) def _mediafile_fixture(self, name): - name = name + '.' + self.extension + name = bytestring_path(name + '.' 
+ self.extension) src = os.path.join(_common.RSRC, name) target = os.path.join(self.temp_dir, name) shutil.copy(src, target) @@ -674,6 +714,7 @@ tags['original_year'] = original_date.year tags['original_month'] = original_date.month tags['original_day'] = original_date.day + return tags @@ -706,6 +747,10 @@ 'channels': 1, } + def test_unknown_apic_type(self): + mediafile = self._mediafile_fixture('image_unknown_type') + self.assertEqual(mediafile.images[0].type, ImageType.other) + class MP4Test(ReadWriteTestBase, PartialTestMixin, ImageStructureTestMixin, unittest.TestCase): @@ -724,16 +769,20 @@ with self.assertRaises(ValueError): mediafile.images = [Image(data=self.tiff_data)] + def test_guess_cover(self): + # There is no metadata associated with images, we pick one at random + pass + class AlacTest(ReadWriteTestBase, unittest.TestCase): extension = 'alac.m4a' audio_properties = { 'length': 1.0, - 'bitrate': 55072, - 'format': 'ALAC', - 'samplerate': 0, - 'bitdepth': 0, - 'channels': 0, + 'bitrate': 21830, + # 'format': 'ALAC', + 'samplerate': 44100, + 'bitdepth': 16, + 'channels': 1, } @@ -742,7 +791,7 @@ audio_properties = { 'length': 1.0, 'bitrate': 23458, - 'format': 'Musepack', + 'format': u'Musepack', 'samplerate': 44100, 'bitdepth': 0, 'channels': 2, @@ -755,7 +804,7 @@ audio_properties = { 'length': 1.0, 'bitrate': 128000, - 'format': 'Windows Media', + 'format': u'Windows Media', 'samplerate': 44100, 'bitdepth': 0, 'channels': 1, @@ -770,6 +819,12 @@ mediafile = MediaFile(mediafile.path) self.assertIn(mediafile.genre, [u'one', u'two']) + def test_read_pure_tags(self): + mediafile = self._mediafile_fixture('pure') + self.assertEqual(mediafile.comments, u'the comments') + self.assertEqual(mediafile.title, u'the title') + self.assertEqual(mediafile.artist, u'the artist') + class OggTest(ReadWriteTestBase, ExtendedImageStructureTestMixin, unittest.TestCase): @@ -777,7 +832,7 @@ audio_properties = { 'length': 1.0, 'bitrate': 48000, - 'format': 'OGG', + 'format': u'OGG', 'samplerate': 44100, 'bitdepth': 0, 'channels': 1, @@ -807,6 +862,12 @@ mediafile = MediaFile(mediafile.path) self.assertFalse('coverart' in mediafile.mgfile) + def test_date_tag_with_slashes(self): + mediafile = self._mediafile_fixture('date_with_slashes') + self.assertEqual(mediafile.year, 2005) + self.assertEqual(mediafile.month, 6) + self.assertEqual(mediafile.day, 5) + class FlacTest(ReadWriteTestBase, PartialTestMixin, ExtendedImageStructureTestMixin, @@ -815,19 +876,20 @@ audio_properties = { 'length': 1.0, 'bitrate': 175120, - 'format': 'FLAC', + 'format': u'FLAC', 'samplerate': 44100, 'bitdepth': 16, 'channels': 1, } -class ApeTest(ReadWriteTestBase, unittest.TestCase): +class ApeTest(ReadWriteTestBase, ExtendedImageStructureTestMixin, + unittest.TestCase): extension = 'ape' audio_properties = { 'length': 1.0, 'bitrate': 112040, - 'format': 'APE', + 'format': u'APE', 'samplerate': 44100, 'bitdepth': 16, 'channels': 1, @@ -839,7 +901,7 @@ audio_properties = { 'length': 1.0, 'bitrate': 108744, - 'format': 'WavPack', + 'format': u'WavPack', 'samplerate': 44100, 'bitdepth': 0, 'channels': 1, @@ -851,7 +913,7 @@ audio_properties = { 'length': 1.0, 'bitrate': 57984, - 'format': 'Opus', + 'format': u'Opus', 'samplerate': 48000, 'bitdepth': 0, 'channels': 1, @@ -863,7 +925,7 @@ audio_properties = { 'length': 1.0, 'bitrate': 705600, - 'format': 'AIFF', + 'format': u'AIFF', 'samplerate': 44100, 'bitdepth': 0, 'channels': 1, @@ -873,13 +935,13 @@ class MediaFieldTest(unittest.TestCase): def 
test_properties_from_fields(self): - path = os.path.join(_common.RSRC, 'full.mp3') + path = os.path.join(_common.RSRC, b'full.mp3') mediafile = MediaFile(path) for field in MediaFile.fields(): self.assertTrue(hasattr(mediafile, field)) def test_properties_from_readable_fields(self): - path = os.path.join(_common.RSRC, 'full.mp3') + path = os.path.join(_common.RSRC, b'full.mp3') mediafile = MediaFile(path) for field in MediaFile.readable_fields(): self.assertTrue(hasattr(mediafile, field)) diff -Nru beets-1.3.8+dfsg/test/test_metasync.py beets-1.3.19/test/test_metasync.py --- beets-1.3.8+dfsg/test/test_metasync.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/test_metasync.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Tom Jaspers. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +from __future__ import division, absolute_import, print_function + +import os +import platform +import time +from datetime import datetime +from beets.library import Item + +from test import _common +from test._common import unittest +from test.helper import TestHelper + + +def _parsetime(s): + return time.mktime(datetime.strptime(s, '%Y-%m-%d %H:%M:%S').timetuple()) + + +def _is_windows(): + return platform.system() == "Windows" + + +class MetaSyncTest(_common.TestCase, TestHelper): + itunes_library_unix = os.path.join(_common.RSRC, + b'itunes_library_unix.xml') + itunes_library_windows = os.path.join(_common.RSRC, + b'itunes_library_windows.xml') + + def setUp(self): + self.setup_beets() + self.load_plugins('metasync') + + self.config['metasync']['source'] = 'itunes' + + if _is_windows(): + self.config['metasync']['itunes']['library'] = \ + self.itunes_library_windows + else: + self.config['metasync']['itunes']['library'] = \ + self.itunes_library_unix + + self._set_up_data() + + def _set_up_data(self): + items = [_common.item() for _ in range(2)] + + items[0].title = 'Tessellate' + items[0].artist = 'alt-J' + items[0].albumartist = 'alt-J' + items[0].album = 'An Awesome Wave' + items[0].itunes_rating = 60 + + items[1].title = 'Breezeblocks' + items[1].artist = 'alt-J' + items[1].albumartist = 'alt-J' + items[1].album = 'An Awesome Wave' + + if _is_windows(): + items[0].path = \ + u'G:\\Music\\Alt-J\\An Awesome Wave\\03 Tessellate.mp3' + items[1].path = \ + u'G:\\Music\\Alt-J\\An Awesome Wave\\04 Breezeblocks.mp3' + else: + items[0].path = u'/Music/Alt-J/An Awesome Wave/03 Tessellate.mp3' + items[1].path = u'/Music/Alt-J/An Awesome Wave/04 Breezeblocks.mp3' + + for item in items: + self.lib.add(item) + + def tearDown(self): + self.unload_plugins() + self.teardown_beets() + + def test_load_item_types(self): + # This test also verifies that the MetaSources have loaded correctly + self.assertIn('amarok_score', Item._types) + self.assertIn('itunes_rating', Item._types) + + def test_pretend_sync_from_itunes(self): + out = self.run_with_output('metasync', '-p') + + 
self.assertIn('itunes_rating: 60 -> 80', out) + self.assertIn('itunes_rating: 100', out) + self.assertIn('itunes_playcount: 31', out) + self.assertIn('itunes_skipcount: 3', out) + self.assertIn('itunes_lastplayed: 2015-05-04 12:20:51', out) + self.assertIn('itunes_lastskipped: 2015-02-05 15:41:04', out) + self.assertEqual(self.lib.items()[0].itunes_rating, 60) + + def test_sync_from_itunes(self): + self.run_command('metasync') + + self.assertEqual(self.lib.items()[0].itunes_rating, 80) + self.assertEqual(self.lib.items()[0].itunes_playcount, 0) + self.assertEqual(self.lib.items()[0].itunes_skipcount, 3) + self.assertFalse(hasattr(self.lib.items()[0], 'itunes_lastplayed')) + self.assertEqual(self.lib.items()[0].itunes_lastskipped, + _parsetime('2015-02-05 15:41:04')) + + self.assertEqual(self.lib.items()[1].itunes_rating, 100) + self.assertEqual(self.lib.items()[1].itunes_playcount, 31) + self.assertEqual(self.lib.items()[1].itunes_skipcount, 0) + self.assertEqual(self.lib.items()[1].itunes_lastplayed, + _parsetime('2015-05-04 12:20:51')) + self.assertFalse(hasattr(self.lib.items()[1], 'itunes_lastskipped')) + + +def suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_mpdstats.py beets-1.3.19/test/test_mpdstats.py --- beets-1.3.8+dfsg/test/test_mpdstats.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/test_mpdstats.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016 +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+ +from __future__ import division, absolute_import, print_function + +from mock import Mock, patch, call, ANY +from test._common import unittest +from test.helper import TestHelper + +from beets.library import Item +from beetsplug.mpdstats import MPDStats +from beets import util + + +class MPDStatsTest(unittest.TestCase, TestHelper): + def setUp(self): + self.setup_beets() + self.load_plugins('mpdstats') + + def tearDown(self): + self.teardown_beets() + self.unload_plugins() + + def test_update_rating(self): + item = Item(title=u'title', path='', id=1) + item.add(self.lib) + + log = Mock() + mpdstats = MPDStats(self.lib, log) + + self.assertFalse(mpdstats.update_rating(item, True)) + self.assertFalse(mpdstats.update_rating(None, True)) + + def test_get_item(self): + item_path = util.normpath('/foo/bar.flac') + item = Item(title=u'title', path=item_path, id=1) + item.add(self.lib) + + log = Mock() + mpdstats = MPDStats(self.lib, log) + + self.assertEqual(str(mpdstats.get_item(item_path)), str(item)) + self.assertIsNone(mpdstats.get_item('/some/non-existing/path')) + self.assertIn(u'item not found:', log.info.call_args[0][0]) + + FAKE_UNKNOWN_STATE = 'some-unknown-one' + STATUSES = [{'state': FAKE_UNKNOWN_STATE}, + {'state': u'pause'}, + {'state': u'play', 'songid': 1, 'time': u'0:1'}, + {'state': u'stop'}] + EVENTS = [["player"]] * (len(STATUSES) - 1) + [KeyboardInterrupt] + item_path = util.normpath('/foo/bar.flac') + + @patch("beetsplug.mpdstats.MPDClientWrapper", return_value=Mock(**{ + "events.side_effect": EVENTS, "status.side_effect": STATUSES, + "playlist.return_value": {1: item_path}})) + def test_run_mpdstats(self, mpd_mock): + item = Item(title=u'title', path=self.item_path, id=1) + item.add(self.lib) + + log = Mock() + try: + MPDStats(self.lib, log).run() + except KeyboardInterrupt: + pass + + log.debug.assert_has_calls( + [call(u'unhandled status "{0}"', ANY)]) + log.info.assert_has_calls( + [call(u'pause'), call(u'playing {0}', ANY), call(u'stop')]) + + +def suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_permissions.py beets-1.3.19/test/test_permissions.py --- beets-1.3.8+dfsg/test/test_permissions.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/test_permissions.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- + +"""Tests for the 'permissions' plugin. 
+""" +from __future__ import division, absolute_import, print_function + +import os +import platform +from mock import patch, Mock + +from test._common import unittest +from test.helper import TestHelper +from beetsplug.permissions import (check_permissions, + convert_perm, + dirs_in_library) + + +class PermissionsPluginTest(unittest.TestCase, TestHelper): + def setUp(self): + self.setup_beets() + self.load_plugins('permissions') + + self.config['permissions'] = { + 'file': '777', + 'dir': '777'} + + def tearDown(self): + self.teardown_beets() + self.unload_plugins() + + def test_permissions_on_album_imported(self): + self.do_thing(True) + + def test_permissions_on_item_imported(self): + self.config['import']['singletons'] = True + self.do_thing(True) + + @patch("os.chmod", Mock()) + def test_failing_to_set_permissions(self): + self.do_thing(False) + + def do_thing(self, expect_success): + if platform.system() == 'Windows': + self.skipTest('permissions not available on Windows') + + def get_stat(v): + return os.stat( + os.path.join(self.temp_dir, b'import', *v)).st_mode & 0o777 + self.importer = self.create_importer() + typs = ['file', 'dir'] + + track_file = (b'album 0', b'track 0.mp3') + self.exp_perms = { + True: {k: convert_perm(self.config['permissions'][k].get()) + for k in typs}, + False: {k: get_stat(v) for (k, v) in zip(typs, (track_file, ()))} + } + + self.importer.run() + item = self.lib.items().get() + + self.assertPerms(item.path, 'file', expect_success) + + for path in dirs_in_library(self.lib.directory, item.path): + self.assertPerms(path, 'dir', expect_success) + + def assertPerms(self, path, typ, expect_success): # noqa + for x in [(True, self.exp_perms[expect_success][typ], '!='), + (False, self.exp_perms[not expect_success][typ], '==')]: + self.assertEqual(x[0], check_permissions(path, x[1]), + msg=u'{} : {} {} {}'.format( + path, oct(os.stat(path).st_mode), x[2], oct(x[1]))) + + def test_convert_perm_from_string(self): + self.assertEqual(convert_perm('10'), 8) + + +def suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_pipeline.py beets-1.3.19/test/test_pipeline.py --- beets-1.3.8+dfsg/test/test_pipeline.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_pipeline.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,7 +15,9 @@ """Test the "pipeline.py" restricted parallel programming library. """ -from _common import unittest +from __future__ import division, absolute_import, print_function + +from test._common import unittest from beets.util import pipeline @@ -130,7 +133,7 @@ pl = pipeline.Pipeline((_produce(), _exc_work())) pull = pl.pull() for i in range(3): - pull.next() + next(pull) self.assertRaises(TestException, pull.next) diff -Nru beets-1.3.8+dfsg/test/test_player.py beets-1.3.19/test/test_player.py --- beets-1.3.8+dfsg/test/test_player.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_player.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. 
# # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,48 +15,50 @@ """Tests for BPD and music playing. """ -from _common import unittest +from __future__ import division, absolute_import, print_function + +from test._common import unittest from beetsplug import bpd class CommandParseTest(unittest.TestCase): def test_no_args(self): - s = ur'command' + s = r'command' c = bpd.Command(s) self.assertEqual(c.name, u'command') self.assertEqual(c.args, []) def test_one_unquoted_arg(self): - s = ur'command hello' + s = r'command hello' c = bpd.Command(s) self.assertEqual(c.name, u'command') self.assertEqual(c.args, [u'hello']) def test_two_unquoted_args(self): - s = ur'command hello there' + s = r'command hello there' c = bpd.Command(s) self.assertEqual(c.name, u'command') self.assertEqual(c.args, [u'hello', u'there']) def test_one_quoted_arg(self): - s = ur'command "hello there"' + s = r'command "hello there"' c = bpd.Command(s) self.assertEqual(c.name, u'command') self.assertEqual(c.args, [u'hello there']) def test_heterogenous_args(self): - s = ur'command "hello there" sir' + s = r'command "hello there" sir' c = bpd.Command(s) self.assertEqual(c.name, u'command') self.assertEqual(c.args, [u'hello there', u'sir']) def test_quote_in_arg(self): - s = ur'command "hello \" there"' + s = r'command "hello \" there"' c = bpd.Command(s) self.assertEqual(c.args, [u'hello " there']) def test_backslash_in_arg(self): - s = ur'command "hello \\ there"' + s = r'command "hello \\ there"' c = bpd.Command(s) self.assertEqual(c.args, [u'hello \ there']) diff -Nru beets-1.3.8+dfsg/test/test_play.py beets-1.3.19/test/test_play.py --- beets-1.3.8+dfsg/test/test_play.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/test_play.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,130 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Jesse Weinstein +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+ +"""Tests for the play plugin""" + +from __future__ import division, absolute_import, print_function + +import os + +from mock import patch, ANY + +from test._common import unittest +from test.helper import TestHelper, control_stdin + +from beets.ui import UserError +from beets.util import open_anything + + +class PlayPluginTest(unittest.TestCase, TestHelper): + def setUp(self): + self.setup_beets() + self.load_plugins('play') + self.item = self.add_item(album=u'a nice älbum', title=u'aNiceTitle') + self.lib.add_album([self.item]) + self.open_patcher = patch('beetsplug.play.util.interactive_open') + self.open_mock = self.open_patcher.start() + self.config['play']['command'] = 'echo' + + def tearDown(self): + self.open_patcher.stop() + self.teardown_beets() + self.unload_plugins() + + def do_test(self, args=('title:aNiceTitle',), expected_cmd='echo', + expected_playlist=None): + self.run_command('play', *args) + + self.open_mock.assert_called_once_with(ANY, expected_cmd) + expected_playlist = expected_playlist or self.item.path.decode('utf-8') + exp_playlist = expected_playlist + u'\n' + with open(self.open_mock.call_args[0][0][0], 'rb') as playlist: + self.assertEqual(exp_playlist, playlist.read().decode('utf-8')) + + def test_basic(self): + self.do_test() + + def test_album_option(self): + self.do_test([u'-a', u'nice']) + + def test_args_option(self): + self.do_test([u'-A', u'foo', u'title:aNiceTitle'], u'echo foo') + + def test_args_option_in_middle(self): + self.config['play']['command'] = 'echo $args other' + + self.do_test([u'-A', u'foo', u'title:aNiceTitle'], u'echo foo other') + + def test_relative_to(self): + self.config['play']['command'] = 'echo' + self.config['play']['relative_to'] = '/something' + + path = os.path.relpath(self.item.path, b'/something') + playlist = path.decode('utf8') + self.do_test(expected_cmd='echo', expected_playlist=playlist) + + def test_use_folders(self): + self.config['play']['command'] = None + self.config['play']['use_folders'] = True + self.run_command('play', '-a', 'nice') + + self.open_mock.assert_called_once_with(ANY, open_anything()) + playlist = open(self.open_mock.call_args[0][0][0], 'rb') + self.assertEqual(u'{}\n'.format( + os.path.dirname(self.item.path.decode('utf-8'))), + playlist.read().decode('utf-8')) + + def test_raw(self): + self.config['play']['raw'] = True + + self.run_command(u'play', u'nice') + + self.open_mock.assert_called_once_with([self.item.path], 'echo') + + def test_not_found(self): + self.run_command(u'play', u'not found') + + self.open_mock.assert_not_called() + + def test_warning_threshold(self): + self.config['play']['warning_threshold'] = 1 + self.add_item(title='another NiceTitle') + + with control_stdin("a"): + self.run_command(u'play', u'nice') + + self.open_mock.assert_not_called() + + def test_warning_threshold_backwards_compat(self): + self.config['play']['warning_treshold'] = 1 + self.add_item(title=u'another NiceTitle') + + with control_stdin("a"): + self.run_command(u'play', u'nice') + + self.open_mock.assert_not_called() + + def test_command_failed(self): + self.open_mock.side_effect = OSError(u"some reason") + + with self.assertRaises(UserError): + self.run_command(u'play', u'title:aNiceTitle') + + +def suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_plexupdate.py beets-1.3.19/test/test_plexupdate.py --- beets-1.3.8+dfsg/test/test_plexupdate.py 1970-01-01 00:00:00.000000000 +0000 +++ 
beets-1.3.19/test/test_plexupdate.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,128 @@ +# -*- coding: utf-8 -*- + +from __future__ import division, absolute_import, print_function + +from test._common import unittest +from test.helper import TestHelper +from beetsplug.plexupdate import get_music_section, update_plex +import responses + + +class PlexUpdateTest(unittest.TestCase, TestHelper): + def add_response_get_music_section(self, section_name='Music'): + """Create response for mocking the get_music_section function. + """ + + escaped_section_name = section_name.replace('"', '\\"') + + body = ( + '' + '' + '' + '' + '' + '' + '' + '' + '' + '' + '' + '') + status = 200 + content_type = 'text/xml;charset=utf-8' + + responses.add(responses.GET, + 'http://localhost:32400/library/sections', + body=body, + status=status, + content_type=content_type) + + def add_response_update_plex(self): + """Create response for mocking the update_plex function. + """ + body = '' + status = 200 + content_type = 'text/html' + + responses.add(responses.GET, + 'http://localhost:32400/library/sections/2/refresh', + body=body, + status=status, + content_type=content_type) + + def setUp(self): + self.setup_beets() + self.load_plugins('plexupdate') + + self.config['plex'] = { + u'host': u'localhost', + u'port': 32400} + + def tearDown(self): + self.teardown_beets() + self.unload_plugins() + + @responses.activate + def test_get_music_section(self): + # Adding response. + self.add_response_get_music_section() + + # Test if section key is "2" out of the mocking data. + self.assertEqual(get_music_section( + self.config['plex']['host'], + self.config['plex']['port'], + self.config['plex']['token'], + self.config['plex']['library_name'].get()), '2') + + @responses.activate + def test_get_named_music_section(self): + # Adding response. + self.add_response_get_music_section('My Music Library') + + self.assertEqual(get_music_section( + self.config['plex']['host'], + self.config['plex']['port'], + self.config['plex']['token'], + 'My Music Library'), '2') + + @responses.activate + def test_update_plex(self): + # Adding responses. + self.add_response_get_music_section() + self.add_response_update_plex() + + # Testing status code of the mocking request. + self.assertEqual(update_plex( + self.config['plex']['host'], + self.config['plex']['port'], + self.config['plex']['token'], + self.config['plex']['library_name'].get()).status_code, 200) + + +def suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_plugins.py beets-1.3.19/test/test_plugins.py --- beets-1.3.8+dfsg/test/test_plugins.py 2014-09-18 02:01:05.000000000 +0000 +++ beets-1.3.19/test/test_plugins.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Thomas Scholtes. +# Copyright 2016, Thomas Scholtes. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,14 +13,25 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. 
-from mock import patch -from _common import unittest -import helper +from __future__ import division, absolute_import, print_function -from beets import plugins +import os +from mock import patch, Mock, ANY +import shutil +import itertools + +from beets.importer import SingletonImportTask, SentinelImportTask, \ + ArchiveImportTask, action +from beets import plugins, config, ui from beets.library import Item from beets.dbcore import types from beets.mediafile import MediaFile +from beets.util import displayable_path, bytestring_path + +from test.test_importer import ImportHelper, AutotagStub +from test.test_ui_importer import TerminalImportSessionSetup +from test._common import unittest, RSRC +from test import helper class TestHelper(helper.TestHelper): @@ -27,6 +39,7 @@ def setup_plugin_loader(self): # FIXME the mocking code is horrific, but this is the lowest and # earliest level of the plugin mechanism we can hook into. + self.load_plugins() self._plugin_loader_patch = patch('beets.plugins.load_plugins') self._plugin_classes = set() load_plugins = self._plugin_loader_patch.start() @@ -48,7 +61,6 @@ def setUp(self): self.setup_plugin_loader() - self.setup_beets() def tearDown(self): self.teardown_plugin_loader() @@ -61,22 +73,22 @@ self.register_plugin(RatingPlugin) self.config['plugins'] = 'rating' - item = Item(path='apath', artist='aaa') + item = Item(path=u'apath', artist=u'aaa') item.add(self.lib) # Do not match unset values - out = self.run_with_output('ls', 'rating:1..3') - self.assertNotIn('aaa', out) + out = self.run_with_output(u'ls', u'rating:1..3') + self.assertNotIn(u'aaa', out) - self.run_command('modify', 'rating=2', '--yes') + self.run_command(u'modify', u'rating=2', u'--yes') # Match in range - out = self.run_with_output('ls', 'rating:1..3') - self.assertIn('aaa', out) + out = self.run_with_output(u'ls', u'rating:1..3') + self.assertIn(u'aaa', out) # Don't match out of range - out = self.run_with_output('ls', 'rating:3..5') - self.assertNotIn('aaa', out) + out = self.run_with_output(u'ls', u'rating:3..5') + self.assertNotIn(u'aaa', out) class ItemWriteTest(unittest.TestCase, TestHelper): @@ -87,7 +99,7 @@ class EventListenerPlugin(plugins.BeetsPlugin): pass - self.event_listener_plugin = EventListenerPlugin + self.event_listener_plugin = EventListenerPlugin() self.register_plugin(EventListenerPlugin) def tearDown(self): @@ -97,21 +109,456 @@ def test_change_tags(self): def on_write(item=None, path=None, tags=None): - if tags['artist'] == 'XXX': - tags['artist'] = 'YYY' + if tags['artist'] == u'XXX': + tags['artist'] = u'YYY' self.register_listener('write', on_write) - item = self.add_item_fixture(artist='XXX') + item = self.add_item_fixture(artist=u'XXX') item.write() mediafile = MediaFile(item.path) - self.assertEqual(mediafile.artist, 'YYY') + self.assertEqual(mediafile.artist, u'YYY') def register_listener(self, event, func): self.event_listener_plugin.register_listener(event, func) +class ItemTypeConflictTest(unittest.TestCase, TestHelper): + + def setUp(self): + self.setup_plugin_loader() + self.setup_beets() + + def tearDown(self): + self.teardown_plugin_loader() + self.teardown_beets() + + def test_mismatch(self): + class EventListenerPlugin(plugins.BeetsPlugin): + item_types = {'duplicate': types.INTEGER} + + class AdventListenerPlugin(plugins.BeetsPlugin): + item_types = {'duplicate': types.FLOAT} + + self.event_listener_plugin = EventListenerPlugin + self.advent_listener_plugin = AdventListenerPlugin + self.register_plugin(EventListenerPlugin) + 
self.register_plugin(AdventListenerPlugin) + self.assertRaises(plugins.PluginConflictException, + plugins.types, Item + ) + + def test_match(self): + class EventListenerPlugin(plugins.BeetsPlugin): + item_types = {'duplicate': types.INTEGER} + + class AdventListenerPlugin(plugins.BeetsPlugin): + item_types = {'duplicate': types.INTEGER} + + self.event_listener_plugin = EventListenerPlugin + self.advent_listener_plugin = AdventListenerPlugin + self.register_plugin(EventListenerPlugin) + self.register_plugin(AdventListenerPlugin) + self.assertNotEqual(None, plugins.types(Item)) + + +class EventsTest(unittest.TestCase, ImportHelper, TestHelper): + + def setUp(self): + self.setup_plugin_loader() + self.setup_beets() + self.__create_import_dir(2) + config['import']['pretend'] = True + + def tearDown(self): + self.teardown_plugin_loader() + self.teardown_beets() + + def __copy_file(self, dest_path, metadata): + # Copy files + resource_path = os.path.join(RSRC, b'full.mp3') + shutil.copy(resource_path, dest_path) + medium = MediaFile(dest_path) + # Set metadata + for attr in metadata: + setattr(medium, attr, metadata[attr]) + medium.save() + + def __create_import_dir(self, count): + self.import_dir = os.path.join(self.temp_dir, b'testsrcdir') + if os.path.isdir(self.import_dir): + shutil.rmtree(self.import_dir) + + self.album_path = os.path.join(self.import_dir, b'album') + os.makedirs(self.album_path) + + metadata = { + 'artist': u'Tag Artist', + 'album': u'Tag Album', + 'albumartist': None, + 'mb_trackid': None, + 'mb_albumid': None, + 'comp': None + } + self.file_paths = [] + for i in range(count): + metadata['track'] = i + 1 + metadata['title'] = u'Tag Title Album %d' % (i + 1) + track_file = bytestring_path('%02d - track.mp3' % (i + 1)) + dest_path = os.path.join(self.album_path, track_file) + self.__copy_file(dest_path, metadata) + self.file_paths.append(dest_path) + + def test_import_task_created(self): + import_files = [self.import_dir] + self._setup_import_session(singletons=False) + self.importer.paths = import_files + + with helper.capture_log() as logs: + self.importer.run() + self.unload_plugins() + + # Exactly one event should have been imported (for the album). + # Sentinels do not get emitted. 
+ self.assertEqual(logs.count(u'Sending event: import_task_created'), 1) + + logs = [line for line in logs if not line.startswith( + u'Sending event:')] + self.assertEqual(logs, [ + u'Album: {0}'.format(displayable_path( + os.path.join(self.import_dir, b'album'))), + u' {0}'.format(displayable_path(self.file_paths[0])), + u' {0}'.format(displayable_path(self.file_paths[1])), + ]) + + def test_import_task_created_with_plugin(self): + class ToSingletonPlugin(plugins.BeetsPlugin): + def __init__(self): + super(ToSingletonPlugin, self).__init__() + + self.register_listener('import_task_created', + self.import_task_created_event) + + def import_task_created_event(self, session, task): + if isinstance(task, SingletonImportTask) \ + or isinstance(task, SentinelImportTask)\ + or isinstance(task, ArchiveImportTask): + return task + + new_tasks = [] + for item in task.items: + new_tasks.append(SingletonImportTask(task.toppath, item)) + + return new_tasks + + to_singleton_plugin = ToSingletonPlugin + self.register_plugin(to_singleton_plugin) + + import_files = [self.import_dir] + self._setup_import_session(singletons=False) + self.importer.paths = import_files + + with helper.capture_log() as logs: + self.importer.run() + self.unload_plugins() + + # Exactly one event should have been imported (for the album). + # Sentinels do not get emitted. + self.assertEqual(logs.count(u'Sending event: import_task_created'), 1) + + logs = [line for line in logs if not line.startswith( + u'Sending event:')] + self.assertEqual(logs, [ + u'Singleton: {0}'.format(displayable_path(self.file_paths[0])), + u'Singleton: {0}'.format(displayable_path(self.file_paths[1])), + ]) + + +class HelpersTest(unittest.TestCase): + + def test_sanitize_choices(self): + self.assertEqual( + plugins.sanitize_choices([u'A', u'Z'], (u'A', u'B')), [u'A']) + self.assertEqual( + plugins.sanitize_choices([u'A', u'A'], (u'A')), [u'A']) + self.assertEqual( + plugins.sanitize_choices([u'D', u'*', u'A'], + (u'A', u'B', u'C', u'D')), + [u'D', u'B', u'C', u'A']) + + +class ListenersTest(unittest.TestCase, TestHelper): + def setUp(self): + self.setup_plugin_loader() + + def tearDown(self): + self.teardown_plugin_loader() + self.teardown_beets() + + def test_register(self): + + class DummyPlugin(plugins.BeetsPlugin): + def __init__(self): + super(DummyPlugin, self).__init__() + self.register_listener('cli_exit', self.dummy) + self.register_listener('cli_exit', self.dummy) + + def dummy(self): + pass + + d = DummyPlugin() + self.assertEqual(DummyPlugin._raw_listeners['cli_exit'], [d.dummy]) + + d2 = DummyPlugin() + self.assertEqual(DummyPlugin._raw_listeners['cli_exit'], + [d.dummy, d2.dummy]) + + d.register_listener('cli_exit', d2.dummy) + self.assertEqual(DummyPlugin._raw_listeners['cli_exit'], + [d.dummy, d2.dummy]) + + @patch('beets.plugins.find_plugins') + @patch('beets.plugins.inspect') + def test_events_called(self, mock_inspect, mock_find_plugins): + mock_inspect.getargspec.return_value = None + + class DummyPlugin(plugins.BeetsPlugin): + def __init__(self): + super(DummyPlugin, self).__init__() + self.foo = Mock(__name__='foo') + self.register_listener('event_foo', self.foo) + self.bar = Mock(__name__='bar') + self.register_listener('event_bar', self.bar) + + d = DummyPlugin() + mock_find_plugins.return_value = d, + + plugins.send('event') + d.foo.assert_has_calls([]) + d.bar.assert_has_calls([]) + + plugins.send('event_foo', var=u"tagada") + d.foo.assert_called_once_with(var=u"tagada") + d.bar.assert_has_calls([]) + + 
@patch('beets.plugins.find_plugins') + def test_listener_params(self, mock_find_plugins): + test = self + + class DummyPlugin(plugins.BeetsPlugin): + def __init__(self): + super(DummyPlugin, self).__init__() + for i in itertools.count(1): + try: + meth = getattr(self, 'dummy{0}'.format(i)) + except AttributeError: + break + self.register_listener('event{0}'.format(i), meth) + + def dummy1(self, foo): + test.assertEqual(foo, 5) + + def dummy2(self, foo=None): + test.assertEqual(foo, 5) + + def dummy3(self): + # argument cut off + pass + + def dummy4(self, bar=None): + # argument cut off + pass + + def dummy5(self, bar): + test.assertFalse(True) + + # more complex examples + + def dummy6(self, foo, bar=None): + test.assertEqual(foo, 5) + test.assertEqual(bar, None) + + def dummy7(self, foo, **kwargs): + test.assertEqual(foo, 5) + test.assertEqual(kwargs, {}) + + def dummy8(self, foo, bar, **kwargs): + test.assertFalse(True) + + def dummy9(self, **kwargs): + test.assertEqual(kwargs, {"foo": 5}) + + d = DummyPlugin() + mock_find_plugins.return_value = d, + + plugins.send('event1', foo=5) + plugins.send('event2', foo=5) + plugins.send('event3', foo=5) + plugins.send('event4', foo=5) + + with self.assertRaises(TypeError): + plugins.send('event5', foo=5) + + plugins.send('event6', foo=5) + plugins.send('event7', foo=5) + + with self.assertRaises(TypeError): + plugins.send('event8', foo=5) + + plugins.send('event9', foo=5) + + +class PromptChoicesTest(TerminalImportSessionSetup, unittest.TestCase, + ImportHelper, TestHelper): + def setUp(self): + self.setup_plugin_loader() + self.setup_beets() + self._create_import_dir(3) + self._setup_import_session() + self.matcher = AutotagStub().install() + # keep track of ui.input_option() calls + self.input_options_patcher = patch('beets.ui.input_options', + side_effect=ui.input_options) + self.mock_input_options = self.input_options_patcher.start() + + def tearDown(self): + self.input_options_patcher.stop() + self.teardown_plugin_loader() + self.teardown_beets() + self.matcher.restore() + + def test_plugin_choices_in_ui_input_options_album(self): + """Test the presence of plugin choices on the prompt (album).""" + class DummyPlugin(plugins.BeetsPlugin): + def __init__(self): + super(DummyPlugin, self).__init__() + self.register_listener('before_choose_candidate', + self.return_choices) + + def return_choices(self, session, task): + return [ui.commands.PromptChoice('f', u'Foo', None), + ui.commands.PromptChoice('r', u'baR', None)] + + self.register_plugin(DummyPlugin) + # Default options + extra choices by the plugin ('Foo', 'Bar') + opts = (u'Apply', u'More candidates', u'Skip', u'Use as-is', + u'as Tracks', u'Group albums', u'Enter search', + u'enter Id', u'aBort') + (u'Foo', u'baR') + + self.importer.add_choice(action.SKIP) + self.importer.run() + self.mock_input_options.assert_called_once_with(opts, default='a', + require=ANY) + + def test_plugin_choices_in_ui_input_options_singleton(self): + """Test the presence of plugin choices on the prompt (singleton).""" + class DummyPlugin(plugins.BeetsPlugin): + def __init__(self): + super(DummyPlugin, self).__init__() + self.register_listener('before_choose_candidate', + self.return_choices) + + def return_choices(self, session, task): + return [ui.commands.PromptChoice('f', u'Foo', None), + ui.commands.PromptChoice('r', u'baR', None)] + + self.register_plugin(DummyPlugin) + # Default options + extra choices by the plugin ('Foo', 'Bar') + opts = (u'Apply', u'More candidates', u'Skip', u'Use as-is', + u'Enter
search', + u'enter Id', u'aBort') + (u'Foo', u'baR') + + config['import']['singletons'] = True + self.importer.add_choice(action.SKIP) + self.importer.run() + self.mock_input_options.assert_called_with(opts, default='a', + require=ANY) + + def test_choices_conflicts(self): + """Test the short letter conflict solving.""" + class DummyPlugin(plugins.BeetsPlugin): + def __init__(self): + super(DummyPlugin, self).__init__() + self.register_listener('before_choose_candidate', + self.return_choices) + + def return_choices(self, session, task): + return [ui.commands.PromptChoice('a', u'A foo', None), # dupe + ui.commands.PromptChoice('z', u'baZ', None), # ok + ui.commands.PromptChoice('z', u'Zupe', None), # dupe + ui.commands.PromptChoice('z', u'Zoo', None)] # dupe + + self.register_plugin(DummyPlugin) + # Default options + not dupe extra choices by the plugin ('baZ') + opts = (u'Apply', u'More candidates', u'Skip', u'Use as-is', + u'as Tracks', u'Group albums', u'Enter search', + u'enter Id', u'aBort') + (u'baZ',) + self.importer.add_choice(action.SKIP) + self.importer.run() + self.mock_input_options.assert_called_once_with(opts, default='a', + require=ANY) + + def test_plugin_callback(self): + """Test that plugin callbacks are being called upon user choice.""" + class DummyPlugin(plugins.BeetsPlugin): + def __init__(self): + super(DummyPlugin, self).__init__() + self.register_listener('before_choose_candidate', + self.return_choices) + + def return_choices(self, session, task): + return [ui.commands.PromptChoice('f', u'Foo', self.foo)] + + def foo(self, session, task): + pass + + self.register_plugin(DummyPlugin) + # Default options + extra choices by the plugin ('Foo', 'Bar') + opts = (u'Apply', u'More candidates', u'Skip', u'Use as-is', + u'as Tracks', u'Group albums', u'Enter search', + u'enter Id', u'aBort') + (u'Foo',) + + # DummyPlugin.foo() should be called once + with patch.object(DummyPlugin, 'foo', autospec=True) as mock_foo: + with helper.control_stdin('\n'.join(['f', 's'])): + self.importer.run() + self.assertEqual(mock_foo.call_count, 1) + + # input_options should be called twice, as foo() returns None + self.assertEqual(self.mock_input_options.call_count, 2) + self.mock_input_options.assert_called_with(opts, default='a', + require=ANY) + + def test_plugin_callback_return(self): + """Test that plugin callbacks that return a value exit the loop.""" + class DummyPlugin(plugins.BeetsPlugin): + def __init__(self): + super(DummyPlugin, self).__init__() + self.register_listener('before_choose_candidate', + self.return_choices) + + def return_choices(self, session, task): + return [ui.commands.PromptChoice('f', u'Foo', self.foo)] + + def foo(self, session, task): + return action.SKIP + + self.register_plugin(DummyPlugin) + # Default options + extra choices by the plugin ('Foo', 'Bar') + opts = (u'Apply', u'More candidates', u'Skip', u'Use as-is', + u'as Tracks', u'Group albums', u'Enter search', + u'enter Id', u'aBort') + (u'Foo',) + + # DummyPlugin.foo() should be called once + with helper.control_stdin('f\n'): + self.importer.run() + + # input_options should be called once, as foo() returns SKIP + self.mock_input_options.assert_called_once_with(opts, default='a', + require=ANY) + + def suite(): return unittest.TestLoader().loadTestsFromName(__name__) diff -Nru beets-1.3.8+dfsg/test/test_query.py beets-1.3.19/test/test_query.py --- beets-1.3.8+dfsg/test/test_query.py 2014-09-18 02:01:05.000000000 +0000 +++ beets-1.3.19/test/test_query.py 2016-06-26 00:42:09.000000000 +0000 @@ -1,5 +1,6 
@@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,25 +15,35 @@ """Various tests for querying the library database. """ -import _common -from _common import unittest -import helper +from __future__ import division, absolute_import, print_function + +from functools import partial +from mock import patch +import os +import sys + +from test import _common +from test._common import unittest +from test import helper import beets.library from beets import dbcore from beets.dbcore import types -from beets.dbcore.query import NoneQuery +from beets.dbcore.query import (NoneQuery, ParsingError, + InvalidQueryArgumentTypeError) from beets.library import Library, Item +from beets import util +import platform class TestHelper(helper.TestHelper): - def assertInResult(self, item, results): - result_ids = map(lambda i: i.id, results) + def assertInResult(self, item, results): # noqa + result_ids = [i.id for i in results] self.assertIn(item.id, result_ids) - def assertNotInResult(self, item, results): - result_ids = map(lambda i: i.id, results) + def assertNotInResult(self, item, results): # noqa + result_ids = [i.id for i in results] self.assertNotIn(item.id, result_ids) @@ -45,20 +56,33 @@ self.assertEqual(self.lib.items(q).get().title, 'the title') def test_restriction_completeness(self): - q = dbcore.query.AnyFieldQuery('title', ['title'], + q = dbcore.query.AnyFieldQuery('title', [u'title'], dbcore.query.SubstringQuery) - self.assertEqual(self.lib.items(q).get().title, 'the title') + self.assertEqual(self.lib.items(q).get().title, u'the title') def test_restriction_soundness(self): - q = dbcore.query.AnyFieldQuery('title', ['artist'], + q = dbcore.query.AnyFieldQuery('title', [u'artist'], dbcore.query.SubstringQuery) self.assertEqual(self.lib.items(q).get(), None) + def test_eq(self): + q1 = dbcore.query.AnyFieldQuery('foo', [u'bar'], + dbcore.query.SubstringQuery) + q2 = dbcore.query.AnyFieldQuery('foo', [u'bar'], + dbcore.query.SubstringQuery) + self.assertEqual(q1, q2) + + q2.query_class = None + self.assertNotEqual(q1, q2) + class AssertsMixin(object): - def assert_matched(self, results, titles): + def assert_items_matched(self, results, titles): self.assertEqual([i.title for i in results], titles) + def assert_albums_matched(self, results, albums): + self.assertEqual([a.album for a in results], albums) + # A test case class providing a library with some dummy data and some # assertions involving that data. 
@@ -67,196 +91,191 @@ super(DummyDataTestCase, self).setUp() self.lib = beets.library.Library(':memory:') items = [_common.item() for _ in range(3)] - items[0].title = 'foo bar' - items[0].artist = 'one' - items[0].album = 'baz' + items[0].title = u'foo bar' + items[0].artist = u'one' + items[0].album = u'baz' items[0].year = 2001 items[0].comp = True - items[1].title = 'baz qux' - items[1].artist = 'two' - items[1].album = 'baz' + items[1].title = u'baz qux' + items[1].artist = u'two' + items[1].album = u'baz' items[1].year = 2002 items[1].comp = True - items[2].title = 'beets 4 eva' - items[2].artist = 'three' - items[2].album = 'foo' + items[2].title = u'beets 4 eva' + items[2].artist = u'three' + items[2].album = u'foo' items[2].year = 2003 items[2].comp = False for item in items: self.lib.add(item) self.lib.add_album(items[:2]) - def assert_matched_all(self, results): - self.assert_matched(results, [ - 'foo bar', - 'baz qux', - 'beets 4 eva', + def assert_items_matched_all(self, results): + self.assert_items_matched(results, [ + u'foo bar', + u'baz qux', + u'beets 4 eva', ]) class GetTest(DummyDataTestCase): def test_get_empty(self): - q = '' + q = u'' results = self.lib.items(q) - self.assert_matched_all(results) + self.assert_items_matched_all(results) def test_get_none(self): q = None results = self.lib.items(q) - self.assert_matched_all(results) + self.assert_items_matched_all(results) def test_get_one_keyed_term(self): - q = 'title:qux' + q = u'title:qux' results = self.lib.items(q) - self.assert_matched(results, ['baz qux']) + self.assert_items_matched(results, [u'baz qux']) def test_get_one_keyed_regexp(self): - q = r'artist::t.+r' + q = u'artist::t.+r' results = self.lib.items(q) - self.assert_matched(results, ['beets 4 eva']) + self.assert_items_matched(results, [u'beets 4 eva']) def test_get_one_unkeyed_term(self): - q = 'three' + q = u'three' results = self.lib.items(q) - self.assert_matched(results, ['beets 4 eva']) + self.assert_items_matched(results, [u'beets 4 eva']) def test_get_one_unkeyed_regexp(self): - q = r':x$' + q = u':x$' results = self.lib.items(q) - self.assert_matched(results, ['baz qux']) + self.assert_items_matched(results, [u'baz qux']) def test_get_no_matches(self): - q = 'popebear' + q = u'popebear' results = self.lib.items(q) - self.assert_matched(results, []) + self.assert_items_matched(results, []) def test_invalid_key(self): - q = 'pope:bear' + q = u'pope:bear' results = self.lib.items(q) # Matches nothing since the flexattr is not present on the # objects. 
- self.assert_matched(results, []) + self.assert_items_matched(results, []) def test_term_case_insensitive(self): - q = 'oNE' + q = u'oNE' results = self.lib.items(q) - self.assert_matched(results, ['foo bar']) + self.assert_items_matched(results, [u'foo bar']) def test_regexp_case_sensitive(self): - q = r':oNE' + q = u':oNE' results = self.lib.items(q) - self.assert_matched(results, []) - q = r':one' + self.assert_items_matched(results, []) + q = u':one' results = self.lib.items(q) - self.assert_matched(results, ['foo bar']) + self.assert_items_matched(results, [u'foo bar']) def test_term_case_insensitive_with_key(self): - q = 'artist:thrEE' + q = u'artist:thrEE' results = self.lib.items(q) - self.assert_matched(results, ['beets 4 eva']) + self.assert_items_matched(results, [u'beets 4 eva']) def test_key_case_insensitive(self): - q = 'ArTiST:three' + q = u'ArTiST:three' results = self.lib.items(q) - self.assert_matched(results, ['beets 4 eva']) + self.assert_items_matched(results, [u'beets 4 eva']) def test_unkeyed_term_matches_multiple_columns(self): - q = 'baz' + q = u'baz' results = self.lib.items(q) - self.assert_matched(results, [ - 'foo bar', - 'baz qux', + self.assert_items_matched(results, [ + u'foo bar', + u'baz qux', ]) def test_unkeyed_regexp_matches_multiple_columns(self): - q = r':z$' + q = u':z$' results = self.lib.items(q) - self.assert_matched(results, [ - 'foo bar', - 'baz qux', + self.assert_items_matched(results, [ + u'foo bar', + u'baz qux', ]) def test_keyed_term_matches_only_one_column(self): - q = 'title:baz' + q = u'title:baz' results = self.lib.items(q) - self.assert_matched(results, ['baz qux']) + self.assert_items_matched(results, [u'baz qux']) def test_keyed_regexp_matches_only_one_column(self): - q = r'title::baz' + q = u'title::baz' results = self.lib.items(q) - self.assert_matched(results, [ - 'baz qux', + self.assert_items_matched(results, [ + u'baz qux', ]) def test_multiple_terms_narrow_search(self): - q = 'qux baz' + q = u'qux baz' results = self.lib.items(q) - self.assert_matched(results, [ - 'baz qux', + self.assert_items_matched(results, [ + u'baz qux', ]) def test_multiple_regexps_narrow_search(self): - q = r':baz :qux' + q = u':baz :qux' results = self.lib.items(q) - self.assert_matched(results, ['baz qux']) + self.assert_items_matched(results, [u'baz qux']) def test_mixed_terms_regexps_narrow_search(self): - q = r':baz qux' + q = u':baz qux' results = self.lib.items(q) - self.assert_matched(results, ['baz qux']) + self.assert_items_matched(results, [u'baz qux']) def test_single_year(self): - q = 'year:2001' + q = u'year:2001' results = self.lib.items(q) - self.assert_matched(results, ['foo bar']) + self.assert_items_matched(results, [u'foo bar']) def test_year_range(self): - q = 'year:2000..2002' + q = u'year:2000..2002' results = self.lib.items(q) - self.assert_matched(results, [ - 'foo bar', - 'baz qux', + self.assert_items_matched(results, [ + u'foo bar', + u'baz qux', ]) - def test_bad_year(self): - q = 'year:delete from items' - results = self.lib.items(q) - self.assert_matched(results, []) - def test_singleton_true(self): - q = 'singleton:true' + q = u'singleton:true' results = self.lib.items(q) - self.assert_matched(results, ['beets 4 eva']) + self.assert_items_matched(results, [u'beets 4 eva']) def test_singleton_false(self): - q = 'singleton:false' + q = u'singleton:false' results = self.lib.items(q) - self.assert_matched(results, ['foo bar', 'baz qux']) + self.assert_items_matched(results, [u'foo bar', u'baz qux']) def 
test_compilation_true(self): - q = 'comp:true' + q = u'comp:true' results = self.lib.items(q) - self.assert_matched(results, ['foo bar', 'baz qux']) + self.assert_items_matched(results, [u'foo bar', u'baz qux']) def test_compilation_false(self): - q = 'comp:false' + q = u'comp:false' results = self.lib.items(q) - self.assert_matched(results, ['beets 4 eva']) + self.assert_items_matched(results, [u'beets 4 eva']) def test_unknown_field_name_no_results(self): - q = 'xyzzy:nonsense' + q = u'xyzzy:nonsense' results = self.lib.items(q) titles = [i.title for i in results] self.assertEqual(titles, []) def test_unknown_field_name_no_results_in_album_query(self): - q = 'xyzzy:nonsense' + q = u'xyzzy:nonsense' results = self.lib.albums(q) names = [a.album for a in results] self.assertEqual(names, []) def test_item_field_name_matches_nothing_in_album_query(self): - q = 'format:nonsense' + q = u'format:nonsense' results = self.lib.albums(q) names = [a.album for a in results] self.assertEqual(names, []) @@ -268,22 +287,32 @@ q = u'title:caf\xe9' results = self.lib.items(q) - self.assert_matched(results, [u'caf\xe9']) + self.assert_items_matched(results, [u'caf\xe9']) def test_numeric_search_positive(self): - q = dbcore.query.NumericQuery('year', '2001') + q = dbcore.query.NumericQuery('year', u'2001') results = self.lib.items(q) self.assertTrue(results) def test_numeric_search_negative(self): - q = dbcore.query.NumericQuery('year', '1999') + q = dbcore.query.NumericQuery('year', u'1999') results = self.lib.items(q) self.assertFalse(results) - def test_numeric_empty(self): - q = dbcore.query.NumericQuery('year', '') - results = self.lib.items(q) - self.assertTrue(results) + def test_invalid_query(self): + with self.assertRaises(InvalidQueryArgumentTypeError) as raised: + dbcore.query.NumericQuery('year', u'199a') + self.assertIn(u'not an int', unicode(raised.exception)) + + with self.assertRaises(InvalidQueryArgumentTypeError) as raised: + dbcore.query.RegexpQuery('year', u'199(') + exception_text = unicode(raised.exception) + self.assertIn(u'not a regular expression', exception_text) + if sys.version_info >= (3, 5): + self.assertIn(u'unterminated subpattern', exception_text) + else: + self.assertIn(u'unbalanced parenthesis', exception_text) + self.assertIsInstance(raised.exception, ParsingError) class MatchTest(_common.TestCase): @@ -292,102 +321,320 @@ self.item = _common.item() def test_regex_match_positive(self): - q = dbcore.query.RegexpQuery('album', '^the album$') + q = dbcore.query.RegexpQuery('album', u'^the album$') self.assertTrue(q.match(self.item)) def test_regex_match_negative(self): - q = dbcore.query.RegexpQuery('album', '^album$') + q = dbcore.query.RegexpQuery('album', u'^album$') self.assertFalse(q.match(self.item)) def test_regex_match_non_string_value(self): - q = dbcore.query.RegexpQuery('disc', '^6$') + q = dbcore.query.RegexpQuery('disc', u'^6$') self.assertTrue(q.match(self.item)) def test_substring_match_positive(self): - q = dbcore.query.SubstringQuery('album', 'album') + q = dbcore.query.SubstringQuery('album', u'album') self.assertTrue(q.match(self.item)) def test_substring_match_negative(self): - q = dbcore.query.SubstringQuery('album', 'ablum') + q = dbcore.query.SubstringQuery('album', u'ablum') self.assertFalse(q.match(self.item)) def test_substring_match_non_string_value(self): - q = dbcore.query.SubstringQuery('disc', '6') + q = dbcore.query.SubstringQuery('disc', u'6') self.assertTrue(q.match(self.item)) def test_year_match_positive(self): - q = 
dbcore.query.NumericQuery('year', '1') + q = dbcore.query.NumericQuery('year', u'1') self.assertTrue(q.match(self.item)) def test_year_match_negative(self): - q = dbcore.query.NumericQuery('year', '10') + q = dbcore.query.NumericQuery('year', u'10') self.assertFalse(q.match(self.item)) def test_bitrate_range_positive(self): - q = dbcore.query.NumericQuery('bitrate', '100000..200000') + q = dbcore.query.NumericQuery('bitrate', u'100000..200000') self.assertTrue(q.match(self.item)) def test_bitrate_range_negative(self): - q = dbcore.query.NumericQuery('bitrate', '200000..300000') + q = dbcore.query.NumericQuery('bitrate', u'200000..300000') self.assertFalse(q.match(self.item)) + def test_open_range(self): + dbcore.query.NumericQuery('bitrate', u'100000..') + + def test_eq(self): + q1 = dbcore.query.MatchQuery('foo', u'bar') + q2 = dbcore.query.MatchQuery('foo', u'bar') + q3 = dbcore.query.MatchQuery('foo', u'baz') + q4 = dbcore.query.StringFieldQuery('foo', u'bar') + self.assertEqual(q1, q2) + self.assertNotEqual(q1, q3) + self.assertNotEqual(q1, q4) + self.assertNotEqual(q3, q4) -class PathQueryTest(_common.LibTestCase, AssertsMixin): + +class PathQueryTest(_common.LibTestCase, TestHelper, AssertsMixin): def setUp(self): super(PathQueryTest, self).setUp() - self.i.path = '/a/b/c.mp3' - self.i.title = 'path item' + + # This is the item we'll try to match. + self.i.path = util.normpath('/a/b/c.mp3') + self.i.title = u'path item' + self.i.album = u'path album' self.i.store() + self.lib.add_album([self.i]) + + # A second item for testing exclusion. + i2 = _common.item() + i2.path = util.normpath('/x/y/z.mp3') + i2.title = 'another item' + i2.album = 'another album' + self.lib.add(i2) + self.lib.add_album([i2]) + + # Unadorned path queries with path separators in them are considered + # path queries only when the path in question actually exists. So we + # mock the existence check to return true. 
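The comment above describes the heuristic being mocked here: an unadorned query term is treated as a path query only when it contains a path separator and names something that actually exists on disk. A minimal sketch of such a check, written as an assumption for illustration rather than the real `PathQuery.is_path_query` code:

    import os

    def looks_like_path_query(term):
        # Only the part before the first colon counts, so `field:value`
        # terms such as 'foo:bar/' are not mistaken for paths.
        head = term.split(':', 1)[0]
        return os.sep in head and os.path.exists(head)

With `os.path.exists` patched to return True, as in this setUp, terms like 'foo/bar' and 'foo/:bar' qualify while 'foo' and 'foo:/bar' do not, which is the behaviour the test_path_sep_detection case below asserts.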
+ self.patcher_exists = patch('beets.library.os.path.exists') + self.patcher_exists.start().return_value = True + + # We have to create function samefile as it does not exist on + # Windows and python 2.7 + self.patcher_samefile = patch('beets.library.os.path.samefile', + create=True) + self.patcher_samefile.start().return_value = True + + def tearDown(self): + super(PathQueryTest, self).tearDown() + + self.patcher_samefile.stop() + self.patcher_exists.stop() def test_path_exact_match(self): - q = 'path:/a/b/c.mp3' + q = u'path:/a/b/c.mp3' results = self.lib.items(q) - self.assert_matched(results, ['path item']) + self.assert_items_matched(results, [u'path item']) + + results = self.lib.albums(q) + self.assert_albums_matched(results, []) def test_parent_directory_no_slash(self): - q = 'path:/a' + q = u'path:/a' results = self.lib.items(q) - self.assert_matched(results, ['path item']) + self.assert_items_matched(results, [u'path item']) + + results = self.lib.albums(q) + self.assert_albums_matched(results, [u'path album']) def test_parent_directory_with_slash(self): - q = 'path:/a/' + q = u'path:/a/' results = self.lib.items(q) - self.assert_matched(results, ['path item']) + self.assert_items_matched(results, [u'path item']) + + results = self.lib.albums(q) + self.assert_albums_matched(results, [u'path album']) def test_no_match(self): - q = 'path:/xyzzy/' + q = u'path:/xyzzy/' results = self.lib.items(q) - self.assert_matched(results, []) + self.assert_items_matched(results, []) + + results = self.lib.albums(q) + self.assert_albums_matched(results, []) def test_fragment_no_match(self): - q = 'path:/b/' + q = u'path:/b/' results = self.lib.items(q) - self.assert_matched(results, []) + self.assert_items_matched(results, []) + + results = self.lib.albums(q) + self.assert_albums_matched(results, []) def test_nonnorm_path(self): - q = 'path:/x/../a/b' + q = u'path:/x/../a/b' results = self.lib.items(q) - self.assert_matched(results, ['path item']) + self.assert_items_matched(results, [u'path item']) + + results = self.lib.albums(q) + self.assert_albums_matched(results, [u'path album']) def test_slashed_query_matches_path(self): - q = '/a/b' + q = u'/a/b' + results = self.lib.items(q) + self.assert_items_matched(results, [u'path item']) + + results = self.lib.albums(q) + self.assert_albums_matched(results, [u'path album']) + + @unittest.skip('unfixed (#1865)') + def test_path_query_in_or_query(self): + q = '/a/b , /a/b' results = self.lib.items(q) - self.assert_matched(results, ['path item']) + self.assert_items_matched(results, ['path item']) def test_non_slashed_does_not_match_path(self): - q = 'c.mp3' + q = u'c.mp3' results = self.lib.items(q) - self.assert_matched(results, []) + self.assert_items_matched(results, []) + + results = self.lib.albums(q) + self.assert_albums_matched(results, []) def test_slashes_in_explicit_field_does_not_match_path(self): - q = 'title:/a/b' + q = u'title:/a/b' + results = self.lib.items(q) + self.assert_items_matched(results, []) + + def test_path_item_regex(self): + q = u'path::c\\.mp3$' results = self.lib.items(q) - self.assert_matched(results, []) + self.assert_items_matched(results, [u'path item']) + + def test_path_album_regex(self): + q = u'path::b' + results = self.lib.albums(q) + self.assert_albums_matched(results, [u'path album']) - def test_path_regex(self): - q = 'path::\\.mp3$' + def test_escape_underscore(self): + self.add_album(path=b'/a/_/title.mp3', title=u'with underscore', + album=u'album with underscore') + q = u'path:/a/_' results = 
self.lib.items(q) - self.assert_matched(results, ['path item']) + self.assert_items_matched(results, [u'with underscore']) + + results = self.lib.albums(q) + self.assert_albums_matched(results, [u'album with underscore']) + + def test_escape_percent(self): + self.add_album(path=b'/a/%/title.mp3', title=u'with percent', + album=u'album with percent') + q = u'path:/a/%' + results = self.lib.items(q) + self.assert_items_matched(results, [u'with percent']) + + results = self.lib.albums(q) + self.assert_albums_matched(results, [u'album with percent']) + + def test_escape_backslash(self): + self.add_album(path=br'/a/\x/title.mp3', title=u'with backslash', + album=u'album with backslash') + q = u'path:/a/\\\\x' + results = self.lib.items(q) + self.assert_items_matched(results, [u'with backslash']) + + results = self.lib.albums(q) + self.assert_albums_matched(results, [u'album with backslash']) + + def test_case_sensitivity(self): + self.add_album(path=b'/A/B/C2.mp3', title=u'caps path') + + makeq = partial(beets.library.PathQuery, u'path', '/A/B') + + results = self.lib.items(makeq(case_sensitive=True)) + self.assert_items_matched(results, [u'caps path']) + + results = self.lib.items(makeq(case_sensitive=False)) + self.assert_items_matched(results, [u'path item', u'caps path']) + + # Check for correct case sensitivity selection (this check + # only works on non-Windows OSes). + with _common.system_mock('Darwin'): + # exists = True and samefile = True => Case insensitive + q = makeq() + self.assertEqual(q.case_sensitive, False) + + # exists = True and samefile = False => Case sensitive + self.patcher_samefile.stop() + self.patcher_samefile.start().return_value = False + try: + q = makeq() + self.assertEqual(q.case_sensitive, True) + finally: + self.patcher_samefile.stop() + self.patcher_samefile.start().return_value = True + + # Test platform-aware default sensitivity when the library path + # does not exist. For the duration of this check, we change the + # `os.path.exists` mock to return False. + self.patcher_exists.stop() + self.patcher_exists.start().return_value = False + try: + with _common.system_mock('Darwin'): + q = makeq() + self.assertEqual(q.case_sensitive, True) + + with _common.system_mock('Windows'): + q = makeq() + self.assertEqual(q.case_sensitive, False) + finally: + # Restore the `os.path.exists` mock to its original state. + self.patcher_exists.stop() + self.patcher_exists.start().return_value = True + + @patch('beets.library.os') + def test_path_sep_detection(self, mock_os): + mock_os.sep = '/' + mock_os.altsep = None + mock_os.path.exists = lambda p: True + is_path = beets.library.PathQuery.is_path_query + + self.assertTrue(is_path('/foo/bar')) + self.assertTrue(is_path('foo/bar')) + self.assertTrue(is_path('foo/')) + self.assertFalse(is_path('foo')) + self.assertTrue(is_path('foo/:bar')) + self.assertFalse(is_path('foo:bar/')) + self.assertFalse(is_path('foo:/bar')) + + def test_detect_absolute_path(self): + if platform.system() == 'Windows': + # Because the absolute path begins with something like C:, we + # can't disambiguate it from an ordinary query. + self.skipTest('Windows absolute paths do not work as queries') + + # Don't patch `os.path.exists`; we'll actually create a file when + # it exists. + self.patcher_exists.stop() + is_path = beets.library.PathQuery.is_path_query + + try: + path = self.touch(os.path.join(b'foo', b'bar')) + + # The file itself. + self.assertTrue(is_path(path)) + + # The parent directory. 
+ parent = os.path.dirname(path) + self.assertTrue(is_path(parent)) + + # Some non-existent path. + self.assertFalse(is_path(path + u'baz')) + + finally: + # Restart the `os.path.exists` patch. + self.patcher_exists.start() + + def test_detect_relative_path(self): + self.patcher_exists.stop() + is_path = beets.library.PathQuery.is_path_query + + try: + self.touch(os.path.join(b'foo', b'bar')) + + # Temporarily change directory so relative paths work. + cur_dir = os.getcwd() + try: + os.chdir(self.temp_dir) + self.assertTrue(is_path(u'foo/')) + self.assertTrue(is_path(u'foo/bar')) + self.assertTrue(is_path(u'foo/bar:tagada')) + self.assertFalse(is_path(u'bar')) + finally: + os.chdir(cur_dir) + + finally: + self.patcher_exists.start() class IntQueryTest(unittest.TestCase, TestHelper): @@ -400,32 +647,32 @@ def test_exact_value_match(self): item = self.add_item(bpm=120) - matched = self.lib.items('bpm:120').get() + matched = self.lib.items(u'bpm:120').get() self.assertEqual(item.id, matched.id) def test_range_match(self): item = self.add_item(bpm=120) self.add_item(bpm=130) - matched = self.lib.items('bpm:110..125') + matched = self.lib.items(u'bpm:110..125') self.assertEqual(1, len(matched)) self.assertEqual(item.id, matched.get().id) def test_flex_range_match(self): Item._types = {'myint': types.Integer()} item = self.add_item(myint=2) - matched = self.lib.items('myint:2').get() + matched = self.lib.items(u'myint:2').get() self.assertEqual(item.id, matched.id) def test_flex_dont_match_missing(self): Item._types = {'myint': types.Integer()} self.add_item() - matched = self.lib.items('myint:2').get() + matched = self.lib.items(u'myint:2').get() self.assertIsNone(matched) def test_no_substring_match(self): self.add_item(bpm=120) - matched = self.lib.items('bpm:12').get() + matched = self.lib.items(u'bpm:12').get() self.assertIsNone(matched) @@ -441,35 +688,35 @@ def test_parse_true(self): item_true = self.add_item(comp=True) item_false = self.add_item(comp=False) - matched = self.lib.items('comp:true') + matched = self.lib.items(u'comp:true') self.assertInResult(item_true, matched) self.assertNotInResult(item_false, matched) def test_flex_parse_true(self): item_true = self.add_item(flexbool=True) item_false = self.add_item(flexbool=False) - matched = self.lib.items('flexbool:true') + matched = self.lib.items(u'flexbool:true') self.assertInResult(item_true, matched) self.assertNotInResult(item_false, matched) def test_flex_parse_false(self): item_true = self.add_item(flexbool=True) item_false = self.add_item(flexbool=False) - matched = self.lib.items('flexbool:false') + matched = self.lib.items(u'flexbool:false') self.assertInResult(item_false, matched) self.assertNotInResult(item_true, matched) def test_flex_parse_1(self): item_true = self.add_item(flexbool=True) item_false = self.add_item(flexbool=False) - matched = self.lib.items('flexbool:1') + matched = self.lib.items(u'flexbool:1') self.assertInResult(item_true, matched) self.assertNotInResult(item_false, matched) def test_flex_parse_0(self): item_true = self.add_item(flexbool=True) item_false = self.add_item(flexbool=False) - matched = self.lib.items('flexbool:0') + matched = self.lib.items(u'flexbool:0') self.assertInResult(item_false, matched) self.assertNotInResult(item_true, matched) @@ -477,27 +724,27 @@ # TODO this should be the other way around item_true = self.add_item(flexbool=True) item_false = self.add_item(flexbool=False) - matched = self.lib.items('flexbool:something') + matched = self.lib.items(u'flexbool:something') 
self.assertInResult(item_false, matched) self.assertNotInResult(item_true, matched) class DefaultSearchFieldsTest(DummyDataTestCase): def test_albums_matches_album(self): - albums = list(self.lib.albums('baz')) + albums = list(self.lib.albums(u'baz')) self.assertEqual(len(albums), 1) def test_albums_matches_albumartist(self): - albums = list(self.lib.albums(['album artist'])) + albums = list(self.lib.albums([u'album artist'])) self.assertEqual(len(albums), 1) def test_items_matches_title(self): - items = self.lib.items('beets') - self.assert_matched(items, ['beets 4 eva']) + items = self.lib.items(u'beets') + self.assert_items_matched(items, [u'beets 4 eva']) def test_items_does_not_match_year(self): - items = self.lib.items('2001') - self.assert_matched(items, []) + items = self.lib.items(u'2001') + self.assert_items_matched(items, []) class NoneQueryTest(unittest.TestCase, TestHelper): @@ -509,21 +756,260 @@ singleton = self.add_item() album_item = self.add_album().items().get() - matched = self.lib.items(NoneQuery('album_id')) + matched = self.lib.items(NoneQuery(u'album_id')) self.assertInResult(singleton, matched) self.assertNotInResult(album_item, matched) def test_match_after_set_none(self): item = self.add_item(rg_track_gain=0) - matched = self.lib.items(NoneQuery('rg_track_gain')) + matched = self.lib.items(NoneQuery(u'rg_track_gain')) self.assertNotInResult(item, matched) item['rg_track_gain'] = None item.store() - matched = self.lib.items(NoneQuery('rg_track_gain')) + matched = self.lib.items(NoneQuery(u'rg_track_gain')) self.assertInResult(item, matched) +class NotQueryMatchTest(_common.TestCase): + """Test `query.NotQuery` matching against a single item, using the same + cases and assertions as on `MatchTest`, plus assertion on the negated + queries (ie. assertTrue(q) -> assertFalse(NotQuery(q))). 
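These checks pair every positive assertion with its negation through `NotQuery`. Conceptually the wrapper simply inverts its subquery's `match()`; a bare-bones sketch of that behaviour (an illustration, not the dbcore implementation):

    class NotSketch(object):
        """Match exactly the items the wrapped query does not match."""
        def __init__(self, subquery):
            self.subquery = subquery

        def match(self, item):
            # Invert the wrapped query's decision for this item.
            return not self.subquery.match(item)

Hence assertTrue(q.match(item)) goes together with assertFalse(NotSketch(q).match(item)), which is the pattern each of the following test methods follows.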
+ """ + def setUp(self): + super(NotQueryMatchTest, self).setUp() + self.item = _common.item() + + def test_regex_match_positive(self): + q = dbcore.query.RegexpQuery(u'album', u'^the album$') + self.assertTrue(q.match(self.item)) + self.assertFalse(dbcore.query.NotQuery(q).match(self.item)) + + def test_regex_match_negative(self): + q = dbcore.query.RegexpQuery(u'album', u'^album$') + self.assertFalse(q.match(self.item)) + self.assertTrue(dbcore.query.NotQuery(q).match(self.item)) + + def test_regex_match_non_string_value(self): + q = dbcore.query.RegexpQuery(u'disc', u'^6$') + self.assertTrue(q.match(self.item)) + self.assertFalse(dbcore.query.NotQuery(q).match(self.item)) + + def test_substring_match_positive(self): + q = dbcore.query.SubstringQuery(u'album', u'album') + self.assertTrue(q.match(self.item)) + self.assertFalse(dbcore.query.NotQuery(q).match(self.item)) + + def test_substring_match_negative(self): + q = dbcore.query.SubstringQuery(u'album', u'ablum') + self.assertFalse(q.match(self.item)) + self.assertTrue(dbcore.query.NotQuery(q).match(self.item)) + + def test_substring_match_non_string_value(self): + q = dbcore.query.SubstringQuery(u'disc', u'6') + self.assertTrue(q.match(self.item)) + self.assertFalse(dbcore.query.NotQuery(q).match(self.item)) + + def test_year_match_positive(self): + q = dbcore.query.NumericQuery(u'year', u'1') + self.assertTrue(q.match(self.item)) + self.assertFalse(dbcore.query.NotQuery(q).match(self.item)) + + def test_year_match_negative(self): + q = dbcore.query.NumericQuery(u'year', u'10') + self.assertFalse(q.match(self.item)) + self.assertTrue(dbcore.query.NotQuery(q).match(self.item)) + + def test_bitrate_range_positive(self): + q = dbcore.query.NumericQuery(u'bitrate', u'100000..200000') + self.assertTrue(q.match(self.item)) + self.assertFalse(dbcore.query.NotQuery(q).match(self.item)) + + def test_bitrate_range_negative(self): + q = dbcore.query.NumericQuery(u'bitrate', u'200000..300000') + self.assertFalse(q.match(self.item)) + self.assertTrue(dbcore.query.NotQuery(q).match(self.item)) + + def test_open_range(self): + q = dbcore.query.NumericQuery(u'bitrate', u'100000..') + dbcore.query.NotQuery(q) + + +class NotQueryTest(DummyDataTestCase): + """Test `query.NotQuery` against the dummy data: + - `test_type_xxx`: tests for the negation of a particular XxxQuery class. 
+ - `test_get_yyy`: tests on query strings (similar to `GetTest`) + """ + def assertNegationProperties(self, q): # noqa + """Given a Query `q`, assert that: + - q OR not(q) == all items + - q AND not(q) == 0 + - not(not(q)) == q + """ + not_q = dbcore.query.NotQuery(q) + # assert using OrQuery, AndQuery + q_or = dbcore.query.OrQuery([q, not_q]) + q_and = dbcore.query.AndQuery([q, not_q]) + self.assert_items_matched_all(self.lib.items(q_or)) + self.assert_items_matched(self.lib.items(q_and), []) + + # assert manually checking the item titles + all_titles = set([i.title for i in self.lib.items()]) + q_results = set([i.title for i in self.lib.items(q)]) + not_q_results = set([i.title for i in self.lib.items(not_q)]) + self.assertEqual(q_results.union(not_q_results), all_titles) + self.assertEqual(q_results.intersection(not_q_results), set()) + + # round trip + not_not_q = dbcore.query.NotQuery(not_q) + self.assertEqual(set([i.title for i in self.lib.items(q)]), + set([i.title for i in self.lib.items(not_not_q)])) + + def test_type_and(self): + # not(a and b) <-> not(a) or not(b) + q = dbcore.query.AndQuery([ + dbcore.query.BooleanQuery(u'comp', True), + dbcore.query.NumericQuery(u'year', u'2002')], + ) + not_results = self.lib.items(dbcore.query.NotQuery(q)) + self.assert_items_matched(not_results, [u'foo bar', u'beets 4 eva']) + self.assertNegationProperties(q) + + def test_type_anyfield(self): + q = dbcore.query.AnyFieldQuery(u'foo', [u'title', u'artist', u'album'], + dbcore.query.SubstringQuery) + not_results = self.lib.items(dbcore.query.NotQuery(q)) + self.assert_items_matched(not_results, [u'baz qux']) + self.assertNegationProperties(q) + + def test_type_boolean(self): + q = dbcore.query.BooleanQuery(u'comp', True) + not_results = self.lib.items(dbcore.query.NotQuery(q)) + self.assert_items_matched(not_results, [u'beets 4 eva']) + self.assertNegationProperties(q) + + def test_type_date(self): + q = dbcore.query.DateQuery(u'mtime', u'0.0') + not_results = self.lib.items(dbcore.query.NotQuery(q)) + self.assert_items_matched(not_results, []) + self.assertNegationProperties(q) + + def test_type_false(self): + q = dbcore.query.FalseQuery() + not_results = self.lib.items(dbcore.query.NotQuery(q)) + self.assert_items_matched_all(not_results) + self.assertNegationProperties(q) + + def test_type_match(self): + q = dbcore.query.MatchQuery(u'year', u'2003') + not_results = self.lib.items(dbcore.query.NotQuery(q)) + self.assert_items_matched(not_results, [u'foo bar', u'baz qux']) + self.assertNegationProperties(q) + + def test_type_none(self): + q = dbcore.query.NoneQuery(u'rg_track_gain') + not_results = self.lib.items(dbcore.query.NotQuery(q)) + self.assert_items_matched(not_results, []) + self.assertNegationProperties(q) + + def test_type_numeric(self): + q = dbcore.query.NumericQuery(u'year', u'2001..2002') + not_results = self.lib.items(dbcore.query.NotQuery(q)) + self.assert_items_matched(not_results, [u'beets 4 eva']) + self.assertNegationProperties(q) + + def test_type_or(self): + # not(a or b) <-> not(a) and not(b) + q = dbcore.query.OrQuery([dbcore.query.BooleanQuery(u'comp', True), + dbcore.query.NumericQuery(u'year', u'2002')]) + not_results = self.lib.items(dbcore.query.NotQuery(q)) + self.assert_items_matched(not_results, [u'beets 4 eva']) + self.assertNegationProperties(q) + + def test_type_regexp(self): + q = dbcore.query.RegexpQuery(u'artist', u'^t') + not_results = self.lib.items(dbcore.query.NotQuery(q)) + self.assert_items_matched(not_results, [u'foo bar']) + 
self.assertNegationProperties(q) + + def test_type_substring(self): + q = dbcore.query.SubstringQuery(u'album', u'ba') + not_results = self.lib.items(dbcore.query.NotQuery(q)) + self.assert_items_matched(not_results, [u'beets 4 eva']) + self.assertNegationProperties(q) + + def test_type_true(self): + q = dbcore.query.TrueQuery() + not_results = self.lib.items(dbcore.query.NotQuery(q)) + self.assert_items_matched(not_results, []) + self.assertNegationProperties(q) + + def test_get_prefixes_keyed(self): + """Test both negation prefixes on a keyed query.""" + q0 = u'-title:qux' + q1 = u'^title:qux' + results0 = self.lib.items(q0) + results1 = self.lib.items(q1) + self.assert_items_matched(results0, [u'foo bar', u'beets 4 eva']) + self.assert_items_matched(results1, [u'foo bar', u'beets 4 eva']) + + def test_get_prefixes_unkeyed(self): + """Test both negation prefixes on an unkeyed query.""" + q0 = u'-qux' + q1 = u'^qux' + results0 = self.lib.items(q0) + results1 = self.lib.items(q1) + self.assert_items_matched(results0, [u'foo bar', u'beets 4 eva']) + self.assert_items_matched(results1, [u'foo bar', u'beets 4 eva']) + + def test_get_one_keyed_regexp(self): + q = u'-artist::t.+r' + results = self.lib.items(q) + self.assert_items_matched(results, [u'foo bar', u'baz qux']) + + def test_get_one_unkeyed_regexp(self): + q = u'-:x$' + results = self.lib.items(q) + self.assert_items_matched(results, [u'foo bar', u'beets 4 eva']) + + def test_get_multiple_terms(self): + q = u'baz -bar' + results = self.lib.items(q) + self.assert_items_matched(results, [u'baz qux']) + + def test_get_mixed_terms(self): + q = u'baz -title:bar' + results = self.lib.items(q) + self.assert_items_matched(results, [u'baz qux']) + + def test_fast_vs_slow(self): + """Test that the results are the same regardless of the `fast` flag + for negated `FieldQuery`s. + + TODO: investigate NoneQuery(fast=False), as it is raising + AttributeError: type object 'NoneQuery' has no attribute 'field' + at NoneQuery.match() (due to being @classmethod, and no self?) + """ + classes = [(dbcore.query.DateQuery, [u'mtime', u'0.0']), + (dbcore.query.MatchQuery, [u'artist', u'one']), + # (dbcore.query.NoneQuery, ['rg_track_gain']), + (dbcore.query.NumericQuery, [u'year', u'2002']), + (dbcore.query.StringFieldQuery, [u'year', u'2001']), + (dbcore.query.RegexpQuery, [u'album', u'^.a']), + (dbcore.query.SubstringQuery, [u'title', u'x'])] + + for klass, args in classes: + q_fast = dbcore.query.NotQuery(klass(*(args + [True]))) + q_slow = dbcore.query.NotQuery(klass(*(args + [False]))) + + try: + self.assertEqual([i.title for i in self.lib.items(q_fast)], + [i.title for i in self.lib.items(q_slow)]) + except NotImplementedError: + # ignore classes that do not provide `fast` implementation + pass + + def suite(): return unittest.TestLoader().loadTestsFromName(__name__) diff -Nru beets-1.3.8+dfsg/test/test_replaygain.py beets-1.3.19/test/test_replaygain.py --- beets-1.3.8+dfsg/test/test_replaygain.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_replaygain.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Thomas Scholtes +# Copyright 2016, Thomas Scholtes # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -13,16 +14,21 @@ # included in all copies or substantial portions of the Software. 
-from _common import unittest -from helper import TestHelper, has_program +from __future__ import division, absolute_import, print_function +from test._common import unittest +from test.helper import TestHelper, has_program + +from beets import config from beets.mediafile import MediaFile +from beetsplug.replaygain import (FatalGstreamerPluginReplayGainError, + GStreamerBackend) try: import gi gi.require_version('Gst', '1.0') GST_AVAILABLE = True -except ImportError, ValueError: +except (ImportError, ValueError): GST_AVAILABLE = False if any(has_program(cmd, ['-v']) for cmd in ['mp3gain', 'aacgain']): @@ -30,20 +36,34 @@ else: GAIN_PROG_AVAILABLE = False +if has_program('bs1770gain', ['--replaygain']): + LOUDNESS_PROG_AVAILABLE = True +else: + LOUDNESS_PROG_AVAILABLE = False + class ReplayGainCliTestBase(TestHelper): def setUp(self): self.setup_beets() + self.config['replaygain']['backend'] = self.backend try: self.load_plugins('replaygain') except: - self.teardown_beets() - self.unload_plugins() - raise + import sys + # store exception info so an error in teardown does not swallow it + exc_info = sys.exc_info() + try: + self.teardown_beets() + self.unload_plugins() + except: + # if load_plugins() failed then setup is incomplete and + # teardown operations may fail. In particular # {Item,Album} + # may not have the _original_types attribute in unload_plugins + pass + raise exc_info[1], None, exc_info[2] - self.config['replaygain']['backend'] = self.backend album = self.add_album_fixture(2) for item in album.items(): self._reset_replaygain(item) @@ -69,6 +89,13 @@ self.assertIsNone(mediafile.rg_track_gain) self.run_command('replaygain') + + # Skip the test if rg_track_peak and rg_track gain is None, assuming + # that it could only happen if the decoder plugins are missing. + if all(i.rg_track_peak is None and i.rg_track_gain is None + for i in self.lib.items()): + self.skipTest(u'decoder plugins could not be loaded.') + for item in self.lib.items(): self.assertIsNotNone(item.rg_track_peak) self.assertIsNotNone(item.rg_track_gain) @@ -79,11 +106,11 @@ mediafile.rg_track_gain, item.rg_track_gain, places=2) def test_cli_skips_calculated_tracks(self): - self.run_command('replaygain') + self.run_command(u'replaygain') item = self.lib.items()[0] peak = item.rg_track_peak item.rg_track_gain = 0.0 - self.run_command('replaygain') + self.run_command(u'replaygain') self.assertEqual(item.rg_track_gain, 0.0) self.assertEqual(item.rg_track_peak, peak) @@ -93,7 +120,7 @@ self.assertIsNone(mediafile.rg_album_peak) self.assertIsNone(mediafile.rg_album_gain) - self.run_command('replaygain', '-a') + self.run_command(u'replaygain', u'-a') peaks = [] gains = [] @@ -110,16 +137,33 @@ self.assertNotEqual(max(peaks), 0.0) -@unittest.skipIf(not GST_AVAILABLE, 'gstreamer cannot be found') +@unittest.skipIf(not GST_AVAILABLE, u'gstreamer cannot be found') class ReplayGainGstCliTest(ReplayGainCliTestBase, unittest.TestCase): backend = u'gstreamer' + def setUp(self): + try: + # Check if required plugins can be loaded by instantiating a + # GStreamerBackend (via its .__init__). + config['replaygain']['targetlevel'] = 89 + GStreamerBackend(config['replaygain'], None) + except FatalGstreamerPluginReplayGainError as e: + # Skip the test if plugins could not be loaded. 
+ self.skipTest(str(e)) + + super(ReplayGainGstCliTest, self).setUp() -@unittest.skipIf(not GAIN_PROG_AVAILABLE, 'no *gain command found') + +@unittest.skipIf(not GAIN_PROG_AVAILABLE, u'no *gain command found') class ReplayGainCmdCliTest(ReplayGainCliTestBase, unittest.TestCase): backend = u'command' +@unittest.skipIf(not LOUDNESS_PROG_AVAILABLE, u'bs1770gain cannot be found') +class ReplayGainLdnsCliTest(ReplayGainCliTestBase, unittest.TestCase): + backend = u'bs1770gain' + + def suite(): return unittest.TestLoader().loadTestsFromName(__name__) diff -Nru beets-1.3.8+dfsg/test/test_smartplaylist.py beets-1.3.19/test/test_smartplaylist.py --- beets-1.3.8+dfsg/test/test_smartplaylist.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/test_smartplaylist.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,228 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Bruno Cauet. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +from __future__ import division, absolute_import, print_function + +from os import path, remove +from tempfile import mkdtemp +from shutil import rmtree + +from mock import Mock, MagicMock + +from beetsplug.smartplaylist import SmartPlaylistPlugin +from beets.library import Item, Album, parse_query_string +from beets.dbcore import OrQuery +from beets.dbcore.query import NullSort, MultipleSort, FixedFieldSort +from beets.util import syspath, bytestring_path +from beets.ui import UserError +from beets import config + +from test._common import unittest +from test.helper import TestHelper + + +class SmartPlaylistTest(unittest.TestCase): + def test_build_queries(self): + spl = SmartPlaylistPlugin() + self.assertEqual(spl._matched_playlists, None) + self.assertEqual(spl._unmatched_playlists, None) + + config['smartplaylist']['playlists'].set([]) + spl.build_queries() + self.assertEqual(spl._matched_playlists, set()) + self.assertEqual(spl._unmatched_playlists, set()) + + config['smartplaylist']['playlists'].set([ + {'name': u'foo', + 'query': u'FOO foo'}, + {'name': u'bar', + 'album_query': [u'BAR bar1', u'BAR bar2']}, + {'name': u'baz', + 'query': u'BAZ baz', + 'album_query': u'BAZ baz'} + ]) + spl.build_queries() + self.assertEqual(spl._matched_playlists, set()) + foo_foo = parse_query_string(u'FOO foo', Item) + baz_baz = parse_query_string(u'BAZ baz', Item) + baz_baz2 = parse_query_string(u'BAZ baz', Album) + bar_bar = OrQuery((parse_query_string(u'BAR bar1', Album)[0], + parse_query_string(u'BAR bar2', Album)[0])) + self.assertEqual(spl._unmatched_playlists, set([ + (u'foo', foo_foo, (None, None)), + (u'baz', baz_baz, baz_baz2), + (u'bar', (None, None), (bar_bar, None)), + ])) + + def test_build_queries_with_sorts(self): + spl = SmartPlaylistPlugin() + config['smartplaylist']['playlists'].set([ + {'name': u'no_sort', + 'query': u'foo'}, + {'name': u'one_sort', + 'query': u'foo year+'}, + {'name': u'only_empty_sorts', + 'query': [u'foo', u'bar']}, + {'name': u'one_non_empty_sort', + 'query': [u'foo year+', 
u'bar']}, + {'name': u'multiple_sorts', + 'query': [u'foo year+', u'bar genre-']}, + {'name': u'mixed', + 'query': [u'foo year+', u'bar', u'baz genre+ id-']} + ]) + + spl.build_queries() + sorts = dict((name, sort) + for name, (_, sort), _ in spl._unmatched_playlists) + + asseq = self.assertEqual # less cluttered code + sort = FixedFieldSort # short cut since we're only dealing with this + asseq(sorts["no_sort"], NullSort()) + asseq(sorts["one_sort"], sort(u'year')) + asseq(sorts["only_empty_sorts"], None) + asseq(sorts["one_non_empty_sort"], sort(u'year')) + asseq(sorts["multiple_sorts"], + MultipleSort([sort('year'), sort(u'genre', False)])) + asseq(sorts["mixed"], + MultipleSort([sort('year'), sort(u'genre'), sort(u'id', False)])) + + def test_matches(self): + spl = SmartPlaylistPlugin() + + a = MagicMock(Album) + i = MagicMock(Item) + + self.assertFalse(spl.matches(i, None, None)) + self.assertFalse(spl.matches(a, None, None)) + + query = Mock() + query.match.side_effect = {i: True}.__getitem__ + self.assertTrue(spl.matches(i, query, None)) + self.assertFalse(spl.matches(a, query, None)) + + a_query = Mock() + a_query.match.side_effect = {a: True}.__getitem__ + self.assertFalse(spl.matches(i, None, a_query)) + self.assertTrue(spl.matches(a, None, a_query)) + + self.assertTrue(spl.matches(i, query, a_query)) + self.assertTrue(spl.matches(a, query, a_query)) + + def test_db_changes(self): + spl = SmartPlaylistPlugin() + + nones = None, None + pl1 = '1', (u'q1', None), nones + pl2 = '2', (u'q2', None), nones + pl3 = '3', (u'q3', None), nones + + spl._unmatched_playlists = set([pl1, pl2, pl3]) + spl._matched_playlists = set() + + spl.matches = Mock(return_value=False) + spl.db_change(None, u"nothing") + self.assertEqual(spl._unmatched_playlists, set([pl1, pl2, pl3])) + self.assertEqual(spl._matched_playlists, set()) + + spl.matches.side_effect = lambda _, q, __: q == u'q3' + spl.db_change(None, u"matches 3") + self.assertEqual(spl._unmatched_playlists, set([pl1, pl2])) + self.assertEqual(spl._matched_playlists, set([pl3])) + + spl.matches.side_effect = lambda _, q, __: q == u'q1' + spl.db_change(None, u"matches 3") + self.assertEqual(spl._matched_playlists, set([pl1, pl3])) + self.assertEqual(spl._unmatched_playlists, set([pl2])) + + def test_playlist_update(self): + spl = SmartPlaylistPlugin() + + i = Mock(path=b'/tagada.mp3') + i.evaluate_template.side_effect = lambda x, _: x + q = Mock() + a_q = Mock() + lib = Mock() + lib.items.return_value = [i] + lib.albums.return_value = [] + pl = b'my_playlist.m3u', (q, None), (a_q, None) + spl._matched_playlists = [pl] + + dir = bytestring_path(mkdtemp()) + config['smartplaylist']['relative_to'] = False + config['smartplaylist']['playlist_dir'] = dir + try: + spl.update_playlists(lib) + except Exception: + rmtree(dir) + raise + + lib.items.assert_called_once_with(q, None) + lib.albums.assert_called_once_with(a_q, None) + + m3u_filepath = path.join(dir, pl[0]) + self.assertTrue(path.exists(m3u_filepath)) + with open(syspath(m3u_filepath), 'rb') as f: + content = f.read() + rmtree(dir) + + self.assertEqual(content, b'/tagada.mp3\n') + + +class SmartPlaylistCLITest(unittest.TestCase, TestHelper): + def setUp(self): + self.setup_beets() + + self.item = self.add_item() + config['smartplaylist']['playlists'].set([ + {'name': 'my_playlist.m3u', + 'query': self.item.title}, + {'name': 'all.m3u', + 'query': u''} + ]) + config['smartplaylist']['playlist_dir'].set(self.temp_dir) + self.load_plugins('smartplaylist') + + def tearDown(self): + 
self.unload_plugins() + self.teardown_beets() + + def test_splupdate(self): + with self.assertRaises(UserError): + self.run_with_output(u'splupdate', u'tagada') + + self.run_with_output(u'splupdate', u'my_playlist') + m3u_path = path.join(self.temp_dir, b'my_playlist.m3u') + self.assertTrue(path.exists(m3u_path)) + with open(m3u_path, 'rb') as f: + self.assertEqual(f.read(), self.item.path + b"\n") + remove(m3u_path) + + self.run_with_output(u'splupdate', u'my_playlist.m3u') + with open(m3u_path, 'rb') as f: + self.assertEqual(f.read(), self.item.path + b"\n") + remove(m3u_path) + + self.run_with_output(u'splupdate') + for name in (b'my_playlist.m3u', b'all.m3u'): + with open(path.join(self.temp_dir, name), 'rb') as f: + self.assertEqual(f.read(), self.item.path + b"\n") + + +def suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_sort.py beets-1.3.19/test/test_sort.py --- beets-1.3.8+dfsg/test/test_sort.py 2014-09-16 21:45:19.000000000 +0000 +++ beets-1.3.19/test/test_sort.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,8 +15,10 @@ """Various tests for querying the library database. """ -import _common -from _common import unittest +from __future__ import division, absolute_import, print_function + +from test import _common +from test._common import unittest import beets.library from beets import dbcore from beets import config @@ -29,158 +32,175 @@ self.lib = beets.library.Library(':memory:') albums = [_common.album() for _ in range(3)] - albums[0].album = "album A" - albums[0].genre = "Rock" - albums[0].year = "2001" - albums[0].flex1 = "flex1-1" - albums[0].flex2 = "flex2-A" - albums[0].albumartist = "foo" + albums[0].album = u"Album A" + albums[0].genre = u"Rock" + albums[0].year = 2001 + albums[0].flex1 = u"Flex1-1" + albums[0].flex2 = u"Flex2-A" + albums[0].albumartist = u"Foo" albums[0].albumartist_sort = None - albums[1].album = "album B" - albums[1].genre = "Rock" - albums[1].year = "2001" - albums[1].flex1 = "flex1-2" - albums[1].flex2 = "flex2-A" - albums[1].albumartist = "bar" + albums[1].album = u"Album B" + albums[1].genre = u"Rock" + albums[1].year = 2001 + albums[1].flex1 = u"Flex1-2" + albums[1].flex2 = u"Flex2-A" + albums[1].albumartist = u"Bar" albums[1].albumartist_sort = None - albums[2].album = "album C" - albums[2].genre = "Jazz" - albums[2].year = "2005" - albums[2].flex1 = "flex1-1" - albums[2].flex2 = "flex2-B" - albums[2].albumartist = "baz" + albums[2].album = u"Album C" + albums[2].genre = u"Jazz" + albums[2].year = 2005 + albums[2].flex1 = u"Flex1-1" + albums[2].flex2 = u"Flex2-B" + albums[2].albumartist = u"Baz" albums[2].albumartist_sort = None for album in albums: self.lib.add(album) items = [_common.item() for _ in range(4)] - items[0].title = 'foo bar' - items[0].artist = 'one' - items[0].album = 'baz' + items[0].title = u'Foo bar' + items[0].artist = u'One' + items[0].album = u'Baz' items[0].year = 2001 items[0].comp = True - items[0].flex1 = "flex1-0" - items[0].flex2 = "flex2-A" + items[0].flex1 = u"Flex1-0" + items[0].flex2 = u"Flex2-A" items[0].album_id = albums[0].id items[0].artist_sort = None - items[1].title = 'baz qux' - items[1].artist = 'two' - 
items[1].album = 'baz' + items[0].path = "/path0.mp3" + items[0].track = 1 + items[1].title = u'Baz qux' + items[1].artist = u'Two' + items[1].album = u'Baz' items[1].year = 2002 items[1].comp = True - items[1].flex1 = "flex1-1" - items[1].flex2 = "flex2-A" + items[1].flex1 = u"Flex1-1" + items[1].flex2 = u"Flex2-A" items[1].album_id = albums[0].id items[1].artist_sort = None - items[2].title = 'beets 4 eva' - items[2].artist = 'three' - items[2].album = 'foo' + items[1].path = "/patH1.mp3" + items[1].track = 2 + items[2].title = u'Beets 4 eva' + items[2].artist = u'Three' + items[2].album = u'Foo' items[2].year = 2003 items[2].comp = False - items[2].flex1 = "flex1-2" - items[2].flex2 = "flex1-B" + items[2].flex1 = u"Flex1-2" + items[2].flex2 = u"Flex1-B" items[2].album_id = albums[1].id items[2].artist_sort = None - items[3].title = 'beets 4 eva' - items[3].artist = 'three' - items[3].album = 'foo2' + items[2].path = "/paTH2.mp3" + items[2].track = 3 + items[3].title = u'Beets 4 eva' + items[3].artist = u'Three' + items[3].album = u'Foo2' items[3].year = 2004 items[3].comp = False - items[3].flex1 = "flex1-2" - items[3].flex2 = "flex1-C" + items[3].flex1 = u"Flex1-2" + items[3].flex2 = u"Flex1-C" items[3].album_id = albums[2].id items[3].artist_sort = None + items[3].path = "/PATH3.mp3" + items[3].track = 4 for item in items: self.lib.add(item) class SortFixedFieldTest(DummyDataTestCase): def test_sort_asc(self): - q = '' - sort = dbcore.query.FixedFieldSort("year", True) + q = u'' + sort = dbcore.query.FixedFieldSort(u"year", True) results = self.lib.items(q, sort) self.assertLessEqual(results[0]['year'], results[1]['year']) self.assertEqual(results[0]['year'], 2001) # same thing with query string - q = 'year+' + q = u'year+' results2 = self.lib.items(q) for r1, r2 in zip(results, results2): self.assertEqual(r1.id, r2.id) def test_sort_desc(self): - q = '' - sort = dbcore.query.FixedFieldSort("year", False) + q = u'' + sort = dbcore.query.FixedFieldSort(u"year", False) results = self.lib.items(q, sort) self.assertGreaterEqual(results[0]['year'], results[1]['year']) self.assertEqual(results[0]['year'], 2004) # same thing with query string - q = 'year-' + q = u'year-' results2 = self.lib.items(q) for r1, r2 in zip(results, results2): self.assertEqual(r1.id, r2.id) def test_sort_two_field_asc(self): - q = '' - s1 = dbcore.query.FixedFieldSort("album", True) - s2 = dbcore.query.FixedFieldSort("year", True) + q = u'' + s1 = dbcore.query.FixedFieldSort(u"album", True) + s2 = dbcore.query.FixedFieldSort(u"year", True) sort = dbcore.query.MultipleSort() sort.add_sort(s1) sort.add_sort(s2) results = self.lib.items(q, sort) self.assertLessEqual(results[0]['album'], results[1]['album']) self.assertLessEqual(results[1]['album'], results[2]['album']) - self.assertEqual(results[0]['album'], 'baz') - self.assertEqual(results[1]['album'], 'baz') + self.assertEqual(results[0]['album'], u'Baz') + self.assertEqual(results[1]['album'], u'Baz') self.assertLessEqual(results[0]['year'], results[1]['year']) # same thing with query string - q = 'album+ year+' + q = u'album+ year+' results2 = self.lib.items(q) for r1, r2 in zip(results, results2): self.assertEqual(r1.id, r2.id) + def test_sort_path_field(self): + q = u'' + sort = dbcore.query.FixedFieldSort('path', True) + results = self.lib.items(q, sort) + self.assertEqual(results[0]['path'], b'/path0.mp3') + self.assertEqual(results[1]['path'], b'/patH1.mp3') + self.assertEqual(results[2]['path'], b'/paTH2.mp3') + self.assertEqual(results[3]['path'], 
b'/PATH3.mp3') + class SortFlexFieldTest(DummyDataTestCase): def test_sort_asc(self): - q = '' - sort = dbcore.query.SlowFieldSort("flex1", True) + q = u'' + sort = dbcore.query.SlowFieldSort(u"flex1", True) results = self.lib.items(q, sort) self.assertLessEqual(results[0]['flex1'], results[1]['flex1']) - self.assertEqual(results[0]['flex1'], 'flex1-0') + self.assertEqual(results[0]['flex1'], u'Flex1-0') # same thing with query string - q = 'flex1+' + q = u'flex1+' results2 = self.lib.items(q) for r1, r2 in zip(results, results2): self.assertEqual(r1.id, r2.id) def test_sort_desc(self): - q = '' - sort = dbcore.query.SlowFieldSort("flex1", False) + q = u'' + sort = dbcore.query.SlowFieldSort(u"flex1", False) results = self.lib.items(q, sort) self.assertGreaterEqual(results[0]['flex1'], results[1]['flex1']) self.assertGreaterEqual(results[1]['flex1'], results[2]['flex1']) self.assertGreaterEqual(results[2]['flex1'], results[3]['flex1']) - self.assertEqual(results[0]['flex1'], 'flex1-2') + self.assertEqual(results[0]['flex1'], u'Flex1-2') # same thing with query string - q = 'flex1-' + q = u'flex1-' results2 = self.lib.items(q) for r1, r2 in zip(results, results2): self.assertEqual(r1.id, r2.id) def test_sort_two_field(self): - q = '' - s1 = dbcore.query.SlowFieldSort("flex2", False) - s2 = dbcore.query.SlowFieldSort("flex1", True) + q = u'' + s1 = dbcore.query.SlowFieldSort(u"flex2", False) + s2 = dbcore.query.SlowFieldSort(u"flex1", True) sort = dbcore.query.MultipleSort() sort.add_sort(s1) sort.add_sort(s2) results = self.lib.items(q, sort) self.assertGreaterEqual(results[0]['flex2'], results[1]['flex2']) self.assertGreaterEqual(results[1]['flex2'], results[2]['flex2']) - self.assertEqual(results[0]['flex2'], 'flex2-A') - self.assertEqual(results[1]['flex2'], 'flex2-A') + self.assertEqual(results[0]['flex2'], u'Flex2-A') + self.assertEqual(results[1]['flex2'], u'Flex2-A') self.assertLessEqual(results[0]['flex1'], results[1]['flex1']) # same thing with query string - q = 'flex2- flex1+' + q = u'flex2- flex1+' results2 = self.lib.items(q) for r1, r2 in zip(results, results2): self.assertEqual(r1.id, r2.id) @@ -188,44 +208,44 @@ class SortAlbumFixedFieldTest(DummyDataTestCase): def test_sort_asc(self): - q = '' - sort = dbcore.query.FixedFieldSort("year", True) + q = u'' + sort = dbcore.query.FixedFieldSort(u"year", True) results = self.lib.albums(q, sort) self.assertLessEqual(results[0]['year'], results[1]['year']) self.assertEqual(results[0]['year'], 2001) # same thing with query string - q = 'year+' + q = u'year+' results2 = self.lib.albums(q) for r1, r2 in zip(results, results2): self.assertEqual(r1.id, r2.id) def test_sort_desc(self): - q = '' - sort = dbcore.query.FixedFieldSort("year", False) + q = u'' + sort = dbcore.query.FixedFieldSort(u"year", False) results = self.lib.albums(q, sort) self.assertGreaterEqual(results[0]['year'], results[1]['year']) self.assertEqual(results[0]['year'], 2005) # same thing with query string - q = 'year-' + q = u'year-' results2 = self.lib.albums(q) for r1, r2 in zip(results, results2): self.assertEqual(r1.id, r2.id) def test_sort_two_field_asc(self): - q = '' - s1 = dbcore.query.FixedFieldSort("genre", True) - s2 = dbcore.query.FixedFieldSort("album", True) + q = u'' + s1 = dbcore.query.FixedFieldSort(u"genre", True) + s2 = dbcore.query.FixedFieldSort(u"album", True) sort = dbcore.query.MultipleSort() sort.add_sort(s1) sort.add_sort(s2) results = self.lib.albums(q, sort) self.assertLessEqual(results[0]['genre'], results[1]['genre']) 
self.assertLessEqual(results[1]['genre'], results[2]['genre']) - self.assertEqual(results[1]['genre'], 'Rock') - self.assertEqual(results[2]['genre'], 'Rock') + self.assertEqual(results[1]['genre'], u'Rock') + self.assertEqual(results[2]['genre'], u'Rock') self.assertLessEqual(results[1]['album'], results[2]['album']) # same thing with query string - q = 'genre+ album+' + q = u'genre+ album+' results2 = self.lib.albums(q) for r1, r2 in zip(results, results2): self.assertEqual(r1.id, r2.id) @@ -233,44 +253,44 @@ class SortAlbumFlexFieldTest(DummyDataTestCase): def test_sort_asc(self): - q = '' - sort = dbcore.query.SlowFieldSort("flex1", True) + q = u'' + sort = dbcore.query.SlowFieldSort(u"flex1", True) results = self.lib.albums(q, sort) self.assertLessEqual(results[0]['flex1'], results[1]['flex1']) self.assertLessEqual(results[1]['flex1'], results[2]['flex1']) # same thing with query string - q = 'flex1+' + q = u'flex1+' results2 = self.lib.albums(q) for r1, r2 in zip(results, results2): self.assertEqual(r1.id, r2.id) def test_sort_desc(self): - q = '' - sort = dbcore.query.SlowFieldSort("flex1", False) + q = u'' + sort = dbcore.query.SlowFieldSort(u"flex1", False) results = self.lib.albums(q, sort) self.assertGreaterEqual(results[0]['flex1'], results[1]['flex1']) self.assertGreaterEqual(results[1]['flex1'], results[2]['flex1']) # same thing with query string - q = 'flex1-' + q = u'flex1-' results2 = self.lib.albums(q) for r1, r2 in zip(results, results2): self.assertEqual(r1.id, r2.id) def test_sort_two_field_asc(self): - q = '' - s1 = dbcore.query.SlowFieldSort("flex2", True) - s2 = dbcore.query.SlowFieldSort("flex1", True) + q = u'' + s1 = dbcore.query.SlowFieldSort(u"flex2", True) + s2 = dbcore.query.SlowFieldSort(u"flex1", True) sort = dbcore.query.MultipleSort() sort.add_sort(s1) sort.add_sort(s2) results = self.lib.albums(q, sort) self.assertLessEqual(results[0]['flex2'], results[1]['flex2']) self.assertLessEqual(results[1]['flex2'], results[2]['flex2']) - self.assertEqual(results[0]['flex2'], 'flex2-A') - self.assertEqual(results[1]['flex2'], 'flex2-A') + self.assertEqual(results[0]['flex2'], u'Flex2-A') + self.assertEqual(results[1]['flex2'], u'Flex2-A') self.assertLessEqual(results[0]['flex1'], results[1]['flex1']) # same thing with query string - q = 'flex2+ flex1+' + q = u'flex2+ flex1+' results2 = self.lib.albums(q) for r1, r2 in zip(results, results2): self.assertEqual(r1.id, r2.id) @@ -278,25 +298,25 @@ class SortAlbumComputedFieldTest(DummyDataTestCase): def test_sort_asc(self): - q = '' - sort = dbcore.query.SlowFieldSort("path", True) + q = u'' + sort = dbcore.query.SlowFieldSort(u"path", True) results = self.lib.albums(q, sort) self.assertLessEqual(results[0]['path'], results[1]['path']) self.assertLessEqual(results[1]['path'], results[2]['path']) # same thing with query string - q = 'path+' + q = u'path+' results2 = self.lib.albums(q) for r1, r2 in zip(results, results2): self.assertEqual(r1.id, r2.id) def test_sort_desc(self): - q = '' - sort = dbcore.query.SlowFieldSort("path", False) + q = u'' + sort = dbcore.query.SlowFieldSort(u"path", False) results = self.lib.albums(q, sort) self.assertGreaterEqual(results[0]['path'], results[1]['path']) self.assertGreaterEqual(results[1]['path'], results[2]['path']) # same thing with query string - q = 'path-' + q = u'path-' results2 = self.lib.albums(q) for r1, r2 in zip(results, results2): self.assertEqual(r1.id, r2.id) @@ -304,24 +324,24 @@ class SortCombinedFieldTest(DummyDataTestCase): def test_computed_first(self): - q = 
'' - s1 = dbcore.query.SlowFieldSort("path", True) - s2 = dbcore.query.FixedFieldSort("year", True) + q = u'' + s1 = dbcore.query.SlowFieldSort(u"path", True) + s2 = dbcore.query.FixedFieldSort(u"year", True) sort = dbcore.query.MultipleSort() sort.add_sort(s1) sort.add_sort(s2) results = self.lib.albums(q, sort) self.assertLessEqual(results[0]['path'], results[1]['path']) self.assertLessEqual(results[1]['path'], results[2]['path']) - q = 'path+ year+' + q = u'path+ year+' results2 = self.lib.albums(q) for r1, r2 in zip(results, results2): self.assertEqual(r1.id, r2.id) def test_computed_second(self): - q = '' - s1 = dbcore.query.FixedFieldSort("year", True) - s2 = dbcore.query.SlowFieldSort("path", True) + q = u'' + s1 = dbcore.query.FixedFieldSort(u"year", True) + s2 = dbcore.query.SlowFieldSort(u"path", True) sort = dbcore.query.MultipleSort() sort.add_sort(s1) sort.add_sort(s2) @@ -329,7 +349,7 @@ self.assertLessEqual(results[0]['year'], results[1]['year']) self.assertLessEqual(results[1]['year'], results[2]['year']) self.assertLessEqual(results[0]['path'], results[1]['path']) - q = 'year+ path+' + q = u'year+ path+' results2 = self.lib.albums(q) for r1, r2 in zip(results, results2): self.assertEqual(r1.id, r2.id) @@ -355,6 +375,163 @@ self.assertGreater(results[0].albumartist, results[1].albumartist) +class CaseSensitivityTest(DummyDataTestCase, _common.TestCase): + """If case_insensitive is false, lower-case values should be placed + after all upper-case values. E.g., `Foo Qux bar` + """ + + def setUp(self): + super(CaseSensitivityTest, self).setUp() + + album = _common.album() + album.album = u"album" + album.genre = u"alternative" + album.year = u"2001" + album.flex1 = u"flex1" + album.flex2 = u"flex2-A" + album.albumartist = u"bar" + album.albumartist_sort = None + self.lib.add(album) + + item = _common.item() + item.title = u'another' + item.artist = u'lowercase' + item.album = u'album' + item.year = 2001 + item.comp = True + item.flex1 = u"flex1" + item.flex2 = u"flex2-A" + item.album_id = album.id + item.artist_sort = None + item.track = 10 + self.lib.add(item) + + self.new_album = album + self.new_item = item + + def tearDown(self): + self.new_item.remove(delete=True) + self.new_album.remove(delete=True) + super(CaseSensitivityTest, self).tearDown() + + def test_smart_artist_case_insensitive(self): + config['sort_case_insensitive'] = True + q = u'artist+' + results = list(self.lib.items(q)) + self.assertEqual(results[0].artist, u'lowercase') + self.assertEqual(results[1].artist, u'One') + + def test_smart_artist_case_sensitive(self): + config['sort_case_insensitive'] = False + q = u'artist+' + results = list(self.lib.items(q)) + self.assertEqual(results[0].artist, u'One') + self.assertEqual(results[-1].artist, u'lowercase') + + def test_fixed_field_case_insensitive(self): + config['sort_case_insensitive'] = True + q = u'album+' + results = list(self.lib.albums(q)) + self.assertEqual(results[0].album, u'album') + self.assertEqual(results[1].album, u'Album A') + + def test_fixed_field_case_sensitive(self): + config['sort_case_insensitive'] = False + q = u'album+' + results = list(self.lib.albums(q)) + self.assertEqual(results[0].album, u'Album A') + self.assertEqual(results[-1].album, u'album') + + def test_flex_field_case_insensitive(self): + config['sort_case_insensitive'] = True + q = u'flex1+' + results = list(self.lib.items(q)) + self.assertEqual(results[0].flex1, u'flex1') + self.assertEqual(results[1].flex1, u'Flex1-0') + + def test_flex_field_case_sensitive(self): + 
config['sort_case_insensitive'] = False + q = u'flex1+' + results = list(self.lib.items(q)) + self.assertEqual(results[0].flex1, u'Flex1-0') + self.assertEqual(results[-1].flex1, u'flex1') + + def test_case_sensitive_only_affects_text(self): + config['sort_case_insensitive'] = True + q = u'track+' + results = list(self.lib.items(q)) + # If the numerical values were sorted as strings, + # then ['1', '10', '2'] would be valid. + print([r.track for r in results]) + self.assertEqual(results[0].track, 1) + self.assertEqual(results[1].track, 2) + self.assertEqual(results[-1].track, 10) + + +class NonExistingFieldTest(DummyDataTestCase): + """Test sorting by non-existing fields""" + + def test_non_existing_fields_not_fail(self): + qs = [u'foo+', u'foo-', u'--', u'-+', u'+-', + u'++', u'-foo-', u'-foo+', u'---'] + + q0 = u'foo+' + results0 = list(self.lib.items(q0)) + for q1 in qs: + results1 = list(self.lib.items(q1)) + for r1, r2 in zip(results0, results1): + self.assertEqual(r1.id, r2.id) + + def test_combined_non_existing_field_asc(self): + all_results = list(self.lib.items(u'id+')) + q = u'foo+ id+' + results = list(self.lib.items(q)) + self.assertEqual(len(all_results), len(results)) + for r1, r2 in zip(all_results, results): + self.assertEqual(r1.id, r2.id) + + def test_combined_non_existing_field_desc(self): + all_results = list(self.lib.items(u'id+')) + q = u'foo- id+' + results = list(self.lib.items(q)) + self.assertEqual(len(all_results), len(results)) + for r1, r2 in zip(all_results, results): + self.assertEqual(r1.id, r2.id) + + def test_field_present_in_some_items(self): + """Test ordering by a field not present on all items.""" + # append 'foo' to two to items (1,2) + items = self.lib.items(u'id+') + ids = [i.id for i in items] + items[1].foo = u'bar1' + items[2].foo = u'bar2' + items[1].store() + items[2].store() + + results_asc = list(self.lib.items(u'foo+ id+')) + self.assertEqual([i.id for i in results_asc], + # items without field first + [ids[0], ids[3], ids[1], ids[2]]) + results_desc = list(self.lib.items(u'foo- id+')) + self.assertEqual([i.id for i in results_desc], + # items without field last + [ids[2], ids[1], ids[0], ids[3]]) + + def test_negation_interaction(self): + """Test the handling of negation and sorting together. + + If a string ends with a sorting suffix, it takes precedence over the + NotQuery parsing. 
+ """ + query, sort = beets.library.parse_query_string(u'-bar+', + beets.library.Item) + self.assertEqual(len(query.subqueries), 1) + self.assertTrue(isinstance(query.subqueries[0], + dbcore.query.TrueQuery)) + self.assertTrue(isinstance(sort, dbcore.query.SlowFieldSort)) + self.assertEqual(sort.field, u'-bar') + + def suite(): return unittest.TestLoader().loadTestsFromName(__name__) diff -Nru beets-1.3.8+dfsg/test/test_spotify.py beets-1.3.19/test/test_spotify.py --- beets-1.3.8+dfsg/test/test_spotify.py 2014-09-16 17:43:44.000000000 +0000 +++ beets-1.3.19/test/test_spotify.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,12 +1,18 @@ +# -*- coding: utf-8 -*- + """Tests for the 'spotify' plugin""" -import _common +from __future__ import division, absolute_import, print_function + +import os import responses -from _common import unittest + +from test import _common +from test._common import unittest from beets import config from beets.library import Item from beetsplug import spotify -from helper import TestHelper +from test.helper import TestHelper import urlparse @@ -14,7 +20,7 @@ def __init__(self, mode, show_failures): self.mode = mode self.show_failures = show_failures - self.verbose = True + self.verbose = 1 def _params(url): @@ -41,164 +47,65 @@ self.assertEqual(True, self.spotify.parse_opts(opts)) def test_empty_query(self): - self.assertEqual(None, self.spotify.query_spotify(self.lib, "1=2")) + self.assertEqual(None, self.spotify.query_spotify(self.lib, u"1=2")) @responses.activate def test_missing_request(self): - response_body = str( - '{' - '"tracks" : {' - '"href" : "https://api.spotify.com/v1/search?query=duifhjslkef' - '+album%3Alkajsdflakjsd+artist%3A&offset=0&limit=20&type=track",' - '"items" : [ ],' - '"limit" : 20,' - '"next" : null,' - '"offset" : 0,' - '"previous" : null,' - '"total" : 0' - '}' - '}' - ) + json_file = os.path.join(_common.RSRC, b'spotify', + b'missing_request.json') + with open(json_file, 'rb') as f: + response_body = f.read() + responses.add(responses.GET, 'https://api.spotify.com/v1/search', body=response_body, status=200, content_type='application/json') item = Item( - mb_trackid='01234', - album='lkajsdflakjsd', - albumartist='ujydfsuihse', - title='duifhjslkef', + mb_trackid=u'01234', + album=u'lkajsdflakjsd', + albumartist=u'ujydfsuihse', + title=u'duifhjslkef', length=10 ) item.add(self.lib) - self.assertEquals([], self.spotify.query_spotify(self.lib, "")) + self.assertEqual([], self.spotify.query_spotify(self.lib, u"")) params = _params(responses.calls[0].request.url) - self.assertEquals( + self.assertEqual( params['q'], - ['duifhjslkef album:lkajsdflakjsd artist:ujydfsuihse'], + [u'duifhjslkef album:lkajsdflakjsd artist:ujydfsuihse'], ) - self.assertEquals(params['type'], ['track']) + self.assertEqual(params['type'], [u'track']) @responses.activate def test_track_request(self): - response_body = str( - '{' - '"tracks" : {' - '"href" : "https://api.spotify.com/v1/search?query=Happy+album%3A' - 'Despicable+Me+2+artist%3APharrell+Williams&offset=0&limit=20' - '&type=track",' - '"items" : [ {' - '"album" : {' - '"album_type" : "compilation",' - '"available_markets" : [ "AD", "AR", "AT", "AU", "BE", "BG",' - '"BO", "BR", "CA", "CH", "CL", "CO",' - '"CR", "CY", "CZ", "DE", "DK", "DO",' - '"EC", "EE", "ES", "FI", "FR", "GB",' - '"GR", "GT", "HK", "HN", "HU", "IE",' - '"IS", "IT", "LI", "LT", "LU", "LV",' - '"MC", "MT", "MX", "MY", "NI", "NL",' - '"NO", "NZ", "PA", "PE", "PH", "PL",' - '"PT", "PY", "RO", "SE", "SG", "SI",' - '"SK", "SV", "TR", "TW", "US", 
"UY" ],' - '"external_urls" : {' - '"spotify" : "https://open.spotify.com/album/' - '5l3zEmMrOhOzG8d8s83GOL"' - '},' - '"href" : "https://api.spotify.com/v1/albums/' - '5l3zEmMrOhOzG8d8s83GOL",' - '"id" : "5l3zEmMrOhOzG8d8s83GOL",' - '"images" : [ {' - '"height" : 640,' - '"url" : "https://i.scdn.co/image/cb7905340c132365bb' - 'aee3f17498f062858382e8",' - '"width" : 640' - '}, {' - '"height" : 300,' - '"url" : "https://i.scdn.co/image/af369120f0b20099' - 'd6784ab31c88256113f10ffb",' - '"width" : 300' - '}, {' - '"height" : 64,' - '"url" : "https://i.scdn.co/image/' - '9dad385ddf2e7db0bef20cec1fcbdb08689d9ae8",' - '"width" : 64' - '} ],' - '"name" : "Despicable Me 2 (Original Motion Picture Soundtrack)",' - '"type" : "album",' - '"uri" : "spotify:album:5l3zEmMrOhOzG8d8s83GOL"' - '},' - '"artists" : [ {' - '"external_urls" : {' - '"spotify" : "https://open.spotify.com/artist/' - '2RdwBSPQiwcmiDo9kixcl8"' - '},' - '"href" : "https://api.spotify.com/v1/artists/' - '2RdwBSPQiwcmiDo9kixcl8",' - '"id" : "2RdwBSPQiwcmiDo9kixcl8",' - '"name" : "Pharrell Williams",' - '"type" : "artist",' - '"uri" : "spotify:artist:2RdwBSPQiwcmiDo9kixcl8"' - '} ],' - '"available_markets" : [ "AD", "AR", "AT", "AU", "BE", "BG", "BO",' - '"BR", "CA", "CH", "CL", "CO", "CR", "CY",' - '"CZ", "DE", "DK", "DO", "EC", "EE", "ES",' - '"FI", "FR", "GB", "GR", "GT", "HK", "HN",' - '"HU", "IE", "IS", "IT", "LI", "LT", "LU",' - '"LV", "MC", "MT", "MX", "MY", "NI", "NL",' - '"NO", "NZ", "PA", "PE", "PH", "PL", "PT",' - '"PY", "RO", "SE", "SG", "SI", "SK", "SV",' - '"TR", "TW", "US", "UY" ],' - '"disc_number" : 1,' - '"duration_ms" : 233305,' - '"explicit" : false,' - '"external_ids" : {' - '"isrc" : "USQ4E1300686"' - '},' - '"external_urls" : {' - '"spotify" : "https://open.spotify.com/track/' - '6NPVjNh8Jhru9xOmyQigds"' - '},' - '"href" : "https://api.spotify.com/v1/tracks/' - '6NPVjNh8Jhru9xOmyQigds",' - '"id" : "6NPVjNh8Jhru9xOmyQigds",' - '"name" : "Happy",' - '"popularity" : 89,' - '"preview_url" : "https://p.scdn.co/mp3-preview/' - '6b00000be293e6b25f61c33e206a0c522b5cbc87",' - '"track_number" : 4,' - '"type" : "track",' - '"uri" : "spotify:track:6NPVjNh8Jhru9xOmyQigds"' - '} ],' - '"limit" : 20,' - '"next" : null,' - '"offset" : 0,' - '"previous" : null,' - '"total" : 1' - '}' - '}' - ) + + json_file = os.path.join(_common.RSRC, b'spotify', + b'track_request.json') + with open(json_file, 'rb') as f: + response_body = f.read() + responses.add(responses.GET, 'https://api.spotify.com/v1/search', body=response_body, status=200, content_type='application/json') item = Item( - mb_trackid='01234', - album='Despicable Me 2', - albumartist='Pharrell Williams', - title='Happy', + mb_trackid=u'01234', + album=u'Despicable Me 2', + albumartist=u'Pharrell Williams', + title=u'Happy', length=10 ) item.add(self.lib) - results = self.spotify.query_spotify(self.lib, "Happy") - self.assertEquals(1, len(results)) - self.assertEquals("6NPVjNh8Jhru9xOmyQigds", results[0]['id']) + results = self.spotify.query_spotify(self.lib, u"Happy") + self.assertEqual(1, len(results)) + self.assertEqual(u"6NPVjNh8Jhru9xOmyQigds", results[0]['id']) self.spotify.output_results(results) params = _params(responses.calls[0].request.url) - self.assertEquals( + self.assertEqual( params['q'], - ['Happy album:Despicable Me 2 artist:Pharrell Williams'], + [u'Happy album:Despicable Me 2 artist:Pharrell Williams'], ) - self.assertEquals(params['type'], ['track']) + self.assertEqual(params['type'], [u'track']) def suite(): diff -Nru 
beets-1.3.8+dfsg/test/test_template.py beets-1.3.19/test/test_template.py --- beets-1.3.8+dfsg/test/test_template.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_template.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,7 +15,11 @@ """Tests for template engine. """ -from _common import unittest +from __future__ import division, absolute_import, print_function + +import warnings + +from test._common import unittest from beets.util import functemplate @@ -207,6 +212,13 @@ self._assert_call(arg_parts[0], u"bar", 1) self.assertEqual(list(_normexpr(arg_parts[0].args[0])), [u'baz']) + def test_fail_on_utf8(self): + parts = u'é'.encode('utf8') + warnings.simplefilter("ignore") + with self.assertRaises(UnicodeDecodeError): + functemplate._parse(parts) + warnings.simplefilter("default") + class EvalTest(unittest.TestCase): def _eval(self, template): diff -Nru beets-1.3.8+dfsg/test/test_the.py beets-1.3.19/test/test_the.py --- beets-1.3.8+dfsg/test/test_the.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_the.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,7 +1,11 @@ +# -*- coding: utf-8 -*- + """Tests for the 'the' plugin""" -from _common import unittest -import _common +from __future__ import division, absolute_import, print_function + +from test._common import unittest +from test import _common from beets import config from beetsplug.the import ThePlugin, PATTERN_A, PATTERN_THE, FORMAT @@ -9,52 +13,52 @@ class ThePluginTest(_common.TestCase): def test_unthe_with_default_patterns(self): - self.assertEqual(ThePlugin().unthe('', PATTERN_THE), '') - self.assertEqual(ThePlugin().unthe('The Something', PATTERN_THE), - 'Something, The') - self.assertEqual(ThePlugin().unthe('The The', PATTERN_THE), - 'The, The') - self.assertEqual(ThePlugin().unthe('The The', PATTERN_THE), - 'The, The') - self.assertEqual(ThePlugin().unthe('The The X', PATTERN_THE), - 'The X, The') - self.assertEqual(ThePlugin().unthe('the The', PATTERN_THE), - 'The, the') - self.assertEqual(ThePlugin().unthe('Protected The', PATTERN_THE), - 'Protected The') - self.assertEqual(ThePlugin().unthe('A Boy', PATTERN_A), - 'Boy, A') - self.assertEqual(ThePlugin().unthe('a girl', PATTERN_A), - 'girl, a') - self.assertEqual(ThePlugin().unthe('An Apple', PATTERN_A), - 'Apple, An') - self.assertEqual(ThePlugin().unthe('An A Thing', PATTERN_A), - 'A Thing, An') - self.assertEqual(ThePlugin().unthe('the An Arse', PATTERN_A), - 'the An Arse') + self.assertEqual(ThePlugin().unthe(u'', PATTERN_THE), '') + self.assertEqual(ThePlugin().unthe(u'The Something', PATTERN_THE), + u'Something, The') + self.assertEqual(ThePlugin().unthe(u'The The', PATTERN_THE), + u'The, The') + self.assertEqual(ThePlugin().unthe(u'The The', PATTERN_THE), + u'The, The') + self.assertEqual(ThePlugin().unthe(u'The The X', PATTERN_THE), + u'The X, The') + self.assertEqual(ThePlugin().unthe(u'the The', PATTERN_THE), + u'The, the') + self.assertEqual(ThePlugin().unthe(u'Protected The', PATTERN_THE), + u'Protected The') + self.assertEqual(ThePlugin().unthe(u'A Boy', PATTERN_A), + u'Boy, A') + self.assertEqual(ThePlugin().unthe(u'a girl', PATTERN_A), + u'girl, a') + self.assertEqual(ThePlugin().unthe(u'An Apple', PATTERN_A), + u'Apple, An') + 
self.assertEqual(ThePlugin().unthe(u'An A Thing', PATTERN_A), + u'A Thing, An') + self.assertEqual(ThePlugin().unthe(u'the An Arse', PATTERN_A), + u'the An Arse') def test_unthe_with_strip(self): config['the']['strip'] = True - self.assertEqual(ThePlugin().unthe('The Something', PATTERN_THE), - 'Something') - self.assertEqual(ThePlugin().unthe('An A', PATTERN_A), 'A') + self.assertEqual(ThePlugin().unthe(u'The Something', PATTERN_THE), + u'Something') + self.assertEqual(ThePlugin().unthe(u'An A', PATTERN_A), u'A') def test_template_function_with_defaults(self): ThePlugin().patterns = [PATTERN_THE, PATTERN_A] - ThePlugin().format = FORMAT - self.assertEqual(ThePlugin().the_template_func('The The'), 'The, The') - self.assertEqual(ThePlugin().the_template_func('An A'), 'A, An') + self.assertEqual(ThePlugin().the_template_func(u'The The'), + u'The, The') + self.assertEqual(ThePlugin().the_template_func(u'An A'), u'A, An') def test_custom_pattern(self): config['the']['patterns'] = [u'^test\s'] config['the']['format'] = FORMAT - self.assertEqual(ThePlugin().the_template_func('test passed'), - 'passed, test') + self.assertEqual(ThePlugin().the_template_func(u'test passed'), + u'passed, test') def test_custom_format(self): config['the']['patterns'] = [PATTERN_THE, PATTERN_A] config['the']['format'] = u'{1} ({0})' - self.assertEqual(ThePlugin().the_template_func('The A'), 'The (A)') + self.assertEqual(ThePlugin().the_template_func(u'The A'), u'The (A)') def suite(): diff -Nru beets-1.3.8+dfsg/test/test_thumbnails.py beets-1.3.19/test/test_thumbnails.py --- beets-1.3.8+dfsg/test/test_thumbnails.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/test_thumbnails.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,296 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Bruno Cauet +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
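+# The tests in this module exercise the thumbnails plugin, which names its
+# thumbnail files after the freedesktop.org convention: the MD5 hex digest
+# of the cover URI plus a ".png" suffix (see test_thumbnail_file_name
+# below).  A minimal sketch of that rule, with an assumed helper name and
+# example URI, kept as a comment rather than plugin code:
+#
+#     import hashlib
+#
+#     def thumb_name_sketch(cover_uri):
+#         # thumb_name_sketch(u"file:///my/uri") should reproduce the
+#         # fixture value asserted in test_thumbnail_file_name.
+#         return hashlib.md5(cover_uri.encode('utf-8')).hexdigest() + '.png'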
+ +from __future__ import division, absolute_import, print_function + +import os.path +from mock import Mock, patch, call +from tempfile import mkdtemp +from shutil import rmtree + +from test._common import unittest +from test.helper import TestHelper + +from beets.util import bytestring_path +from beetsplug.thumbnails import (ThumbnailsPlugin, NORMAL_DIR, LARGE_DIR, + write_metadata_im, write_metadata_pil, + PathlibURI, GioURI) + + +class ThumbnailsTest(unittest.TestCase, TestHelper): + def setUp(self): + self.setup_beets() + + def tearDown(self): + self.teardown_beets() + + @patch('beetsplug.thumbnails.util') + def test_write_metadata_im(self, mock_util): + metadata = {"a": u"A", "b": u"B"} + write_metadata_im("foo", metadata) + try: + command = u"convert foo -set a A -set b B foo".split(' ') + mock_util.command_output.assert_called_once_with(command) + except AssertionError: + command = u"convert foo -set b B -set a A foo".split(' ') + mock_util.command_output.assert_called_once_with(command) + + @patch('beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok') + @patch('beetsplug.thumbnails.os.stat') + def test_add_tags(self, mock_stat, _): + plugin = ThumbnailsPlugin() + plugin.write_metadata = Mock() + plugin.get_uri = Mock(side_effect={b"/path/to/cover": + "COVER_URI"}.__getitem__) + album = Mock(artpath=b"/path/to/cover") + mock_stat.return_value.st_mtime = 12345 + + plugin.add_tags(album, b"/path/to/thumbnail") + + metadata = {"Thumb::URI": "COVER_URI", + "Thumb::MTime": u"12345"} + plugin.write_metadata.assert_called_once_with(b"/path/to/thumbnail", + metadata) + mock_stat.assert_called_once_with(album.artpath) + + @patch('beetsplug.thumbnails.os') + @patch('beetsplug.thumbnails.ArtResizer') + @patch('beetsplug.thumbnails.get_im_version') + @patch('beetsplug.thumbnails.get_pil_version') + @patch('beetsplug.thumbnails.GioURI') + def test_check_local_ok(self, mock_giouri, mock_pil, mock_im, + mock_artresizer, mock_os): + # test local resizing capability + mock_artresizer.shared.local = False + plugin = ThumbnailsPlugin() + self.assertFalse(plugin._check_local_ok()) + + # test dirs creation + mock_artresizer.shared.local = True + + def exists(path): + if path == NORMAL_DIR: + return False + if path == LARGE_DIR: + return True + raise ValueError(u"unexpected path {0!r}".format(path)) + mock_os.path.exists = exists + plugin = ThumbnailsPlugin() + mock_os.makedirs.assert_called_once_with(NORMAL_DIR) + self.assertTrue(plugin._check_local_ok()) + + # test metadata writer function + mock_os.path.exists = lambda _: True + mock_pil.return_value = False + mock_im.return_value = False + with self.assertRaises(AssertionError): + ThumbnailsPlugin() + + mock_pil.return_value = True + self.assertEqual(ThumbnailsPlugin().write_metadata, write_metadata_pil) + + mock_im.return_value = True + self.assertEqual(ThumbnailsPlugin().write_metadata, write_metadata_im) + + mock_pil.return_value = False + self.assertEqual(ThumbnailsPlugin().write_metadata, write_metadata_im) + + self.assertTrue(ThumbnailsPlugin()._check_local_ok()) + + # test URI getter function + giouri_inst = mock_giouri.return_value + giouri_inst.available = True + self.assertEqual(ThumbnailsPlugin().get_uri, giouri_inst.uri) + + giouri_inst.available = False + self.assertEqual(ThumbnailsPlugin().get_uri.__self__.__class__, + PathlibURI) + + @patch('beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok') + @patch('beetsplug.thumbnails.ArtResizer') + @patch('beetsplug.thumbnails.util') + @patch('beetsplug.thumbnails.os') + 
@patch('beetsplug.thumbnails.shutil') + def test_make_cover_thumbnail(self, mock_shutils, mock_os, mock_util, + mock_artresizer, _): + thumbnail_dir = os.path.normpath(b"/thumbnail/dir") + md5_file = os.path.join(thumbnail_dir, b"md5") + path_to_art = os.path.normpath(b"/path/to/art") + + mock_os.path.join = os.path.join # don't mock that function + plugin = ThumbnailsPlugin() + plugin.add_tags = Mock() + + album = Mock(artpath=path_to_art) + mock_util.syspath.side_effect = lambda x: x + plugin.thumbnail_file_name = Mock(return_value=b'md5') + mock_os.path.exists.return_value = False + + def os_stat(target): + if target == md5_file: + return Mock(st_mtime=1) + elif target == path_to_art: + return Mock(st_mtime=2) + else: + raise ValueError(u"invalid target {0}".format(target)) + mock_os.stat.side_effect = os_stat + + plugin.make_cover_thumbnail(album, 12345, thumbnail_dir) + + mock_os.path.exists.assert_called_once_with(md5_file) + mock_os.stat.has_calls([call(md5_file), call(path_to_art)], + any_order=True) + + resize = mock_artresizer.shared.resize + resize.assert_called_once_with(12345, path_to_art, md5_file) + plugin.add_tags.assert_called_once_with(album, resize.return_value) + mock_shutils.move.assert_called_once_with(resize.return_value, + md5_file) + + # now test with recent thumbnail & with force + mock_os.path.exists.return_value = True + plugin.force = False + resize.reset_mock() + + def os_stat(target): + if target == md5_file: + return Mock(st_mtime=3) + elif target == path_to_art: + return Mock(st_mtime=2) + else: + raise ValueError(u"invalid target {0}".format(target)) + mock_os.stat.side_effect = os_stat + + plugin.make_cover_thumbnail(album, 12345, thumbnail_dir) + self.assertEqual(resize.call_count, 0) + + # and with force + plugin.config['force'] = True + plugin.make_cover_thumbnail(album, 12345, thumbnail_dir) + resize.assert_called_once_with(12345, path_to_art, md5_file) + + @patch('beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok') + def test_make_dolphin_cover_thumbnail(self, _): + plugin = ThumbnailsPlugin() + tmp = bytestring_path(mkdtemp()) + album = Mock(path=tmp, + artpath=os.path.join(tmp, b"cover.jpg")) + plugin.make_dolphin_cover_thumbnail(album) + with open(os.path.join(tmp, b".directory"), "rb") as f: + self.assertEqual( + f.read().splitlines(), + [b"[Desktop Entry]", b"Icon=./cover.jpg"] + ) + + # not rewritten when it already exists (yup that's a big limitation) + album.artpath = b"/my/awesome/art.tiff" + plugin.make_dolphin_cover_thumbnail(album) + with open(os.path.join(tmp, b".directory"), "rb") as f: + self.assertEqual( + f.read().splitlines(), + [b"[Desktop Entry]", b"Icon=./cover.jpg"] + ) + + rmtree(tmp) + + @patch('beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok') + @patch('beetsplug.thumbnails.ArtResizer') + def test_process_album(self, mock_artresizer, _): + get_size = mock_artresizer.shared.get_size + + plugin = ThumbnailsPlugin() + make_cover = plugin.make_cover_thumbnail = Mock(return_value=True) + make_dolphin = plugin.make_dolphin_cover_thumbnail = Mock() + + # no art + album = Mock(artpath=None) + plugin.process_album(album) + self.assertEqual(get_size.call_count, 0) + self.assertEqual(make_dolphin.call_count, 0) + + # cannot get art size + album.artpath = b"/path/to/art" + get_size.return_value = None + plugin.process_album(album) + get_size.assert_called_once_with(b"/path/to/art") + self.assertEqual(make_cover.call_count, 0) + + # dolphin tests + plugin.config['dolphin'] = False + plugin.process_album(album) + 
self.assertEqual(make_dolphin.call_count, 0) + + plugin.config['dolphin'] = True + plugin.process_album(album) + make_dolphin.assert_called_once_with(album) + + # small art + get_size.return_value = 200, 200 + plugin.process_album(album) + make_cover.assert_called_once_with(album, 128, NORMAL_DIR) + + # big art + make_cover.reset_mock() + get_size.return_value = 500, 500 + plugin.process_album(album) + make_cover.has_calls([call(album, 128, NORMAL_DIR), + call(album, 256, LARGE_DIR)], any_order=True) + + @patch('beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok') + @patch('beetsplug.thumbnails.decargs') + def test_invokations(self, mock_decargs, _): + plugin = ThumbnailsPlugin() + plugin.process_album = Mock() + album = Mock() + + plugin.process_album.reset_mock() + lib = Mock() + album2 = Mock() + lib.albums.return_value = [album, album2] + plugin.process_query(lib, Mock(), None) + lib.albums.assert_called_once_with(mock_decargs.return_value) + plugin.process_album.has_calls([call(album), call(album2)], + any_order=True) + + @patch('beetsplug.thumbnails.BaseDirectory') + def test_thumbnail_file_name(self, mock_basedir): + plug = ThumbnailsPlugin() + plug.get_uri = Mock(return_value=u"file:///my/uri") + self.assertEqual(plug.thumbnail_file_name(b'idontcare'), + b"9488f5797fbe12ffb316d607dfd93d04.png") + + def test_uri(self): + gio = GioURI() + plib = PathlibURI() + if not gio.available: + self.skipTest(u"GIO library not found") + + self.assertEqual(gio.uri(u"/foo"), b"file:///") # silent fail + self.assertEqual(gio.uri(b"/foo"), b"file:///foo") + self.assertEqual(gio.uri(b"/foo!"), b"file:///foo!") + self.assertEqual(plib.uri(b"/foo!"), b"file:///foo%21") + self.assertEqual( + gio.uri(b'/music/\xec\x8b\xb8\xec\x9d\xb4'), + b'file:///music/%EC%8B%B8%EC%9D%B4') + self.assertEqual( + plib.uri(b'/music/\xec\x8b\xb8\xec\x9d\xb4'), + b'file:///music/%EC%8B%B8%EC%9D%B4') + + +def suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_types_plugin.py beets-1.3.19/test/test_types_plugin.py --- beets-1.3.8+dfsg/test/test_types_plugin.py 2014-09-14 18:35:06.000000000 +0000 +++ beets-1.3.19/test/test_types_plugin.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Thomas Scholtes. +# Copyright 2016, Thomas Scholtes. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,11 +13,13 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. 
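+# The tests below drive the types plugin entirely through configuration: a
+# "types:" mapping declares the type of a flexible attribute, after which
+# range queries (for example myint:1..3) and "modify" coerce values to int,
+# float, bool, or date.  A user-facing sketch of the configuration these
+# tests set programmatically (the field names are test fixtures, not
+# required names):
+#
+#     plugins: types
+#     types:
+#         myint: int
+#         myfloat: float
+#         mybool: bool
+#         mydate: date
+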
+from __future__ import division, absolute_import, print_function + import time from datetime import datetime -from _common import unittest -from helper import TestHelper +from test._common import unittest +from test.helper import TestHelper from beets.util.confit import ConfigValueError @@ -33,53 +36,69 @@ def test_integer_modify_and_query(self): self.config['types'] = {'myint': 'int'} - item = self.add_item(artist='aaa') + item = self.add_item(artist=u'aaa') # Do not match unset values - out = self.list('myint:1..3') - self.assertEqual('', out) + out = self.list(u'myint:1..3') + self.assertEqual(u'', out) - self.modify('myint=2') + self.modify(u'myint=2') item.load() self.assertEqual(item['myint'], 2) # Match in range - out = self.list('myint:1..3') - self.assertIn('aaa', out) + out = self.list(u'myint:1..3') + self.assertIn(b'aaa', out) + + def test_album_integer_modify_and_query(self): + self.config['types'] = {'myint': u'int'} + album = self.add_album(albumartist=u'aaa') + + # Do not match unset values + out = self.list_album(u'myint:1..3') + self.assertEqual(u'', out) + + self.modify(u'-a', u'myint=2') + album.load() + self.assertEqual(album['myint'], 2) + + # Match in range + out = self.list_album(u'myint:1..3') + self.assertIn(b'aaa', out) def test_float_modify_and_query(self): - self.config['types'] = {'myfloat': 'float'} - item = self.add_item(artist='aaa') + self.config['types'] = {'myfloat': u'float'} + item = self.add_item(artist=u'aaa') - self.modify('myfloat=-9.1') + self.modify(u'myfloat=-9.1') item.load() self.assertEqual(item['myfloat'], -9.1) # Match in range - out = self.list('myfloat:-10..0') - self.assertIn('aaa', out) + out = self.list(u'myfloat:-10..0') + self.assertIn(b'aaa', out) def test_bool_modify_and_query(self): - self.config['types'] = {'mybool': 'bool'} - true = self.add_item(artist='true') - false = self.add_item(artist='false') - self.add_item(artist='unset') + self.config['types'] = {'mybool': u'bool'} + true = self.add_item(artist=u'true') + false = self.add_item(artist=u'false') + self.add_item(artist=u'unset') # Set true - self.modify('mybool=1', 'artist:true') + self.modify(u'mybool=1', u'artist:true') true.load() self.assertEqual(true['mybool'], True) # Set false - self.modify('mybool=false', 'artist:false') + self.modify(u'mybool=false', u'artist:false') false.load() self.assertEqual(false['mybool'], False) # Query bools - out = self.list('mybool:true', '$artist $mybool') - self.assertEqual('true True', out) + out = self.list(u'mybool:true', u'$artist $mybool') + self.assertEqual(u'true True', out) - out = self.list('mybool:false', '$artist $mybool') + out = self.list(u'mybool:false', u'$artist $mybool') # Dealing with unset fields? 
# self.assertEqual('false False', out) @@ -87,23 +106,23 @@ # self.assertIn('unset $mybool', out) def test_date_modify_and_query(self): - self.config['types'] = {'mydate': 'date'} + self.config['types'] = {'mydate': u'date'} # FIXME parsing should also work with default time format self.config['time_format'] = '%Y-%m-%d' - old = self.add_item(artist='prince') - new = self.add_item(artist='britney') + old = self.add_item(artist=u'prince') + new = self.add_item(artist=u'britney') - self.modify('mydate=1999-01-01', 'artist:prince') + self.modify(u'mydate=1999-01-01', u'artist:prince') old.load() - self.assertEqual(old['mydate'], mktime(1999, 01, 01)) + self.assertEqual(old['mydate'], mktime(1999, 1, 1)) - self.modify('mydate=1999-12-30', 'artist:britney') + self.modify(u'mydate=1999-12-30', u'artist:britney') new.load() self.assertEqual(new['mydate'], mktime(1999, 12, 30)) # Match in range - out = self.list('mydate:..1999-07', '$artist $mydate') - self.assertEqual('prince 1999-01-01', out) + out = self.list(u'mydate:..1999-07', u'$artist $mydate') + self.assertEqual(u'prince 1999-01-01', out) # FIXME some sort of timezone issue here # out = self.list('mydate:1999-12-30', '$artist $mydate') @@ -112,14 +131,17 @@ def test_unknown_type_error(self): self.config['types'] = {'flex': 'unkown type'} with self.assertRaises(ConfigValueError): - self.run_command('ls') + self.run_command(u'ls') def modify(self, *args): - return self.run_with_output('modify', '--yes', '--nowrite', - '--nomove', *args) + return self.run_with_output(u'modify', u'--yes', u'--nowrite', + u'--nomove', *args) + + def list(self, query, fmt=u'$artist - $album - $title'): + return self.run_with_output(u'ls', u'-f', fmt, query).strip() - def list(self, query, fmt='$artist - $album - $title'): - return self.run_with_output('ls', '-f', fmt, query).strip() + def list_album(self, query, fmt=u'$albumartist - $album - $title'): + return self.run_with_output(u'ls', u'-a', u'-f', fmt, query).strip() def mktime(*args): diff -Nru beets-1.3.8+dfsg/test/test_ui_commands.py beets-1.3.19/test/test_ui_commands.py --- beets-1.3.8+dfsg/test/test_ui_commands.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/test_ui_commands.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,123 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Test module for file ui/commands.py +""" + +from __future__ import division, absolute_import, print_function + +import os +import shutil + +from test import _common +from test._common import unittest + +from beets import library +from beets import ui +from beets.ui import commands + + +class QueryTest(_common.TestCase): + def setUp(self): + super(QueryTest, self).setUp() + + self.libdir = os.path.join(self.temp_dir, b'testlibdir') + os.mkdir(self.libdir) + + # Add a file to the library but don't copy it in yet. 
+ self.lib = library.Library(':memory:', self.libdir) + + # Alternate destination directory. + self.otherdir = os.path.join(self.temp_dir, b'testotherdir') + + def add_item(self, filename=b'srcfile', templatefile=b'full.mp3'): + itempath = os.path.join(self.libdir, filename) + shutil.copy(os.path.join(_common.RSRC, templatefile), itempath) + item = library.Item.from_path(itempath) + self.lib.add(item) + return item, itempath + + def add_album(self, items): + album = self.lib.add_album(items) + return album + + def check_do_query(self, num_items, num_albums, + q=(), album=False, also_items=True): + items, albums = commands._do_query( + self.lib, q, album, also_items) + self.assertEqual(len(items), num_items) + self.assertEqual(len(albums), num_albums) + + def test_query_empty(self): + with self.assertRaises(ui.UserError): + commands._do_query(self.lib, (), False) + + def test_query_empty_album(self): + with self.assertRaises(ui.UserError): + commands._do_query(self.lib, (), True) + + def test_query_item(self): + self.add_item() + self.check_do_query(1, 0, album=False) + self.add_item() + self.check_do_query(2, 0, album=False) + + def test_query_album(self): + item, itempath = self.add_item() + self.add_album([item]) + self.check_do_query(1, 1, album=True) + self.check_do_query(0, 1, album=True, also_items=False) + + item, itempath = self.add_item() + item2, itempath = self.add_item() + self.add_album([item, item2]) + self.check_do_query(3, 2, album=True) + self.check_do_query(0, 2, album=True, also_items=False) + + +class FieldsTest(_common.LibTestCase): + def setUp(self): + super(FieldsTest, self).setUp() + + self.io.install() + + def tearDown(self): + self.io.restore() + + def remove_keys(self, l, text): + for i in text: + try: + l.remove(i) + except ValueError: + pass + + def test_fields_func(self): + commands.fields_func(self.lib, [], []) + items = library.Item.all_keys() + albums = library.Album.all_keys() + + output = self.io.stdout.get().split() + self.remove_keys(items, output) + self.remove_keys(albums, output) + + self.assertEqual(len(items), 0) + self.assertEqual(len(albums), 0) + + +def suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_ui_importer.py beets-1.3.19/test/test_ui_importer.py --- beets-1.3.8+dfsg/test/test_ui_importer.py 2014-04-16 03:29:29.000000000 +0000 +++ beets-1.3.19/test/test_ui_importer.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -13,11 +14,14 @@ # included in all copies or substantial portions of the Software. """Tests the TerminalImportSession. The tests are the same as in the + test_importer module. But here the test importer inherits from ``TerminalImportSession``. So we test this class, too. 
""" -from _common import unittest, DummyIO +from __future__ import division, absolute_import, print_function + +from test._common import unittest, DummyIO from test import test_importer from beets.ui.commands import TerminalImportSession from beets import importer @@ -54,21 +58,21 @@ choice = self.default_choice if choice == importer.action.APPLY: - self.io.addinput('A') + self.io.addinput(u'A') elif choice == importer.action.ASIS: - self.io.addinput('U') + self.io.addinput(u'U') elif choice == importer.action.ALBUMS: - self.io.addinput('G') + self.io.addinput(u'G') elif choice == importer.action.TRACKS: - self.io.addinput('T') + self.io.addinput(u'T') elif choice == importer.action.SKIP: - self.io.addinput('S') + self.io.addinput(u'S') elif isinstance(choice, int): - self.io.addinput('M') - self.io.addinput(str(choice)) + self.io.addinput(u'M') + self.io.addinput(unicode(choice)) self._add_choice_input() else: - raise Exception('Unknown choice %s' % choice) + raise Exception(u'Unknown choice %s' % choice) class TerminalImportSessionSetup(object): @@ -91,7 +95,7 @@ self.io = DummyIO() self.io.install() self.importer = TestTerminalImportSession( - self.lib, logfile=None, query=None, io=self.io, + self.lib, loghandler=None, query=None, io=self.io, paths=[import_dir or self.import_dir], ) diff -Nru beets-1.3.8+dfsg/test/test_ui_init.py beets-1.3.19/test/test_ui_init.py --- beets-1.3.8+dfsg/test/test_ui_init.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/test_ui_init.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+ +"""Test module for file ui/__init__.py +""" + +from __future__ import division, absolute_import, print_function + +from test import _common +from test._common import unittest + +from beets import ui + + +class InputMethodsTest(_common.TestCase): + def setUp(self): + super(InputMethodsTest, self).setUp() + self.io.install() + + def _print_helper(self, s): + print(s) + + def _print_helper2(self, s, prefix): + print(prefix, s) + + def test_input_select_objects(self): + full_items = ['1', '2', '3', '4', '5'] + + # Test no + self.io.addinput('n') + items = ui.input_select_objects( + "Prompt", full_items, self._print_helper) + self.assertEqual(items, []) + + # Test yes + self.io.addinput('y') + items = ui.input_select_objects( + "Prompt", full_items, self._print_helper) + self.assertEqual(items, full_items) + + # Test selective 1 + self.io.addinput('s') + self.io.addinput('n') + self.io.addinput('y') + self.io.addinput('n') + self.io.addinput('y') + self.io.addinput('n') + items = ui.input_select_objects( + "Prompt", full_items, self._print_helper) + self.assertEqual(items, ['2', '4']) + + # Test selective 2 + self.io.addinput('s') + self.io.addinput('y') + self.io.addinput('y') + self.io.addinput('n') + self.io.addinput('y') + self.io.addinput('n') + items = ui.input_select_objects( + "Prompt", full_items, + lambda s: self._print_helper2(s, "Prefix")) + self.assertEqual(items, ['1', '2', '4']) + + +class InitTest(_common.LibTestCase): + def setUp(self): + super(InitTest, self).setUp() + + def test_human_bytes(self): + tests = [ + (0, '0.0 B'), + (30, '30.0 B'), + (pow(2, 10), '1.0 KiB'), + (pow(2, 20), '1.0 MiB'), + (pow(2, 30), '1.0 GiB'), + (pow(2, 40), '1.0 TiB'), + (pow(2, 50), '1.0 PiB'), + (pow(2, 60), '1.0 EiB'), + (pow(2, 70), '1.0 ZiB'), + (pow(2, 80), '1.0 YiB'), + (pow(2, 90), '1.0 HiB'), + (pow(2, 100), 'big'), + ] + for i, h in tests: + self.assertEqual(h, ui.human_bytes(i)) + + def test_human_seconds(self): + tests = [ + (0, '0.0 seconds'), + (30, '30.0 seconds'), + (60, '1.0 minutes'), + (90, '1.5 minutes'), + (125, '2.1 minutes'), + (3600, '1.0 hours'), + (86400, '1.0 days'), + (604800, '1.0 weeks'), + (31449600, '1.0 years'), + (314496000, '1.0 decades'), + ] + for i, h in tests: + self.assertEqual(h, ui.human_seconds(i)) + + +def suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_ui.py beets-1.3.19/test/test_ui.py --- beets-1.3.8+dfsg/test/test_ui.py 2014-09-14 19:10:40.000000000 +0000 +++ beets-1.3.19/test/test_ui.py 2016-06-26 00:42:09.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,15 +15,19 @@ """Tests for the command-line interface. 
""" +from __future__ import division, absolute_import, print_function + import os import shutil import re import subprocess import platform +from copy import deepcopy -import _common -from _common import unittest -from helper import capture_stdout, has_program, TestHelper, control_stdin +from mock import patch +from test import _common +from test._common import unittest +from test.helper import capture_stdout, has_program, TestHelper, control_stdin from beets import library from beets import ui @@ -33,6 +38,7 @@ from beets import config from beets import plugins from beets.util.confit import ConfigError +from beets import util class ListTest(unittest.TestCase): @@ -43,7 +49,7 @@ self.lib.add(self.item) self.lib.add_album([self.item]) - def _run_list(self, query='', album=False, path=False, fmt=None): + def _run_list(self, query=u'', album=False, path=False, fmt=''): commands.list_items(self.lib, query, album, fmt) def test_list_outputs_item(self): @@ -117,25 +123,38 @@ self.io.install() - self.libdir = os.path.join(self.temp_dir, 'testlibdir') + self.libdir = os.path.join(self.temp_dir, b'testlibdir') os.mkdir(self.libdir) # Copy a file into the library. self.lib = library.Library(':memory:', self.libdir) - self.i = library.Item.from_path(os.path.join(_common.RSRC, 'full.mp3')) + item_path = os.path.join(_common.RSRC, b'full.mp3') + self.i = library.Item.from_path(item_path) self.lib.add(self.i) self.i.move(True) def test_remove_items_no_delete(self): self.io.addinput('y') - commands.remove_items(self.lib, '', False, False) + commands.remove_items(self.lib, u'', False, False, False) items = self.lib.items() self.assertEqual(len(list(items)), 0) self.assertTrue(os.path.exists(self.i.path)) def test_remove_items_with_delete(self): self.io.addinput('y') - commands.remove_items(self.lib, '', False, True) + commands.remove_items(self.lib, u'', False, True, False) + items = self.lib.items() + self.assertEqual(len(list(items)), 0) + self.assertFalse(os.path.exists(self.i.path)) + + def test_remove_items_with_force_no_delete(self): + commands.remove_items(self.lib, u'', False, False, True) + items = self.lib.items() + self.assertEqual(len(list(items)), 0) + self.assertTrue(os.path.exists(self.i.path)) + + def test_remove_items_with_force_delete(self): + commands.remove_items(self.lib, u'', False, True, True) items = self.lib.items() self.assertEqual(len(list(items)), 0) self.assertFalse(os.path.exists(self.i.path)) @@ -151,44 +170,68 @@ def tearDown(self): self.teardown_beets() - def modify(self, *args): - with control_stdin('y'): + def modify_inp(self, inp, *args): + with control_stdin(inp): ui._raw_main(['modify'] + list(args), self.lib) + def modify(self, *args): + self.modify_inp('y', *args) + # Item tests def test_modify_item(self): - self.modify("title=newTitle") + self.modify(u"title=newTitle") item = self.lib.items().get() - self.assertEqual(item.title, 'newTitle') + self.assertEqual(item.title, u'newTitle') + + def test_modify_item_abort(self): + item = self.lib.items().get() + title = item.title + self.modify_inp('n', u"title=newTitle") + item = self.lib.items().get() + self.assertEqual(item.title, title) + + def test_modify_item_no_change(self): + title = u"Tracktitle" + item = self.add_item_fixture(title=title) + self.modify_inp('y', u"title", u"title={0}".format(title)) + item = self.lib.items(title).get() + self.assertEqual(item.title, title) def test_modify_write_tags(self): - self.modify("title=newTitle") + self.modify(u"title=newTitle") item = self.lib.items().get() item.read() - 
self.assertEqual(item.title, 'newTitle') + self.assertEqual(item.title, u'newTitle') def test_modify_dont_write_tags(self): - self.modify("--nowrite", "title=newTitle") + self.modify(u"--nowrite", u"title=newTitle") item = self.lib.items().get() item.read() self.assertNotEqual(item.title, 'newTitle') def test_move(self): - self.modify("title=newTitle") + self.modify(u"title=newTitle") item = self.lib.items().get() - self.assertIn('newTitle', item.path) + self.assertIn(b'newTitle', item.path) def test_not_move(self): - self.modify("--nomove", "title=newTitle") + self.modify(u"--nomove", u"title=newTitle") item = self.lib.items().get() - self.assertNotIn('newTitle', item.path) + self.assertNotIn(b'newTitle', item.path) + + def test_no_write_no_move(self): + self.modify(u"--nomove", u"--nowrite", u"title=newTitle") + item = self.lib.items().get() + item.read() + self.assertNotIn(b'newTitle', item.path) + self.assertNotEqual(item.title, u'newTitle') def test_update_mtime(self): item = self.item old_mtime = item.mtime - self.modify("title=newTitle") + self.modify(u"title=newTitle") item.load() self.assertNotEqual(old_mtime, item.mtime) self.assertEqual(item.current_mtime(), item.mtime) @@ -196,100 +239,116 @@ def test_reset_mtime_with_no_write(self): item = self.item - self.modify("--nowrite", "title=newTitle") + self.modify(u"--nowrite", u"title=newTitle") item.load() self.assertEqual(0, item.mtime) + def test_selective_modify(self): + title = u"Tracktitle" + album = u"album" + original_artist = u"composer" + new_artist = u"coverArtist" + for i in range(0, 10): + self.add_item_fixture(title=u"{0}{1}".format(title, i), + artist=original_artist, + album=album) + self.modify_inp('s\ny\ny\ny\nn\nn\ny\ny\ny\ny\nn', + title, u"artist={0}".format(new_artist)) + original_items = self.lib.items(u"artist:{0}".format(original_artist)) + new_items = self.lib.items(u"artist:{0}".format(new_artist)) + self.assertEqual(len(list(original_items)), 3) + self.assertEqual(len(list(new_items)), 7) + # Album Tests def test_modify_album(self): - self.modify("--album", "album=newAlbum") + self.modify(u"--album", u"album=newAlbum") album = self.lib.albums().get() - self.assertEqual(album.album, 'newAlbum') + self.assertEqual(album.album, u'newAlbum') def test_modify_album_write_tags(self): - self.modify("--album", "album=newAlbum") + self.modify(u"--album", u"album=newAlbum") item = self.lib.items().get() item.read() - self.assertEqual(item.album, 'newAlbum') + self.assertEqual(item.album, u'newAlbum') def test_modify_album_dont_write_tags(self): - self.modify("--album", "--nowrite", "album=newAlbum") + self.modify(u"--album", u"--nowrite", u"album=newAlbum") item = self.lib.items().get() item.read() - self.assertEqual(item.album, 'the album') + self.assertEqual(item.album, u'the album') def test_album_move(self): - self.modify("--album", "album=newAlbum") + self.modify(u"--album", u"album=newAlbum") item = self.lib.items().get() item.read() - self.assertIn('newAlbum', item.path) + self.assertIn(b'newAlbum', item.path) def test_album_not_move(self): - self.modify("--nomove", "--album", "album=newAlbum") + self.modify(u"--nomove", u"--album", u"album=newAlbum") item = self.lib.items().get() item.read() - self.assertNotIn('newAlbum', item.path) + self.assertNotIn(b'newAlbum', item.path) # Misc def test_write_initial_key_tag(self): - self.modify("initial_key=C#m") + self.modify(u"initial_key=C#m") item = self.lib.items().get() mediafile = MediaFile(item.path) - self.assertEqual(mediafile.initial_key, 'C#m') + 
self.assertEqual(mediafile.initial_key, u'C#m') def test_set_flexattr(self): - self.modify("flexattr=testAttr") + self.modify(u"flexattr=testAttr") item = self.lib.items().get() - self.assertEqual(item.flexattr, 'testAttr') + self.assertEqual(item.flexattr, u'testAttr') def test_remove_flexattr(self): item = self.lib.items().get() - item.flexattr = 'testAttr' + item.flexattr = u'testAttr' item.store() - self.modify("flexattr!") + self.modify(u"flexattr!") item = self.lib.items().get() - self.assertNotIn("flexattr", item) + self.assertNotIn(u"flexattr", item) - @unittest.skip('not yet implemented') + @unittest.skip(u'not yet implemented') def test_delete_initial_key_tag(self): item = self.lib.items().get() - item.initial_key = 'C#m' + item.initial_key = u'C#m' item.write() item.store() mediafile = MediaFile(item.path) - self.assertEqual(mediafile.initial_key, 'C#m') + self.assertEqual(mediafile.initial_key, u'C#m') - self.modify("initial_key!") + self.modify(u"initial_key!") mediafile = MediaFile(item.path) self.assertIsNone(mediafile.initial_key) def test_arg_parsing_colon_query(self): - (query, mods, dels) = commands.modify_parse_args(["title:oldTitle", - "title=newTitle"]) - self.assertEqual(query, ["title:oldTitle"]) - self.assertEqual(mods, {"title": "newTitle"}) + (query, mods, dels) = commands.modify_parse_args([u"title:oldTitle", + u"title=newTitle"]) + self.assertEqual(query, [u"title:oldTitle"]) + self.assertEqual(mods, {"title": u"newTitle"}) def test_arg_parsing_delete(self): - (query, mods, dels) = commands.modify_parse_args(["title:oldTitle", - "title!"]) - self.assertEqual(query, ["title:oldTitle"]) + (query, mods, dels) = commands.modify_parse_args([u"title:oldTitle", + u"title!"]) + self.assertEqual(query, [u"title:oldTitle"]) self.assertEqual(dels, ["title"]) def test_arg_parsing_query_with_exclaimation(self): - (query, mods, dels) = commands.modify_parse_args(["title:oldTitle!", - "title=newTitle!"]) - self.assertEqual(query, ["title:oldTitle!"]) - self.assertEqual(mods, {"title": "newTitle!"}) + (query, mods, dels) = commands.modify_parse_args([u"title:oldTitle!", + u"title=newTitle!"]) + self.assertEqual(query, [u"title:oldTitle!"]) + self.assertEqual(mods, {"title": u"newTitle!"}) def test_arg_parsing_equals_in_value(self): - (query, mods, dels) = commands.modify_parse_args(["title:foo=bar", - "title=newTitle"]) - self.assertEqual(query, ["title:foo=bar"]) - self.assertEqual(mods, {"title": "newTitle"}) + (query, mods, dels) = commands.modify_parse_args([u"title:foo=bar", + u"title=newTitle"]) + self.assertEqual(query, [u"title:foo=bar"]) + self.assertEqual(mods, {"title": u"newTitle"}) class WriteTest(unittest.TestCase, TestHelper): @@ -305,7 +364,7 @@ def test_update_mtime(self): item = self.add_item_fixture() - item['title'] = 'a new title' + item['title'] = u'a new title' item.store() item = self.lib.items().get() @@ -315,6 +374,37 @@ item = self.lib.items().get() self.assertEqual(item.mtime, item.current_mtime()) + def test_non_metadata_field_unchanged(self): + """Changing a non-"tag" field like `bitrate` and writing should + have no effect. + """ + # An item that starts out "clean". + item = self.add_item_fixture() + item.read() + + # ... but with a mismatched bitrate. 
+ item.bitrate = 123 + item.store() + + with capture_stdout() as stdout: + self.write_cmd() + + self.assertEqual(stdout.getvalue(), '') + + def test_write_metadata_field(self): + item = self.add_item_fixture() + item.read() + old_title = item.title + + item.title = u'new title' + item.store() + + with capture_stdout() as stdout: + self.write_cmd() + + self.assertTrue(u'{0} -> new title'.format(old_title) + in stdout.getvalue()) + class MoveTest(_common.TestCase): def setUp(self): @@ -322,11 +412,11 @@ self.io.install() - self.libdir = os.path.join(self.temp_dir, 'testlibdir') + self.libdir = os.path.join(self.temp_dir, b'testlibdir') os.mkdir(self.libdir) - self.itempath = os.path.join(self.libdir, 'srcfile') - shutil.copy(os.path.join(_common.RSRC, 'full.mp3'), self.itempath) + self.itempath = os.path.join(self.libdir, b'srcfile') + shutil.copy(os.path.join(_common.RSRC, b'full.mp3'), self.itempath) # Add a file to the library but don't copy it in yet. self.lib = library.Library(':memory:', self.libdir) @@ -335,53 +425,64 @@ self.album = self.lib.add_album([self.i]) # Alternate destination directory. - self.otherdir = os.path.join(self.temp_dir, 'testotherdir') + self.otherdir = os.path.join(self.temp_dir, b'testotherdir') - def _move(self, query=(), dest=None, copy=False, album=False): - commands.move_items(self.lib, dest, query, copy, album) + def _move(self, query=(), dest=None, copy=False, album=False, + pretend=False): + commands.move_items(self.lib, dest, query, copy, album, pretend) def test_move_item(self): self._move() self.i.load() - self.assertTrue('testlibdir' in self.i.path) + self.assertTrue(b'testlibdir' in self.i.path) self.assertExists(self.i.path) self.assertNotExists(self.itempath) def test_copy_item(self): self._move(copy=True) self.i.load() - self.assertTrue('testlibdir' in self.i.path) + self.assertTrue(b'testlibdir' in self.i.path) self.assertExists(self.i.path) self.assertExists(self.itempath) def test_move_album(self): self._move(album=True) self.i.load() - self.assertTrue('testlibdir' in self.i.path) + self.assertTrue(b'testlibdir' in self.i.path) self.assertExists(self.i.path) self.assertNotExists(self.itempath) def test_copy_album(self): self._move(copy=True, album=True) self.i.load() - self.assertTrue('testlibdir' in self.i.path) + self.assertTrue(b'testlibdir' in self.i.path) self.assertExists(self.i.path) self.assertExists(self.itempath) def test_move_item_custom_dir(self): self._move(dest=self.otherdir) self.i.load() - self.assertTrue('testotherdir' in self.i.path) + self.assertTrue(b'testotherdir' in self.i.path) self.assertExists(self.i.path) self.assertNotExists(self.itempath) def test_move_album_custom_dir(self): self._move(dest=self.otherdir, album=True) self.i.load() - self.assertTrue('testotherdir' in self.i.path) + self.assertTrue(b'testotherdir' in self.i.path) self.assertExists(self.i.path) self.assertNotExists(self.itempath) + def test_pretend_move_item(self): + self._move(dest=self.otherdir, pretend=True) + self.i.load() + self.assertIn(b'srcfile', self.i.path) + + def test_pretend_move_album(self): + self._move(album=True, pretend=True) + self.i.load() + self.assertIn(b'srcfile', self.i.path) + class UpdateTest(_common.TestCase): def setUp(self): @@ -389,17 +490,18 @@ self.io.install() - self.libdir = os.path.join(self.temp_dir, 'testlibdir') + self.libdir = os.path.join(self.temp_dir, b'testlibdir') # Copy a file into the library. 
self.lib = library.Library(':memory:', self.libdir) - self.i = library.Item.from_path(os.path.join(_common.RSRC, 'full.mp3')) + item_path = os.path.join(_common.RSRC, b'full.mp3') + self.i = library.Item.from_path(item_path) self.lib.add(self.i) self.i.move(True) self.album = self.lib.add_album([self.i]) # Album art. - artfile = os.path.join(self.temp_dir, 'testart.jpg') + artfile = os.path.join(self.temp_dir, b'testart.jpg') _common.touch(artfile) self.album.set_art(artfile) self.album.store() @@ -433,40 +535,40 @@ def test_modified_metadata_detected(self): mf = MediaFile(self.i.path) - mf.title = 'differentTitle' + mf.title = u'differentTitle' mf.save() self._update() item = self.lib.items().get() - self.assertEqual(item.title, 'differentTitle') + self.assertEqual(item.title, u'differentTitle') def test_modified_metadata_moved(self): mf = MediaFile(self.i.path) - mf.title = 'differentTitle' + mf.title = u'differentTitle' mf.save() self._update(move=True) item = self.lib.items().get() - self.assertTrue('differentTitle' in item.path) + self.assertTrue(b'differentTitle' in item.path) def test_modified_metadata_not_moved(self): mf = MediaFile(self.i.path) - mf.title = 'differentTitle' + mf.title = u'differentTitle' mf.save() self._update(move=False) item = self.lib.items().get() - self.assertTrue('differentTitle' not in item.path) + self.assertTrue(b'differentTitle' not in item.path) def test_modified_album_metadata_moved(self): mf = MediaFile(self.i.path) - mf.album = 'differentAlbum' + mf.album = u'differentAlbum' mf.save() self._update(move=True) item = self.lib.items().get() - self.assertTrue('differentAlbum' in item.path) + self.assertTrue(b'differentAlbum' in item.path) def test_modified_album_metadata_art_moved(self): artpath = self.album.artpath mf = MediaFile(self.i.path) - mf.album = 'differentAlbum' + mf.album = u'differentAlbum' mf.save() self._update(move=True) album = self.lib.albums()[0] @@ -474,7 +576,7 @@ def test_mtime_match_skips_update(self): mf = MediaFile(self.i.path) - mf.title = 'differentTitle' + mf.title = u'differentTitle' mf.save() # Make in-memory mtime match on-disk mtime. @@ -483,7 +585,7 @@ self._update(reset_mtime=False) item = self.lib.items().get() - self.assertEqual(item.title, 'full') + self.assertEqual(item.title, u'full') class PrintTest(_common.TestCase): @@ -499,7 +601,7 @@ try: ui.print_(u'something') except TypeError: - self.fail('TypeError during print') + self.fail(u'TypeError during print') finally: if lang: os.environ['LANG'] = lang @@ -513,7 +615,7 @@ try: ui.print_(u'something') except ValueError: - self.fail('ValueError during print') + self.fail(u'ValueError during print') finally: if old_lang: os.environ['LANG'] = old_lang @@ -539,14 +641,15 @@ self.io.install() def test_manual_search_gets_unicode(self): - self.io.addinput('\xc3\x82me') - self.io.addinput('\xc3\x82me') + self.io.addinput(b'\xc3\x82me') + self.io.addinput(b'\xc3\x82me') artist, album = commands.manual_search(False) self.assertEqual(artist, u'\xc2me') self.assertEqual(album, u'\xc2me') -class ConfigTest(unittest.TestCase, TestHelper): +@_common.slow_test() +class ConfigTest(unittest.TestCase, TestHelper, _common.Assertions): def setUp(self): self.setup_beets() @@ -556,6 +659,11 @@ self._old_home = os.environ.get('HOME') os.environ['HOME'] = self.temp_dir + # Also set APPDATA, the Windows equivalent of setting $HOME. 
+ self._old_appdata = os.environ.get('APPDATA') + os.environ['APPDATA'] = \ + os.path.join(self.temp_dir, 'AppData', 'Roaming') + self._orig_cwd = os.getcwd() self.test_cmd = self._make_test_cmd() commands.default_commands.append(self.test_cmd) @@ -582,11 +690,16 @@ def tearDown(self): commands.default_commands.pop() os.chdir(self._orig_cwd) - os.environ['HOME'] = self._old_home + if self._old_home is not None: + os.environ['HOME'] = self._old_home + if self._old_appdata is None: + del os.environ['APPDATA'] + else: + os.environ['APPDATA'] = self._old_appdata self.teardown_beets() def _make_test_cmd(self): - test_cmd = ui.Subcommand('test', help='test') + test_cmd = ui.Subcommand('test', help=u'test') def run(lib, options, args): test_cmd.lib = lib @@ -647,7 +760,7 @@ ui._raw_main(['test']) replacements = self.test_cmd.lib.replacements - self.assertEqual(replacements, [(re.compile(ur'[xy]'), u'z')]) + self.assertEqual(replacements, [(re.compile(u'[xy]'), 'z')]) def test_multiple_replacements_parsed(self): with self.write_config_file() as config: @@ -656,8 +769,8 @@ ui._raw_main(['test']) replacements = self.test_cmd.lib.replacements self.assertEqual(replacements, [ - (re.compile(ur'[xy]'), u'z'), - (re.compile(ur'foo'), u'bar'), + (re.compile(u'[xy]'), u'z'), + (re.compile(u'foo'), u'bar'), ]) def test_cli_config_option(self): @@ -731,10 +844,14 @@ file.write('statefile: state') ui._raw_main(['--config', cli_config_path, 'test']) - self.assertEqual(config['library'].as_filename(), - os.path.join(self.user_config_dir, 'beets.db')) - self.assertEqual(config['statefile'].as_filename(), - os.path.join(self.user_config_dir, 'state')) + self.assert_equal_path( + config['library'].as_filename(), + os.path.join(self.user_config_dir, 'beets.db') + ) + self.assert_equal_path( + config['statefile'].as_filename(), + os.path.join(self.user_config_dir, 'state') + ) def test_cli_config_paths_resolve_relative_to_beetsdir(self): os.environ['BEETSDIR'] = self.beetsdir @@ -745,23 +862,21 @@ file.write('statefile: state') ui._raw_main(['--config', cli_config_path, 'test']) - self.assertEqual(config['library'].as_filename(), - os.path.join(self.beetsdir, 'beets.db')) - self.assertEqual(config['statefile'].as_filename(), - os.path.join(self.beetsdir, 'state')) + self.assert_equal_path(config['library'].as_filename(), + os.path.join(self.beetsdir, 'beets.db')) + self.assert_equal_path(config['statefile'].as_filename(), + os.path.join(self.beetsdir, 'state')) def test_command_line_option_relative_to_working_dir(self): os.chdir(self.temp_dir) ui._raw_main(['--library', 'foo.db', 'test']) - self.assertEqual(config['library'].as_filename(), - os.path.join(os.getcwd(), 'foo.db')) + self.assert_equal_path(config['library'].as_filename(), + os.path.join(os.getcwd(), 'foo.db')) def test_cli_config_file_loads_plugin_commands(self): - plugin_path = os.path.join(_common.RSRC, 'beetsplug') - cli_config_path = os.path.join(self.temp_dir, 'config.yaml') with open(cli_config_path, 'w') as file: - file.write('pluginpath: %s\n' % plugin_path) + file.write('pluginpath: %s\n' % _common.PLUGINPATH) file.write('plugins: test') ui._raw_main(['--config', cli_config_path, 'plugin']) @@ -838,34 +953,34 @@ self.b.title = 'x' change, out = self._show() self.assertTrue(change) - self.assertTrue('title' in out) + self.assertTrue(u'title' in out) def test_int_fixed_field_change(self): self.b.track = 9 change, out = self._show() self.assertTrue(change) - self.assertTrue('track' in out) + self.assertTrue(u'track' in out) def 
test_floats_close_to_identical(self): self.a.length = 1.00001 self.b.length = 1.00005 change, out = self._show() self.assertFalse(change) - self.assertEqual(out, '') + self.assertEqual(out, u'') def test_floats_different(self): self.a.length = 1.00001 self.b.length = 2.00001 change, out = self._show() self.assertTrue(change) - self.assertTrue('length' in out) + self.assertTrue(u'length' in out) def test_both_values_shown(self): - self.a.title = 'foo' - self.b.title = 'bar' + self.a.title = u'foo' + self.b.title = u'bar' change, out = self._show() - self.assertTrue('foo' in out) - self.assertTrue('bar' in out) + self.assertTrue(u'foo' in out) + self.assertTrue(u'bar' in out) class ShowChangeTest(_common.TestCase): @@ -875,7 +990,7 @@ self.items = [_common.item()] self.items[0].track = 1 - self.items[0].path = '/path/to/file.mp3' + self.items[0].path = b'/path/to/file.mp3' self.info = autotag.AlbumInfo( u'the album', u'album id', u'the artist', u'artist id', [ autotag.TrackInfo(u'the title', u'track id', index=1) @@ -885,10 +1000,11 @@ def _show_change(self, items=None, info=None, cur_artist=u'the artist', cur_album=u'the album', dist=0.1): + """Return an unicode string representing the changes""" items = items or self.items info = info or self.info mapping = dict(zip(items, info.tracks)) - config['color'] = False + config['ui']['color'] = False album_dist = distance(items, info, mapping) album_dist._penalties = {'album': [dist]} commands.show_change( @@ -896,7 +1012,8 @@ cur_album, autotag.AlbumMatch(album_dist, info, mapping, set(), set()), ) - return self.io.getoutput().lower() + # FIXME decoding shouldn't be done here + return self.io.getoutput().lower().decode('utf8') def test_null_change(self): msg = self._show_change() @@ -916,24 +1033,61 @@ def test_item_data_change_with_unicode(self): self.items[0].title = u'caf\xe9' msg = self._show_change() - self.assertTrue(u'caf\xe9 -> the title' in msg.decode('utf8')) + self.assertTrue(u'caf\xe9 -> the title' in msg) def test_album_data_change_with_unicode(self): msg = self._show_change(cur_artist=u'caf\xe9', cur_album=u'another album') - self.assertTrue('correcting tags from:' in msg) + self.assertTrue(u'correcting tags from:' in msg) def test_item_data_change_title_missing(self): self.items[0].title = u'' msg = re.sub(r' +', ' ', self._show_change()) - self.assertTrue('file.mp3 -> the title' in msg) + self.assertTrue(u'file.mp3 -> the title' in msg) def test_item_data_change_title_missing_with_unicode_filename(self): self.items[0].title = u'' self.items[0].path = u'/path/to/caf\xe9.mp3'.encode('utf8') - msg = re.sub(r' +', ' ', self._show_change().decode('utf8')) - self.assertTrue(u'caf\xe9.mp3 -> the title' in msg - or u'caf.mp3 ->' in msg) + msg = re.sub(r' +', ' ', self._show_change()) + self.assertTrue(u'caf\xe9.mp3 -> the title' in msg or + u'caf.mp3 ->' in msg) + + +class SummarizeItemsTest(_common.TestCase): + def setUp(self): + super(SummarizeItemsTest, self).setUp() + item = library.Item() + item.bitrate = 4321 + item.length = 10 * 60 + 54 + item.format = "F" + self.item = item + fsize_mock = patch('beets.library.Item.try_filesize').start() + fsize_mock.return_value = 987 + + def test_summarize_item(self): + summary = commands.summarize_items([], True) + self.assertEqual(summary, u"") + + summary = commands.summarize_items([self.item], True) + self.assertEqual(summary, u"F, 4kbps, 10:54, 987.0 B") + + def test_summarize_items(self): + summary = commands.summarize_items([], False) + self.assertEqual(summary, u"0 items") + + summary = 
commands.summarize_items([self.item], False) + self.assertEqual(summary, u"1 items, F, 4kbps, 10:54, 987.0 B") + + i2 = deepcopy(self.item) + summary = commands.summarize_items([self.item, i2], False) + self.assertEqual(summary, u"2 items, F, 4kbps, 21:48, 1.9 KiB") + + i2.format = "G" + summary = commands.summarize_items([self.item, i2], False) + self.assertEqual(summary, u"2 items, F 1, G 1, 4kbps, 21:48, 1.9 KiB") + + summary = commands.summarize_items([self.item, i2, i2], False) + self.assertEqual(summary, u"3 items, G 2, F 1, 4kbps, 32:42, 2.9 KiB") class PathFormatTest(_common.TestCase): @@ -943,58 +1097,257 @@ config['paths'] = {u'foo': u'bar'} pf = ui.get_path_formats() key, tmpl = pf[0] - self.assertEqual(key, 'foo') - self.assertEqual(tmpl.original, 'bar') + self.assertEqual(key, u'foo') + self.assertEqual(tmpl.original, u'bar') self.assertEqual(pf[1:], default_formats) +@_common.slow_test() class PluginTest(_common.TestCase): def test_plugin_command_from_pluginpath(self): - config['pluginpath'] = [os.path.join(_common.RSRC, 'beetsplug')] + config['pluginpath'] = [_common.PLUGINPATH] config['plugins'] = ['test'] ui._raw_main(['test']) +@_common.slow_test() class CompletionTest(_common.TestCase): def test_completion(self): # Load plugin commands - config['pluginpath'] = [os.path.join(_common.RSRC, 'beetsplug')] + config['pluginpath'] = [_common.PLUGINPATH] config['plugins'] = ['test'] - test_script = os.path.join( - os.path.dirname(__file__), 'test_completion.sh' - ) - bash_completion = os.path.abspath(os.environ.get( - 'BASH_COMPLETION_SCRIPT', '/etc/bash_completion')) - # Tests run in bash cmd = os.environ.get('BEETS_TEST_SHELL', '/bin/bash --norc').split() if not has_program(cmd[0]): - self.skipTest('bash not available') + self.skipTest(u'bash not available') tester = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE) - # Load bash_completion + # Load bash_completion library. + for path in commands.BASH_COMPLETION_PATHS: + if os.path.exists(util.syspath(path)): + bash_completion = path + break + else: + self.skipTest(u'bash-completion script not found') try: - with open(bash_completion, 'r') as bash_completion: - tester.stdin.writelines(bash_completion) + with open(util.syspath(bash_completion), 'r') as f: + tester.stdin.writelines(f) except IOError: - self.skipTest('bash-completion script not found') + self.skipTest(u'could not read bash-completion script') - # Load complection script + # Load completion script. self.io.install() ui._raw_main(['completion']) completion_script = self.io.getoutput() self.io.restore() tester.stdin.writelines(completion_script) - # Load testsuite + # Load test suite. + test_script = os.path.join(_common.RSRC, b'test_completion.sh') with open(test_script, 'r') as test_script: tester.stdin.writelines(test_script) (out, err) = tester.communicate() - if tester.returncode != 0 or out != "completion tests passed\n": + if tester.returncode != 0 or out != u"completion tests passed\n": print(out) - self.fail('test/test_completion.sh did not execute properly') + self.fail(u'test/test_completion.sh did not execute properly') + + +class CommonOptionsParserCliTest(unittest.TestCase, TestHelper): + """Test CommonOptionsParser and formatting LibModel formatting on 'list' + command. 
+ """ + def setUp(self): + self.setup_beets() + self.lib = library.Library(':memory:') + self.item = _common.item() + self.item.path = b'xxx/yyy' + self.lib.add(self.item) + self.lib.add_album([self.item]) + + def tearDown(self): + self.teardown_beets() + + def test_base(self): + l = self.run_with_output(u'ls') + self.assertEqual(l, u'the artist - the album - the title\n') + + l = self.run_with_output(u'ls', u'-a') + self.assertEqual(l, u'the album artist - the album\n') + + def test_path_option(self): + l = self.run_with_output(u'ls', u'-p') + self.assertEqual(l, u'xxx/yyy\n') + + l = self.run_with_output(u'ls', u'-a', u'-p') + self.assertEqual(l, u'xxx\n') + + def test_format_option(self): + l = self.run_with_output(u'ls', u'-f', u'$artist') + self.assertEqual(l, u'the artist\n') + + l = self.run_with_output(u'ls', u'-a', u'-f', u'$albumartist') + self.assertEqual(l, u'the album artist\n') + + def test_format_option_unicode(self): + l = self.run_with_output(b'ls', b'-f', + u'caf\xe9'.encode(ui._arg_encoding())) + self.assertEqual(l, u'caf\xe9\n') + + def test_root_format_option(self): + l = self.run_with_output(u'--format-item', u'$artist', + u'--format-album', u'foo', u'ls') + self.assertEqual(l, u'the artist\n') + + l = self.run_with_output(u'--format-item', u'foo', + u'--format-album', u'$albumartist', + u'ls', u'-a') + self.assertEqual(l, u'the album artist\n') + + def test_help(self): + l = self.run_with_output(u'help') + self.assertIn(u'Usage:', l) + + l = self.run_with_output(u'help', u'list') + self.assertIn(u'Usage:', l) + + with self.assertRaises(ui.UserError): + self.run_command(u'help', u'this.is.not.a.real.command') + + def test_stats(self): + l = self.run_with_output(u'stats') + self.assertIn(u'Approximate total size:', l) + + # # Need to have more realistic library setup for this to work + # l = self.run_with_output('stats', '-e') + # self.assertIn('Total size:', l) + + def test_version(self): + l = self.run_with_output(u'version') + self.assertIn(u'no plugins loaded', l) + + # # Need to have plugin loaded + # l = self.run_with_output('version') + # self.assertIn('plugins: ', l) + + +class CommonOptionsParserTest(unittest.TestCase, TestHelper): + def setUp(self): + self.setup_beets() + + def tearDown(self): + self.teardown_beets() + + def test_album_option(self): + parser = ui.CommonOptionsParser() + self.assertFalse(parser._album_flags) + parser.add_album_option() + self.assertTrue(bool(parser._album_flags)) + + self.assertEqual(parser.parse_args([]), ({'album': None}, [])) + self.assertEqual(parser.parse_args([u'-a']), ({'album': True}, [])) + self.assertEqual(parser.parse_args([u'--album']), + ({'album': True}, [])) + + def test_path_option(self): + parser = ui.CommonOptionsParser() + parser.add_path_option() + self.assertFalse(parser._album_flags) + + config['format_item'].set('$foo') + self.assertEqual(parser.parse_args([]), ({'path': None}, [])) + self.assertEqual(config['format_item'].get(unicode), u'$foo') + + self.assertEqual(parser.parse_args([u'-p']), + ({'path': True, 'format': u'$path'}, [])) + self.assertEqual(parser.parse_args(['--path']), + ({'path': True, 'format': u'$path'}, [])) + + self.assertEqual(config['format_item'].get(unicode), u'$path') + self.assertEqual(config['format_album'].get(unicode), u'$path') + + def test_format_option(self): + parser = ui.CommonOptionsParser() + parser.add_format_option() + self.assertFalse(parser._album_flags) + + config['format_item'].set('$foo') + self.assertEqual(parser.parse_args([]), ({'format': None}, [])) + 
self.assertEqual(config['format_item'].get(unicode), u'$foo') + + self.assertEqual(parser.parse_args([u'-f', u'$bar']), + ({'format': u'$bar'}, [])) + self.assertEqual(parser.parse_args([u'--format', u'$baz']), + ({'format': u'$baz'}, [])) + + self.assertEqual(config['format_item'].get(unicode), u'$baz') + self.assertEqual(config['format_album'].get(unicode), u'$baz') + + def test_format_option_with_target(self): + with self.assertRaises(KeyError): + ui.CommonOptionsParser().add_format_option(target='thingy') + + parser = ui.CommonOptionsParser() + parser.add_format_option(target='item') + + config['format_item'].set('$item') + config['format_album'].set('$album') + + self.assertEqual(parser.parse_args([u'-f', u'$bar']), + ({'format': u'$bar'}, [])) + + self.assertEqual(config['format_item'].get(unicode), u'$bar') + self.assertEqual(config['format_album'].get(unicode), u'$album') + + def test_format_option_with_album(self): + parser = ui.CommonOptionsParser() + parser.add_album_option() + parser.add_format_option() + + config['format_item'].set('$item') + config['format_album'].set('$album') + + parser.parse_args([u'-f', u'$bar']) + self.assertEqual(config['format_item'].get(unicode), u'$bar') + self.assertEqual(config['format_album'].get(unicode), u'$album') + + parser.parse_args([u'-a', u'-f', u'$foo']) + self.assertEqual(config['format_item'].get(unicode), u'$bar') + self.assertEqual(config['format_album'].get(unicode), u'$foo') + + parser.parse_args([u'-f', u'$foo2', u'-a']) + self.assertEqual(config['format_album'].get(unicode), u'$foo2') + + def test_add_all_common_options(self): + parser = ui.CommonOptionsParser() + parser.add_all_common_options() + self.assertEqual(parser.parse_args([]), + ({'album': None, 'path': None, 'format': None}, [])) + + +class EncodingTest(_common.TestCase): + """Tests for the `terminal_encoding` config option and our + `_in_encoding` and `_out_encoding` utility functions. + """ + + def out_encoding_overridden(self): + config['terminal_encoding'] = 'fake_encoding' + self.assertEqual(ui._out_encoding(), 'fake_encoding') + + def in_encoding_overridden(self): + config['terminal_encoding'] = 'fake_encoding' + self.assertEqual(ui._in_encoding(), 'fake_encoding') + + def out_encoding_default_utf8(self): + with patch('sys.stdout') as stdout: + stdout.encoding = None + self.assertEqual(ui._out_encoding(), 'utf8') + + def in_encoding_default_utf8(self): + with patch('sys.stdin') as stdin: + stdin.encoding = None + self.assertEqual(ui._in_encoding(), 'utf8') def suite(): diff -Nru beets-1.3.8+dfsg/test/test_util.py beets-1.3.19/test/test_util.py --- beets-1.3.8+dfsg/test/test_util.py 1970-01-01 00:00:00.000000000 +0000 +++ beets-1.3.19/test/test_util.py 2016-06-20 01:53:12.000000000 +0000 @@ -0,0 +1,184 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +"""Tests for base utils from the beets.util package. 
+""" +from __future__ import division, absolute_import, print_function + +import sys +import re +import os +import subprocess + +from mock import patch, Mock + +from test._common import unittest +from test import _common +from beets import util + + +class UtilTest(unittest.TestCase): + def test_open_anything(self): + with _common.system_mock('Windows'): + self.assertEqual(util.open_anything(), 'start') + + with _common.system_mock('Darwin'): + self.assertEqual(util.open_anything(), 'open') + + with _common.system_mock('Tagada'): + self.assertEqual(util.open_anything(), 'xdg-open') + + @patch('os.execlp') + @patch('beets.util.open_anything') + def test_interactive_open(self, mock_open, mock_execlp): + mock_open.return_value = u'tagada' + util.interactive_open(['foo'], util.open_anything()) + mock_execlp.assert_called_once_with(u'tagada', u'tagada', u'foo') + mock_execlp.reset_mock() + + util.interactive_open(['foo'], u'bar') + mock_execlp.assert_called_once_with(u'bar', u'bar', u'foo') + + def test_sanitize_unix_replaces_leading_dot(self): + with _common.platform_posix(): + p = util.sanitize_path(u'one/.two/three') + self.assertFalse(u'.' in p) + + def test_sanitize_windows_replaces_trailing_dot(self): + with _common.platform_windows(): + p = util.sanitize_path(u'one/two./three') + self.assertFalse(u'.' in p) + + def test_sanitize_windows_replaces_illegal_chars(self): + with _common.platform_windows(): + p = util.sanitize_path(u':*?"<>|') + self.assertFalse(u':' in p) + self.assertFalse(u'*' in p) + self.assertFalse(u'?' in p) + self.assertFalse(u'"' in p) + self.assertFalse(u'<' in p) + self.assertFalse(u'>' in p) + self.assertFalse(u'|' in p) + + def test_sanitize_windows_replaces_trailing_space(self): + with _common.platform_windows(): + p = util.sanitize_path(u'one/two /three') + self.assertFalse(u' ' in p) + + def test_sanitize_path_works_on_empty_string(self): + with _common.platform_posix(): + p = util.sanitize_path(u'') + self.assertEqual(p, u'') + + def test_sanitize_with_custom_replace_overrides_built_in_sub(self): + with _common.platform_posix(): + p = util.sanitize_path(u'a/.?/b', [ + (re.compile(r'foo'), u'bar'), + ]) + self.assertEqual(p, u'a/.?/b') + + def test_sanitize_with_custom_replace_adds_replacements(self): + with _common.platform_posix(): + p = util.sanitize_path(u'foo/bar', [ + (re.compile(r'foo'), u'bar'), + ]) + self.assertEqual(p, u'bar/bar') + + @unittest.skip(u'unimplemented: #359') + def test_sanitize_empty_component(self): + with _common.platform_posix(): + p = util.sanitize_path(u'foo//bar', [ + (re.compile(r'^$'), u'_'), + ]) + self.assertEqual(p, u'foo/_/bar') + + @patch('beets.util.subprocess.Popen') + def test_command_output(self, mock_popen): + def popen_fail(*args, **kwargs): + m = Mock(returncode=1) + m.communicate.return_value = u'foo', u'bar' + return m + + mock_popen.side_effect = popen_fail + with self.assertRaises(subprocess.CalledProcessError) as exc_context: + util.command_output([b"taga", b"\xc3\xa9"]) + self.assertEqual(exc_context.exception.returncode, 1) + self.assertEqual(exc_context.exception.cmd, b"taga \xc3\xa9") + + +class PathConversionTest(_common.TestCase): + def test_syspath_windows_format(self): + with _common.platform_windows(): + path = os.path.join(u'a', u'b', u'c') + outpath = util.syspath(path) + self.assertTrue(isinstance(outpath, unicode)) + self.assertTrue(outpath.startswith(u'\\\\?\\')) + + def test_syspath_windows_format_unc_path(self): + # The \\?\ prefix on Windows behaves differently with UNC + # (network share) paths. 
+ path = '\\\\server\\share\\file.mp3' + with _common.platform_windows(): + outpath = util.syspath(path) + self.assertTrue(isinstance(outpath, unicode)) + self.assertEqual(outpath, u'\\\\?\\UNC\\server\\share\\file.mp3') + + def test_syspath_posix_unchanged(self): + with _common.platform_posix(): + path = os.path.join(u'a', u'b', u'c') + outpath = util.syspath(path) + self.assertEqual(path, outpath) + + def _windows_bytestring_path(self, path): + old_gfse = sys.getfilesystemencoding + sys.getfilesystemencoding = lambda: 'mbcs' + try: + with _common.platform_windows(): + return util.bytestring_path(path) + finally: + sys.getfilesystemencoding = old_gfse + + def test_bytestring_path_windows_encodes_utf8(self): + path = u'caf\xe9' + outpath = self._windows_bytestring_path(path) + self.assertEqual(path, outpath.decode('utf8')) + + def test_bytesting_path_windows_removes_magic_prefix(self): + path = u'\\\\?\\C:\\caf\xe9' + outpath = self._windows_bytestring_path(path) + self.assertEqual(outpath, u'C:\\caf\xe9'.encode('utf8')) + + +class PathTruncationTest(_common.TestCase): + def test_truncate_bytestring(self): + with _common.platform_posix(): + p = util.truncate_path(b'abcde/fgh', 4) + self.assertEqual(p, b'abcd/fgh') + + def test_truncate_unicode(self): + with _common.platform_posix(): + p = util.truncate_path(u'abcde/fgh', 4) + self.assertEqual(p, u'abcd/fgh') + + def test_truncate_preserves_extension(self): + with _common.platform_posix(): + p = util.truncate_path(u'abcde/fgh.ext', 5) + self.assertEqual(p, u'abcde/f.ext') + + +def suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff -Nru beets-1.3.8+dfsg/test/test_vfs.py beets-1.3.19/test/test_vfs.py --- beets-1.3.8+dfsg/test/test_vfs.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_vfs.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -13,8 +14,10 @@ # included in all copies or substantial portions of the Software. 
"""Tests for the virtual filesystem builder..""" -import _common -from _common import unittest +from __future__ import division, absolute_import, print_function + +from test import _common +from test._common import unittest from beets import library from beets import vfs @@ -23,8 +26,8 @@ def setUp(self): super(VFSTest, self).setUp() self.lib = library.Library(':memory:', path_formats=[ - ('default', 'albums/$album/$title'), - ('singleton:true', 'tracks/$artist/$title'), + (u'default', u'albums/$album/$title'), + (u'singleton:true', u'tracks/$artist/$title'), ]) self.lib.add(_common.item()) self.lib.add_album([_common.item()]) diff -Nru beets-1.3.8+dfsg/test/test_web.py beets-1.3.19/test/test_web.py --- beets-1.3.8+dfsg/test/test_web.py 2014-09-12 23:00:33.000000000 +0000 +++ beets-1.3.19/test/test_web.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,11 +1,15 @@ +# -*- coding: utf-8 -*- + """Tests for the 'web' plugin""" -from _common import unittest -import _common +from __future__ import division, absolute_import, print_function + +from test._common import unittest +from test import _common import json import beetsplug from beets.library import Item, Album -beetsplug.__path__ = ['./beetsplug', '../beetsplug'] +beetsplug.__path__ = ['./beetsplug', '../beetsplug'] # noqa from beetsplug import web @@ -17,10 +21,10 @@ # Add fixtures for track in self.lib.items(): track.remove() - self.lib.add(Item(title='title', path='', id=1)) - self.lib.add(Item(title='another title', path='', id=2)) - self.lib.add(Album(album='album', id=3)) - self.lib.add(Album(album='another album', id=4)) + self.lib.add(Item(title=u'title', path='', id=1)) + self.lib.add(Item(title=u'another title', path='', id=2)) + self.lib.add(Album(album=u'album', id=3)) + self.lib.add(Album(album=u'another album', id=4)) web.app.config['TESTING'] = True web.app.config['lib'] = self.lib @@ -28,27 +32,27 @@ def test_get_all_items(self): response = self.client.get('/item/') - response.json = json.loads(response.data) + response.json = json.loads(response.data.decode('utf-8')) self.assertEqual(response.status_code, 200) self.assertEqual(len(response.json['items']), 2) def test_get_single_item_by_id(self): response = self.client.get('/item/1') - response.json = json.loads(response.data) + response.json = json.loads(response.data.decode('utf-8')) self.assertEqual(response.status_code, 200) self.assertEqual(response.json['id'], 1) - self.assertEqual(response.json['title'], 'title') + self.assertEqual(response.json['title'], u'title') def test_get_multiple_items_by_id(self): response = self.client.get('/item/1,2') - response.json = json.loads(response.data) + response.json = json.loads(response.data.decode('utf-8')) self.assertEqual(response.status_code, 200) self.assertEqual(len(response.json['items']), 2) response_titles = [item['title'] for item in response.json['items']] - self.assertItemsEqual(response_titles, ['title', 'another title']) + self.assertItemsEqual(response_titles, [u'title', u'another title']) def test_get_single_item_not_found(self): response = self.client.get('/item/3') @@ -56,46 +60,47 @@ def test_get_item_empty_query(self): response = self.client.get('/item/query/') - response.json = json.loads(response.data) + response.json = json.loads(response.data.decode('utf-8')) self.assertEqual(response.status_code, 200) self.assertEqual(len(response.json['items']), 2) def test_get_simple_item_query(self): response = self.client.get('/item/query/another') - response.json = json.loads(response.data) + response.json = 
json.loads(response.data.decode('utf-8')) self.assertEqual(response.status_code, 200) self.assertEqual(len(response.json['results']), 1) - self.assertEqual(response.json['results'][0]['title'], 'another title') + self.assertEqual(response.json['results'][0]['title'], + u'another title') def test_get_all_albums(self): response = self.client.get('/album/') - response.json = json.loads(response.data) + response.json = json.loads(response.data.decode('utf-8')) self.assertEqual(response.status_code, 200) response_albums = [album['album'] for album in response.json['albums']] - self.assertItemsEqual(response_albums, ['album', 'another album']) + self.assertItemsEqual(response_albums, [u'album', u'another album']) def test_get_single_album_by_id(self): response = self.client.get('/album/2') - response.json = json.loads(response.data) + response.json = json.loads(response.data.decode('utf-8')) self.assertEqual(response.status_code, 200) self.assertEqual(response.json['id'], 2) - self.assertEqual(response.json['album'], 'another album') + self.assertEqual(response.json['album'], u'another album') def test_get_multiple_albums_by_id(self): response = self.client.get('/album/1,2') - response.json = json.loads(response.data) + response.json = json.loads(response.data.decode('utf-8')) self.assertEqual(response.status_code, 200) response_albums = [album['album'] for album in response.json['albums']] - self.assertItemsEqual(response_albums, ['album', 'another album']) + self.assertItemsEqual(response_albums, [u'album', u'another album']) def test_get_album_empty_query(self): response = self.client.get('/album/query/') - response.json = json.loads(response.data) + response.json = json.loads(response.data.decode('utf-8')) self.assertEqual(response.status_code, 200) self.assertEqual(len(response.json['albums']), 2) diff -Nru beets-1.3.8+dfsg/test/test_zero.py beets-1.3.19/test/test_zero.py --- beets-1.3.8+dfsg/test/test_zero.py 2014-09-18 02:01:05.000000000 +0000 +++ beets-1.3.19/test/test_zero.py 2016-06-20 01:53:12.000000000 +0000 @@ -1,7 +1,11 @@ +# -*- coding: utf-8 -*- + """Tests for the 'zero' plugin""" -from _common import unittest -from helper import TestHelper +from __future__ import division, absolute_import, print_function + +from test._common import unittest +from test.helper import TestHelper from beets.library import Item from beets import config @@ -19,7 +23,7 @@ def test_no_patterns(self): tags = { - 'comments': 'test comment', + 'comments': u'test comment', 'day': 13, 'month': 3, 'year': 2012, @@ -27,9 +31,9 @@ z = ZeroPlugin() z.debug = False z.fields = ['comments', 'month', 'day'] - z.patterns = {'comments': ['.'], - 'month': ['.'], - 'day': ['.']} + z.patterns = {'comments': [u'.'], + 'month': [u'.'], + 'day': [u'.']} z.write_event(None, None, tags) self.assertEqual(tags['comments'], None) self.assertEqual(tags['day'], None) @@ -40,11 +44,11 @@ z = ZeroPlugin() z.debug = False z.fields = ['comments', 'year'] - z.patterns = {'comments': 'eac lame'.split(), - 'year': '2098 2099'.split()} + z.patterns = {'comments': u'eac lame'.split(), + 'year': u'2098 2099'.split()} tags = { - 'comments': 'from lame collection, ripped by eac', + 'comments': u'from lame collection, ripped by eac', 'year': 2012, } z.write_event(None, None, tags) @@ -73,15 +77,49 @@ def test_do_not_change_database(self): item = self.add_item_fixture(year=2000) + item.write() mediafile = MediaFile(item.path) + self.assertEqual(2000, mediafile.year) config['zero'] = {'fields': ['year']} self.load_plugins('zero') item.write() + 
mediafile = MediaFile(item.path) self.assertEqual(item['year'], 2000) self.assertIsNone(mediafile.year) + def test_change_database(self): + item = self.add_item_fixture(year=2000) + item.write() + mediafile = MediaFile(item.path) + self.assertEqual(2000, mediafile.year) + + config['zero'] = { + 'fields': [u'year'], + 'update_database': True, + } + self.load_plugins('zero') + + item.write() + mediafile = MediaFile(item.path) + self.assertEqual(item['year'], 0) + self.assertIsNone(mediafile.year) + + def test_album_art(self): + path = self.create_mediafile_fixture(images=['jpg']) + item = Item.from_path(path) + + mediafile = MediaFile(item.path) + self.assertNotEqual(0, len(mediafile.images)) + + config['zero'] = {'fields': [u'images']} + self.load_plugins('zero') + + item.write() + mediafile = MediaFile(item.path) + self.assertEqual(0, len(mediafile.images)) + def suite(): return unittest.TestLoader().loadTestsFromName(__name__)
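
For context, the new zero-plugin tests in the hunk above (test_change_database, test_album_art) exercise the plugin's update_database option. Below is a minimal sketch of the configuration those tests drive, not part of the upstream patch: it assumes a beets library is already set up, the zero plugin is active (the tests enable it via their helper's load_plugins('zero')), and `item` is an existing library Item whose file carries a year tag; any name not shown in the diff is illustrative only.

    # Sketch mirroring the setup used in test_change_database above.
    from beets import config

    config['zero'] = {
        'fields': ['year'],        # tags the plugin blanks on write
        'update_database': True,   # also clear the value stored in the library DB
    }

    # With the plugin active, writing the item strips the year from the
    # on-disk file; because update_database is set, the library entry is
    # zeroed as well. The test then asserts:
    #   item['year'] == 0
    #   MediaFile(item.path).year is None
    item.write()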