Message ID | 1454692880-14154-2-git-send-email-stephen.finucane@intel.com |
---|---|
State | Accepted |
Headers | show |
On 05 Feb 17:21, Stephen Finucane wrote: > From: Adam Jackson <ajax@redhat.com> > > This is most useful when listing patches, as it lets you get the most > recent results instead of the oldest. > > v2: Resolve "Negative indexing is not supported." issue and include > unit tests to ensure this doesn't regress. Bump API version to > 1.2 > > Signed-off-by: Adam Jackson <ajax@redhat.com> > Signed-off-by: Stephen Finucane <stephen.finucane@intel.com> Merged.
On Fri, Feb 05, 2016 at 05:21:20PM +0000, Stephen Finucane wrote: > @@ -602,6 +607,8 @@ def patch_list(filt=None): > > if max_count > 0: > return list(map(patch_to_dict, patches[:max_count])) > + elif max_count < 0: > + return list(map(patch_to_dict, patches[len(patches)+max_count:])) This will do something you probably don't want to happen: - len(patches) will actually retrieve all patches in memory and return the length from that data, - the slicing operation will then happen without querying the DB and slice the results cached from the len() evaluation. You can use 2 queries to not have to load all patches: patches[patches.count() + max_count:] Alternatively, it's possible to use reverse() to not have to depend on querying the number of items beforehand (which is what I did in my version of the patch) and reverse the list once again once we retrieved the elements. Given that we already go through the list of patches to map each patch to a dict, iterating in reverse order doesn't add extra cost. Something along those lines maybe? query = patches.reverse()[:-max_count] return [patch_to_dict(patch) for patch in reversed(query)]
On 31 Mar 15:08, Damien Lespiau wrote: > On Fri, Feb 05, 2016 at 05:21:20PM +0000, Stephen Finucane wrote: > > @@ -602,6 +607,8 @@ def patch_list(filt=None): > > > > if max_count > 0: > > return list(map(patch_to_dict, patches[:max_count])) > > + elif max_count < 0: > > + return list(map(patch_to_dict, patches[len(patches)+max_count:])) > > This will do something you probably don't want to happen: > > - len(patches) will actually retrieve all patches in memory and return > the length from that data, > > - the slicing operation will then happen without querying the DB and > slice the results cached from the len() evaluation. > > You can use 2 queries to not have to load all patches: > > patches[patches.count() + max_count:] > > Alternatively, it's possible to use reverse() to not have to depend on > querying the number of items beforehand (which is what I did in my > version of the patch) and reverse the list once again once we retrieved > the elements. Given that we already go through the list of patches to > map each patch to a dict, iterating in reverse order doesn't add extra > cost. Something along those lines maybe? > > query = patches.reverse()[:-max_count] > return [patch_to_dict(patch) for patch in reversed(query)] Yup, this is an oversight. If you (or anyone else) would like to submit a patch then please do so. I've added this to the bug tracker [1]. Stephen [1] https://github.com/getpatchwork/patchwork/issues/35
diff --git a/patchwork/bin/pwclient b/patchwork/bin/pwclient index 58f2338..c114ed1 100755 --- a/patchwork/bin/pwclient +++ b/patchwork/bin/pwclient @@ -408,7 +408,12 @@ def main(): filter_parser.add_argument( '-n', metavar='MAX#', type=int, - help='''Restrict number of results''' + help='''Return first n results''' + ) + filter_parser.add_argument( + '-N', metavar='MAX#', + type=int, + help='''Return last N results''' ) filter_parser.add_argument( '-m', metavar='MESSAGEID', @@ -556,6 +561,12 @@ def main(): except: action_parser.error("Invalid maximum count '%s'" % args.get('n')) + if args.get('N') != None: + try: + filt.add("max_count", 0 - args.get('N')) + except: + action_parser.error("Invalid maximum count '%s'" % args.get('N')) + do_signoff = args.get('signoff') # grab settings from config files diff --git a/patchwork/tests/test_xmlrpc.py b/patchwork/tests/test_xmlrpc.py index c6f19bf..454e22a 100644 --- a/patchwork/tests/test_xmlrpc.py +++ b/patchwork/tests/test_xmlrpc.py @@ -76,3 +76,9 @@ class XMLRPCTest(LiveServerTestCase): patches = self.rpc.patch_list({'max_count': 2}) self.assertEqual(len(patches), 2) self.assertEqual(patches[0]['id'], patch_objs[0].id) + + def testListNegativeMaxCount(self): + patch_objs = self._createPatches(5) + patches = self.rpc.patch_list({'max_count': -1}) + self.assertEqual(len(patches), 1) + self.assertEqual(patches[-1]['id'], patch_objs[0].id) diff --git a/patchwork/views/xmlrpc.py b/patchwork/views/xmlrpc.py index 86abd6d..ae5016c 100644 --- a/patchwork/views/xmlrpc.py +++ b/patchwork/views/xmlrpc.py @@ -379,7 +379,7 @@ def pw_rpc_version(): Returns: Version of the API. 
""" - return (1, 1, 0) + return (1, 2, 0) @xmlrpc_method() @@ -407,6 +407,9 @@ def project_list(search_str=None, max_count=0): if max_count > 0: return list(map(project_to_dict, projects[:max_count])) + elif max_count < 0: + return list(map(project_to_dict, + projects[len(projects)+max_count:])) else: return list(map(project_to_dict, projects)) except Project.DoesNotExist: @@ -459,6 +462,8 @@ def person_list(search_str=None, max_count=0): if max_count > 0: return list(map(person_to_dict, people[:max_count])) + elif max_count < 0: + return list(map(person_to_dict, people[len(people)+max_count:])) else: return list(map(person_to_dict, people)) except Person.DoesNotExist: @@ -602,6 +607,8 @@ def patch_list(filt=None): if max_count > 0: return list(map(patch_to_dict, patches[:max_count])) + elif max_count < 0: + return list(map(patch_to_dict, patches[len(patches)+max_count:])) else: return list(map(patch_to_dict, patches)) except Patch.DoesNotExist: @@ -795,6 +802,8 @@ def state_list(search_str=None, max_count=0): if max_count > 0: return list(map(state_to_dict, states[:max_count])) + elif max_count < 0: + return list(map(state_to_dict, states[len(states)+max_count:])) else: return list(map(state_to_dict, states)) except State.DoesNotExist: