--- urlgrabber-3.1.0.orig/debian/urlgrabber.1 +++ urlgrabber-3.1.0/debian/urlgrabber.1 @@ -0,0 +1,133 @@ +.\" Title: urlgrabber +.\" Author: +.\" Generator: DocBook XSL Stylesheets v1.72.0 +.\" Date: 04/09/2007 +.\" Manual: +.\" Source: +.\" +.TH "URLGRABBER" "1" "04/09/2007" "" "" +.\" disable hyphenation +.nh +.\" disable justification (adjust text to left margin only) +.ad l +.SH "NAME" +urlgrabber \- a high\-level cross\-protocol url\-grabber. +.SH "SYNOPSIS" +\fIurlgrabber\fR [OPTIONS] URL [FILE] +.sp +.SH "DESCRIPTION" +urlgrabber is a binary program and python module for fetching files. It is designed to be used in programs that need common (but not necessarily simple) url\-fetching features. +.sp +.SH "OPTIONS" +.PP +\-\-help, \-h +.RS 4 +help page specifying available options to the binary program. +.RE +.PP +\-\-copy\-local +.RS 4 +ignored except for file:// urls, in which case it specifies whether urlgrab should still make a copy of the file, or simply point to the existing copy. +.RE +.PP +\-\-throttle=NUMBER +.RS 4 +if it's an int, it's the bytes/second throttle limit. If it's a float, it is first multiplied by bandwidth. If throttle == 0, throttling is disabled. If None, the module\-level default (which can be set with set_throttle) is used. +.RE +.PP +\-\-bandwidth=NUMBER +.RS 4 +the nominal max bandwidth in bytes/second. If throttle is a float and bandwidth == 0, throttling is disabled. If None, the module\-level default (which can be set with set_bandwidth) is used. +.RE +.PP +\-\-range=RANGE +.RS 4 +a tuple of the form first_byte,last_byte describing a byte range to retrieve. Either or both of the values may be specified. If first_byte is None, byte offset 0 is assumed. If last_byte is None, the last byte available is assumed. Note that both first and last_byte values are inclusive so a range of (10,11) would return the 10th and 11th bytes of the resource. +.RE +.PP +\-\-user\-agent=STR +.RS 4 +the user\-agent string provided if the url is HTTP. 
+.RE +.PP +\-\-retry=NUMBER +.RS 4 +the number of times to retry the grab before bailing. If this is zero, it will retry forever. This was intentional\&... really, it was :). If this value is not supplied or is supplied but is None retrying does not occur. +.RE +.PP +\-\-retrycodes +.RS 4 +a sequence of errorcodes (values of e.errno) for which it should retry. See the doc on URLGrabError for more details on this. retrycodes defaults to \-1,2,4,5,6,7 if not specified explicitly. +.RE +.SH "MODULE USE EXAMPLES" +In its simplest form, urlgrabber can be a replacement for urllib2's open, or even python's file if you're just reading: +.sp +.RS 4 +.nf + from urlgrabber import urlopen + fo = urlopen(url) + data = fo.read() + fo.close() +.fi +.sp +.RE +Here, the url can be http, https, ftp, or file. It's also pretty smart so if you just give it something like /tmp/foo, it will figure it out. For even more fun, you can also do: +.sp +.RS 4 +.nf + from urlgrabber import urlgrab, urlread + local_filename = urlgrab(url) # grab a local copy of the file + data = urlread(url) # just read the data into a string +.fi +.sp +.RE +Now, like urllib2, what's really happening here is that you're using a module\-level object (called a grabber) that kind of serves as a default. That's just fine, but you might want to get your own private version for a couple of reasons: +.sp +.RS 4 +.nf +* it's a little ugly to modify the default grabber because you have to + reach into the module to do it +* you could run into conflicts if different parts of the code + modify the default grabber and therefore expect different + behavior +.fi +.sp +.RE +Therefore, you're probably better off making your own. This also gives you lots of flexibility for later, as you'll see: +.sp +.RS 4 +.nf + from urlgrabber.grabber import URLGrabber + g = URLGrabber() + data = g.urlread(url) +.fi +.sp +.RE +This is nice because you can specify options when you create the grabber. 
For example, let's turn on simple reget mode so that if we have part of a file, we only need to fetch the rest: +.sp +.RS 4 +.nf + from urlgrabber.grabber import URLGrabber + g = URLGrabber(reget='simple') + local_filename = g.urlgrab(url) +.fi +.sp +.RE +The available options are listed in the module documentation, and can usually be specified as a default at the grabber\-level or as options to the method: +.sp +.RS 4 +.nf +from urlgrabber.grabber import URLGrabber +g = URLGrabber(reget='simple') +local_filename = g.urlgrab(url, filename=None, reget=None) +.fi +.sp +.RE +.SH "AUTHORS" +Written by: Michael D. Stenner and Ryan Tomayko +.sp +This manual page was written by Kevin Coyner for the Debian system (but may be used by others). It borrows heavily on the documentation included in the urlgrabber module. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU General Public License, Version 2 or any later version published by the Free Software Foundation. 
+.sp +.SH "RESOURCES" +Main web site: \fIhttp://linux.duke.edu/projects/urlgrabber/\fR +.sp --- urlgrabber-3.1.0.orig/debian/watch +++ urlgrabber-3.1.0/debian/watch @@ -0,0 +1,2 @@ +version=3 +http://linux.duke.edu/projects/urlgrabber/download/urlgrabber-(.*)\.tar\.gz --- urlgrabber-3.1.0.orig/debian/control +++ urlgrabber-3.1.0/debian/control @@ -0,0 +1,28 @@ +Source: urlgrabber +Section: python +Priority: optional +Maintainer: Ubuntu Core Developers +XSBC-Original-Maintainer: Kevin Coyner +Uploaders: Debian Python Modules Team +Build-Depends: debhelper (>= 5), python-all-dev (>= 2.3.5-11), python-support (>= 0.6), dpatch +Standards-Version: 3.7.2 +XS-Vcs-Svn: svn://svn.debian.org/python-modules/packages/urlgrabber/trunk/ +XS-Vcs-Browser: http://svn.debian.org/wsvn/python-modules/packages/urlgrabber/trunk/?op=log + +Package: python-urlgrabber +Architecture: all +Depends: ${shlibs:Depends}, ${misc:Depends}, ${python:Depends} +Provides: ${python:Provides} +Description: A high-level cross-protocol url-grabber + urlgrabber dramatically simplifies the fetching of files. It is designed to + be used in programs that need common (but not necessarily simple) + url-fetching features. This package provides both a binary and a module, both + of the name urlgrabber. + . + It supports identical behavior for http://, ftp:// and file:/// URIs. It + provides HTTP keepalive, byte ranges, regets, progress meters, throttling, + retries, access to authenticated http/ftp servers, and proxies. Additionally + it has the ability to treat a list of mirrors as a single source and to + automatically switch mirrors if there is a failure. + . + Homepage: http://linux.duke.edu/projects/urlgrabber/ --- urlgrabber-3.1.0.orig/debian/compat +++ urlgrabber-3.1.0/debian/compat @@ -0,0 +1 @@ +5 --- urlgrabber-3.1.0.orig/debian/urlgrabber.xml +++ urlgrabber-3.1.0/debian/urlgrabber.xml @@ -0,0 +1,201 @@ + + + + +urlgrabber +1 + + + urlgrabber + a high-level cross-protocol url-grabber. 
+ + +urlgrabber [OPTIONS] URL [FILE] + + + +DESCRIPTION +urlgrabber is a binary program and python module for fetching files. It is +designed to be used in programs that need common (but not necessarily simple) +url-fetching features. + + + +OPTIONS + + + +--help, -h + + + + help page specifying available options to the binary program. + + + + + +--copy-local + + + + ignored except for file:// urls, in which case + it specifies whether urlgrab should still make + a copy of the file, or simply point to the + existing copy. + + + + + +--throttle=NUMBER + + + + if it's an int, it's the bytes/second throttle + limit. If it's a float, it is first multiplied + by bandwidth. If throttle == 0, throttling is + disabled. If None, the module-level default + (which can be set with set_throttle) is used. + + + + + +--bandwidth=NUMBER + + + + the nominal max bandwidth in bytes/second. If + throttle is a float and bandwidth == 0, + throttling is disabled. If None, the + module-level default (which can be set with + set_bandwidth) is used. + + + + + +--range=RANGE + + + + a tuple of the form first_byte,last_byte + describing a byte range to retrieve. Either or + both of the values may be specified. If + first_byte is None, byte offset 0 is assumed. + If last_byte is None, the last byte available + is assumed. Note that both first and last_byte + values are inclusive so a range of (10,11) + would return the 10th and 11th bytes of the + resource. + + + + + +--user-agent=STR + + + + the user-agent string provide if the url is HTTP. + + + + + +--retry=NUMBER + + + + the number of times to retry the grab before + bailing. If this is zero, it will retry + forever. This was intentional… really, it was + :). If this value is not supplied or is supplied + but is None retrying does not occur. + + + + + +--retrycodes + + + + a sequence of errorcodes (values of e.errno) for + which it should retry. See the doc on + URLGrabError for more details on this. 
retrycodes + defaults to -1,2,4,5,6,7 if not specified + explicitly. + + + + + + +MODULE USE EXAMPLES +In its simplest form, urlgrabber can be a replacement for urllib2's +open, or even python's file if you're just reading: + + from urlgrabber import urlopen + fo = urlopen(url) + data = fo.read() + fo.close() +Here, the url can be http, https, ftp, or file. It's also pretty smart +so if you just give it something like /tmp/foo, it will +figure it out. For even more fun, you can also do: + + from urlgrabber import urlopen + local_filename = urlgrab(url) # grab a local copy of the file + data = urlread(url) # just read the data into a string +Now, like urllib2, what's really happening here is that you're using a +module-level object (called a grabber) that kind of serves as a +default. That's just fine, but you might want to get your own private +version for a couple of reasons: + +* it's a little ugly to modify the default grabber because you have to + reach into the module to do it +* you could run into conflicts if different parts of the code + modify the default grabber and therefore expect different + behavior +Therefore, you're probably better off making your own. This also gives +you lots of flexibility for later, as you'll see: + + from urlgrabber.grabber import URLGrabber + g = URLGrabber() + data = g.urlread(url) +This is nice because you can specify options when you create the +grabber. For example, let's turn on simple reget mode so that if we +have part of a file, we only need to fetch the rest: + + from urlgrabber.grabber import URLGrabber + g = URLGrabber(reget='simple') + local_filename = g.urlgrab(url) +The available options are listed in the module documentation, and can +usually be specified as a default at the grabber-level or as options +to the method: + +from urlgrabber.grabber import URLGrabber +g = URLGrabber(reget='simple') +local_filename = g.urlgrab(url, filename=None, reget=None) + + +AUTHORS +Written by: +Michael D. 
Stenner <mstenner@linux.duke.edu> +Ryan Tomayko <rtomayko@naeblis.cx> + +This manual page was written by Kevin Coyner <kevin@rustybear.com> for +the Debian system (but may be used by others). It borrows heavily on +the documentation included in the urlgrabber module. Permission is granted +to copy, distribute and/or modify this document under the terms of +the GNU General Public License, Version 2 any later version published +by the Free Software Foundation. + + + +RESOURCES +Main web site: http://linux.duke.edu/projects/urlgrabber/ + + + --- urlgrabber-3.1.0.orig/debian/postinst +++ urlgrabber-3.1.0/debian/postinst @@ -0,0 +1,16 @@ +#!/bin/sh +set -e + +if [ -d /usr/share/doc/urlgrabber-2.9.9 ]; then + rm -rf /usr/share/doc/urlgrabber-2.9.9 +fi + +if [ -d /usr/share/doc/urlgrabber-2.9.8 ]; then + rm -rf /usr/share/doc/urlgrabber-2.9.8 +fi + +if [ -d /usr/share/doc/urlgrabber-2.9.7 ]; then + rm -rf /usr/share/doc/urlgrabber-2.9.7 +fi + +#DEBHELPER# --- urlgrabber-3.1.0.orig/debian/dirs +++ urlgrabber-3.1.0/debian/dirs @@ -0,0 +1 @@ +usr/bin --- urlgrabber-3.1.0.orig/debian/rules +++ urlgrabber-3.1.0/debian/rules @@ -0,0 +1,96 @@ +#!/usr/bin/make -f +# -*- makefile -*- +# Sample debian/rules that uses debhelper. +# This file was originally written by Joey Hess and Craig Small. +# As a special exception, when this file is copied by dh-make into a +# dh-make output file, you may use that output file without restriction. +# This special exception was added by Craig Small in version 0.37 of dh-make. + +# Uncomment this to turn on verbose mode. 
+#export DH_VERBOSE=1 + +include /usr/share/python/python.mk +PYVERS = $(shell pyversions -r) + +CFLAGS = -Wall -g + +ifneq (,$(findstring noopt,$(DEB_BUILD_OPTIONS))) + CFLAGS += -O0 +else + CFLAGS += -O2 +endif + +configure: configure-stamp +configure-stamp: + dh_testdir + touch configure-stamp + +build: patch build-stamp + +build-stamp: configure-stamp + dh_testdir + for py in $(PYVERS); do \ + $$py setup.py build; \ + $$py test/runtests.py; \ + done + touch build-stamp + +clean: clean-patched unpatch + +clean-patched: + dh_testdir + dh_testroot + -rm -f build-stamp configure-stamp + for py in $(PYVERS); do \ + $$py setup.py clean; \ + done + python setup.py clean + find $(CURDIR) -name "*.pyc" -exec rm -f '{}' \; + dh_clean + +patch: patch-stamp +patch-stamp: + dpatch apply-all + dpatch cat-all >patch-stamp + +unpatch: + dpatch deapply-all + rm -rf patch-stamp debian/patched + +install: build + dh_testdir + dh_testroot + dh_clean -k + dh_installdirs + # Add here commands to install the package into debian/urlgrabber. + for py in $(PYVERS); do \ + $$py setup.py install --root=$(CURDIR)/debian/python-urlgrabber $(py_setup_install_args); \ + done + # remove unneeded documents installed by setup.py + -rm -rf $(CURDIR)/debian/python-urlgrabber/usr/share/doc/urlgrabber-3* + # use an unversioned python interpreter + sed -ri 's,^#![[:space:]]*/usr/bin/python.\..,#! /usr/bin/python,' $(CURDIR)/debian/python-urlgrabber/usr/bin/* + +# Build architecture-independent files here. +binary-indep: build install + dh_testdir + dh_testroot + dh_installchangelogs ChangeLog + dh_installdocs + dh_installman debian/urlgrabber.1 + dh_link + dh_strip + dh_compress + dh_fixperms + dh_pysupport + dh_installdeb + dh_shlibdeps + dh_gencontrol + dh_md5sums + dh_builddeb + +# Build architecture-dependent files here. 
+binary-arch: build install + +binary: binary-indep binary-arch +.PHONY: build clean binary-indep binary-arch binary install configure --- urlgrabber-3.1.0.orig/debian/README.Debian +++ urlgrabber-3.1.0/debian/README.Debian @@ -0,0 +1,7 @@ +urlgrabber for Debian +--------------------- + +The files keepalive.py and byterange.py are generic urllib2 extension modules +and can be used to add keepalive and range support to any urllib2 application. + + -- Kevin Coyner Fri, 6 Apr 2007 22:01:01 -0400 --- urlgrabber-3.1.0.orig/debian/changelog +++ urlgrabber-3.1.0/debian/changelog @@ -0,0 +1,78 @@ +urlgrabber (3.1.0-4ubuntu1) jaunty; urgency=low + + * debian/rules: + + Python 2.6 transition. + + Remove python version from the shebang line in the installed scripts. + * debian/patches/002_md5_deprecation.dpatch: + + Fix deprecation warning about the md5 module (lp: #336871). + + -- Michael Bienia Tue, 03 Mar 2009 13:20:12 +0100 + +urlgrabber (3.1.0-4) unstable; urgency=low + + * Patch to have urlgrabber.keepalive.HTTPHandler use Request.get_method() to + determine the appropriate HTTP method. Thanks to Jakub Wilk. + Closes: #433724 + * Changed maintainer e-mail to reflect new Debian account. + * Added dpatch as Build-Depends to debian/control. + + -- Kevin Coyner Sat, 04 Aug 2007 21:52:14 -0400 + +urlgrabber (3.1.0-3) unstable; urgency=low + + * debian/control: Added python modules packaging team to uploaders and added + VCS fields. + + -- Kevin Coyner Mon, 09 Apr 2007 19:27:36 -0600 + +urlgrabber (3.1.0-2) unstable; urgency=low + + * debian/control: Changed "Architecture: any" to all. + + -- Kevin Coyner Mon, 09 Apr 2007 15:20:02 -0600 + +urlgrabber (3.1.0-1) unstable; urgency=low + + * New upstream release. + * New maintainer. (Closes: #418095) + * Added man page. + * Cleaned up cruft in debian/rules. + * Rewrote debian/copyright. + * Cleaned up debian/control and added homepage. + * Added debian/README.Debian. 
+ * Added debian/postinst to clean up unneeded docs that were inappropriately + added in previous versions. + * Removed unneeded debian/pycompat file. + + -- Kevin Coyner Fri, 06 Apr 2007 22:27:03 -0400 + +urlgrabber (2.9.9-1) unstable; urgency=low + + * New upstream release + * Apply Ana Beatriz Guerrero Lopez's patch to + * Update to new Python policy (Closes: #373402) + * Switch to standards version 3.7.2 + * Update to debhelper compat level 5 + * Thanks Ana! + + -- Anand Kumria Thu, 6 Jul 2006 09:16:37 +1000 + +urlgrabber (2.9.7-2) unstable; urgency=low + + * When I imported urlgrabber into bzr, I somehow lost a Build-Dep: on + python. Re-adding it so I can (Closes: #335340) + + -- Anand Kumria Sat, 31 Dec 2005 15:34:22 +1100 + +urlgrabber (2.9.7-1) unstable; urgency=low + + * New upstream release (Closes: #344934) + + -- Anand Kumria Sat, 31 Dec 2005 15:34:22 +1100 + +urlgrabber (2.9.6-1) unstable; urgency=low + + * Initial release (Closes: #312698) + + -- Anand Kumria Sun, 9 Oct 2005 13:06:55 +1000 + --- urlgrabber-3.1.0.orig/debian/copyright +++ urlgrabber-3.1.0/debian/copyright @@ -0,0 +1,32 @@ +This package was debianized by Anand Kumria on +Sun, 9 Oct 2005 13:06:55 +1000. + +It was downloaded from + +Upstream Authors: Michael D. Stenner + Ryan Tomayko + +Copyright: (c) 2002-2006 Michael D. Stenner + Ryan Tomayko + +License: + + This package is free software; you can redistribute it and/or modify it + under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2 of the License, or (at your + option) any later version. + + This package is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + License for more details. 
+ + You should have received a copy of the GNU Lesser General Public License + along with this package; if not, write to the Free Software Foundation, + Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +On Debian systems, the complete text of the GNU Lesser General Public License +can be found in `/usr/share/common-licenses/LGPL'. + +The Debian packaging is (C) 2007, Kevin Coyner and is +licensed under the GPL, see `/usr/share/common-licenses/GPL'. --- urlgrabber-3.1.0.orig/debian/urlgrabber.txt +++ urlgrabber-3.1.0/debian/urlgrabber.txt @@ -0,0 +1,150 @@ +URLGRABBER(1) +============= + +NAME +---- +urlgrabber - a high-level cross-protocol url-grabber. + +SYNOPSIS +-------- +'urlgrabber' [OPTIONS] URL [FILE] + +DESCRIPTION +----------- +urlgrabber is a binary program and python module for fetching files. It is +designed to be used in programs that need common (but not necessarily simple) +url-fetching features. + +OPTIONS +------- +--help, -h:: + help page specifying available options to the binary program. + +--copy-local:: + ignored except for file:// urls, in which case + it specifies whether urlgrab should still make + a copy of the file, or simply point to the + existing copy. + +--throttle=NUMBER:: + if it's an int, it's the bytes/second throttle + limit. If it's a float, it is first multiplied + by bandwidth. If throttle == 0, throttling is + disabled. If None, the module-level default + (which can be set with set_throttle) is used. + +--bandwidth=NUMBER:: + the nominal max bandwidth in bytes/second. If + throttle is a float and bandwidth == 0, + throttling is disabled. If None, the + module-level default (which can be set with + set_bandwidth) is used. + +--range=RANGE:: + a tuple of the form first_byte,last_byte + describing a byte range to retrieve. Either or + both of the values may be specified. If + first_byte is None, byte offset 0 is assumed. + If last_byte is None, the last byte available + is assumed. 
Note that both first and last_byte + values are inclusive so a range of (10,11) + would return the 10th and 11th bytes of the + resource. + +--user-agent=STR:: + the user-agent string provide if the url is HTTP. + +--retry=NUMBER:: + the number of times to retry the grab before + bailing. If this is zero, it will retry + forever. This was intentional... really, it was + :). If this value is not supplied or is supplied + but is None retrying does not occur. + +--retrycodes:: + a sequence of errorcodes (values of e.errno) for + which it should retry. See the doc on + URLGrabError for more details on this. retrycodes + defaults to -1,2,4,5,6,7 if not specified + explicitly. + + +MODULE USE EXAMPLES +------------------- +In its simplest form, urlgrabber can be a replacement for urllib2's +open, or even python's file if you're just reading: +.................................. + from urlgrabber import urlopen + fo = urlopen(url) + data = fo.read() + fo.close() +.................................. + +Here, the url can be http, https, ftp, or file. It's also pretty smart +so if you just give it something like /tmp/foo, it will +figure it out. For even more fun, you can also do: + +.................................. + from urlgrabber import urlopen + local_filename = urlgrab(url) # grab a local copy of the file + data = urlread(url) # just read the data into a string +.................................. + +Now, like urllib2, what's really happening here is that you're using a +module-level object (called a grabber) that kind of serves as a +default. That's just fine, but you might want to get your own private +version for a couple of reasons: + +.................................. +* it's a little ugly to modify the default grabber because you have to + reach into the module to do it +* you could run into conflicts if different parts of the code + modify the default grabber and therefore expect different + behavior +.................................. 
+ +Therefore, you're probably better off making your own. This also gives +you lots of flexibility for later, as you'll see: + +.................................. + from urlgrabber.grabber import URLGrabber + g = URLGrabber() + data = g.urlread(url) +.................................. + +This is nice because you can specify options when you create the +grabber. For example, let's turn on simple reget mode so that if we +have part of a file, we only need to fetch the rest: + +.................................. + from urlgrabber.grabber import URLGrabber + g = URLGrabber(reget='simple') + local_filename = g.urlgrab(url) +.................................. + +The available options are listed in the module documentation, and can +usually be specified as a default at the grabber-level or as options +to the method: + + from urlgrabber.grabber import URLGrabber + g = URLGrabber(reget='simple') + local_filename = g.urlgrab(url, filename=None, reget=None) + + +AUTHORS +------- +Written by: +Michael D. Stenner +Ryan Tomayko + +This manual page was written by Kevin Coyner for +the Debian system (but may be used by others). It borrows heavily on +the documentation included in the urlgrabber module. Permission is granted +to copy, distribute and/or modify this document under the terms of +the GNU General Public License, Version 2 any later version published +by the Free Software Foundation. + + +RESOURCES +--------- +Main web site: http://linux.duke.edu/projects/urlgrabber/[] --- urlgrabber-3.1.0.orig/debian/docs +++ urlgrabber-3.1.0/debian/docs @@ -0,0 +1 @@ +TODO --- urlgrabber-3.1.0.orig/debian/patches/00list +++ urlgrabber-3.1.0/debian/patches/00list @@ -0,0 +1,2 @@ +001_keepalive +002_md5_deprecation --- urlgrabber-3.1.0.orig/debian/patches/001_keepalive.dpatch +++ urlgrabber-3.1.0/debian/patches/001_keepalive.dpatch @@ -0,0 +1,40 @@ +#! 
/bin/sh /usr/share/dpatch/dpatch-run +## 001_keepalive.dpatch by Kevin Coyner +## +## DP: keepalive patch +## DP: Original patch provided by Jakub Wilk + +@DPATCH@ +diff -urNad urlgrabber-3.1.0~/urlgrabber/keepalive.py urlgrabber-3.1.0/urlgrabber/keepalive.py +--- urlgrabber-3.1.0~/urlgrabber/keepalive.py 2006-09-21 20:58:05.000000000 -0400 ++++ urlgrabber-3.1.0/urlgrabber/keepalive.py 2007-08-04 22:18:34.000000000 -0400 +@@ -302,16 +302,14 @@ + + def _start_transaction(self, h, req): + try: ++ h.putrequest(req.get_method(), req.get_selector()) + if req.has_data(): + data = req.get_data() +- h.putrequest('POST', req.get_selector()) + if not req.headers.has_key('Content-type'): + h.putheader('Content-type', + 'application/x-www-form-urlencoded') + if not req.headers.has_key('Content-length'): + h.putheader('Content-length', '%d' % len(data)) +- else: +- h.putrequest('GET', req.get_selector()) + except (socket.error, httplib.HTTPException), err: + raise urllib2.URLError(err) + +@@ -371,9 +369,9 @@ + + def __init__(self, sock, debuglevel=0, strict=0, method=None): + if method: # the httplib in python 2.3 uses the method arg +- httplib.HTTPResponse.__init__(self, sock, debuglevel, method) ++ httplib.HTTPResponse.__init__(self, sock, debuglevel, strict, method) + else: # 2.2 doesn't +- httplib.HTTPResponse.__init__(self, sock, debuglevel) ++ httplib.HTTPResponse.__init__(self, sock, debuglevel, strict) + self.fileno = sock.fileno + self.code = None + self._rbuf = '' --- urlgrabber-3.1.0.orig/debian/patches/002_md5_deprecation.dpatch +++ urlgrabber-3.1.0/debian/patches/002_md5_deprecation.dpatch @@ -0,0 +1,46 @@ +#! /bin/sh /usr/share/dpatch/dpatch-run +## 002_md5_deprecation.dpatch by Michael Bienia +## +## All lines beginning with `## DP:' are a description of the patch. 
+## DP: Fix deprecation warning from python 2.6 about the md5 module + +@DPATCH@ +diff -urNad urlgrabber-3.1.0~/urlgrabber/keepalive.py urlgrabber-3.1.0/urlgrabber/keepalive.py +--- urlgrabber-3.1.0~/urlgrabber/keepalive.py 2006-09-22 02:58:05.000000000 +0200 ++++ urlgrabber-3.1.0/urlgrabber/keepalive.py 2009-03-03 12:54:14.000000000 +0100 +@@ -485,7 +485,7 @@ + keepalive_handler.close_all() + + def continuity(url): +- import md5 ++ from hashlib import md5 + format = '%25s: %s' + + # first fetch the file with the normal http handler +@@ -494,7 +494,7 @@ + fo = urllib2.urlopen(url) + foo = fo.read() + fo.close() +- m = md5.new(foo) ++ m = md5(foo) + print format % ('normal urllib', m.hexdigest()) + + # now install the keepalive handler and try again +@@ -504,7 +504,7 @@ + fo = urllib2.urlopen(url) + foo = fo.read() + fo.close() +- m = md5.new(foo) ++ m = md5(foo) + print format % ('keepalive read', m.hexdigest()) + + fo = urllib2.urlopen(url) +@@ -514,7 +514,7 @@ + if f: foo = foo + f + else: break + fo.close() +- m = md5.new(foo) ++ m = md5(foo) + print format % ('keepalive readline', m.hexdigest()) + + def comp(N, url):