diff -Nru pymongo-1.11/bson/binary.py pymongo-1.7/bson/binary.py --- pymongo-1.11/bson/binary.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/bson/binary.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,112 +0,0 @@ -# Copyright 2009-2010 10gen, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tools for representing BSON binary data. -""" - -BINARY_SUBTYPE = 0 -"""BSON binary subtype for binary data. - -This is becomming the default subtype and should be the most commonly -used. - -.. versionadded:: 1.5 -""" - -FUNCTION_SUBTYPE = 1 -"""BSON binary subtype for functions. - -.. versionadded:: 1.5 -""" - -OLD_BINARY_SUBTYPE = 2 -"""Old BSON binary subtype for binary data. - -This is still the default subtype, but that is changing to -:data:`BINARY_SUBTYPE`. - -.. versionadded:: 1.7 -""" - -UUID_SUBTYPE = 3 -"""BSON binary subtype for a UUID. - -:class:`uuid.UUID` instances will automatically be encoded -by :mod:`bson` using this subtype. - -.. versionadded:: 1.5 -""" - -MD5_SUBTYPE = 5 -"""BSON binary subtype for an MD5 hash. - -.. versionadded:: 1.5 -""" - -USER_DEFINED_SUBTYPE = 128 -"""BSON binary subtype for any user defined structure. - -.. versionadded:: 1.5 -""" - - -class Binary(str): - """Representation of BSON binary data. - - This is necessary because we want to represent Python strings as - the BSON string type. 
We need to wrap binary data so we can tell - the difference between what should be considered binary data and - what should be considered a string when we encode to BSON. - - Raises TypeError if `data` is not an instance of str or `subtype` - is not an instance of int. Raises ValueError if `subtype` is not - in [0, 256). - - :Parameters: - - `data`: the binary data to represent - - `subtype` (optional): the `binary subtype - `_ - to use - """ - - def __new__(cls, data, subtype=OLD_BINARY_SUBTYPE): - if not isinstance(data, str): - raise TypeError("data must be an instance of str") - if not isinstance(subtype, int): - raise TypeError("subtype must be an instance of int") - if subtype >= 256 or subtype < 0: - raise ValueError("subtype must be contained in [0, 256)") - self = str.__new__(cls, data) - self.__subtype = subtype - return self - - @property - def subtype(self): - """Subtype of this binary data. - """ - return self.__subtype - - def __eq__(self, other): - if isinstance(other, Binary): - return (self.__subtype, str(self)) == (other.subtype, str(other)) - # We don't return NotImplemented here because if we did then - # Binary("foo") == "foo" would return True, since Binary is a - # subclass of str... - return False - - def __ne__(self, other): - return not self == other - - def __repr__(self): - return "Binary(%s, %s)" % (str.__repr__(self), self.__subtype) diff -Nru pymongo-1.11/bson/buffer.c pymongo-1.7/bson/buffer.c --- pymongo-1.11/bson/buffer.c 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/bson/buffer.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,135 +0,0 @@ -/* - * Copyright 2009-2010 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include - -#include "buffer.h" - -#define INITIAL_BUFFER_SIZE 256 - -struct buffer { - char* buffer; - int size; - int position; -}; - -/* Allocate and return a new buffer. - * Return NULL on allocation failure. */ -buffer_t buffer_new(void) { - buffer_t buffer; - buffer = (buffer_t)malloc(sizeof(struct buffer)); - if (buffer == NULL) { - return NULL; - } - - buffer->size = INITIAL_BUFFER_SIZE; - buffer->position = 0; - buffer->buffer = (char*)malloc(sizeof(char) * INITIAL_BUFFER_SIZE); - if (buffer->buffer == NULL) { - free(buffer); - return NULL; - } - - return buffer; -} - -/* Free the memory allocated for `buffer`. - * Return non-zero on failure. */ -int buffer_free(buffer_t buffer) { - if (buffer == NULL) { - return 1; - } - free(buffer->buffer); - free(buffer); - return 0; -} - -/* Grow `buffer` to at least `min_length`. - * Return non-zero on allocation failure. */ -static int buffer_grow(buffer_t buffer, int min_length) { - int size = buffer->size; - char* old_buffer = buffer->buffer; - if (size >= min_length) { - return 0; - } - while (size < min_length) { - size *= 2; - } - buffer->buffer = (char*)realloc(buffer->buffer, sizeof(char) * size); - if (buffer->buffer == NULL) { - free(old_buffer); - free(buffer); - return 1; - } - buffer->size = size; - return 0; -} - -/* Assure that `buffer` has at least `size` free bytes (and grow if needed). - * Return non-zero on allocation failure. 
*/ -static int buffer_assure_space(buffer_t buffer, int size) { - if (buffer->position + size <= buffer->size) { - return 0; - } - return buffer_grow(buffer, buffer->position + size); -} - -/* Save `size` bytes from the current position in `buffer` (and grow if needed). - * Return offset for writing, or -1 on allocation failure. */ -buffer_position buffer_save_space(buffer_t buffer, int size) { - int position = buffer->position; - if (buffer_assure_space(buffer, size) != 0) { - return -1; - } - buffer->position += size; - return position; -} - -/* Write `size` bytes from `data` to `buffer` (and grow if needed). - * Return non-zero on allocation failure. */ -int buffer_write(buffer_t buffer, const char* data, int size) { - if (buffer_assure_space(buffer, size) != 0) { - return 1; - } - - memcpy(buffer->buffer + buffer->position, data, size); - buffer->position += size; - return 0; -} - -/* Write `size` bytes from `data` to `buffer` at position `position`. - * Does not change the internal position of `buffer`. - * Return non-zero if buffer isn't large enough for write. */ -int buffer_write_at_position(buffer_t buffer, buffer_position position, - const char* data, int size) { - if (position + size > buffer->size) { - buffer_free(buffer); - return 1; - } - - memcpy(buffer->buffer + position, data, size); - return 0; -} - - -int buffer_get_position(buffer_t buffer) { - return buffer->position; -} - -char* buffer_get_buffer(buffer_t buffer) { - return buffer->buffer; -} diff -Nru pymongo-1.11/bson/buffer.h pymongo-1.7/bson/buffer.h --- pymongo-1.11/bson/buffer.h 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/bson/buffer.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ -/* - * Copyright 2009-2010 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef BUFFER_H -#define BUFFER_H - -/* Note: if any of these functions return a failure condition then the buffer - * has already been freed. */ - -/* A buffer */ -typedef struct buffer* buffer_t; -/* A position in the buffer */ -typedef int buffer_position; - -/* Allocate and return a new buffer. - * Return NULL on allocation failure. */ -buffer_t buffer_new(void); - -/* Free the memory allocated for `buffer`. - * Return non-zero on failure. */ -int buffer_free(buffer_t buffer); - -/* Save `size` bytes from the current position in `buffer` (and grow if needed). - * Return offset for writing, or -1 on allocation failure. */ -buffer_position buffer_save_space(buffer_t buffer, int size); - -/* Write `size` bytes from `data` to `buffer` (and grow if needed). - * Return non-zero on allocation failure. */ -int buffer_write(buffer_t buffer, const char* data, int size); - -/* Write `size` bytes from `data` to `buffer` at position `position`. - * Does not change the internal position of `buffer`. - * Return non-zero if buffer isn't large enough for write. */ -int buffer_write_at_position(buffer_t buffer, buffer_position position, const char* data, int size); - -/* Getters for the internals of a buffer_t. - * Should try to avoid using these as much as possible - * since they break the abstraction. 
*/ -buffer_position buffer_get_position(buffer_t buffer); -char* buffer_get_buffer(buffer_t buffer); - -#endif diff -Nru pymongo-1.11/bson/_cbson.h pymongo-1.7/bson/_cbson.h --- pymongo-1.11/bson/_cbson.h 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/bson/_cbson.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,41 +0,0 @@ -/* - * Copyright 2009-2010 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef _CBSON_H -#define _CBSON_H - -#include -#include -#include "buffer.h" - -#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) -typedef unsigned int Py_ssize_t; -#define PY_SSIZE_T_MAX UINT_MAX -#define PY_SSIZE_T_MIN 0 -#endif - -int buffer_write_bytes(buffer_t buffer, const char* data, int size); - -int write_dict(buffer_t buffer, PyObject* dict, - unsigned char check_keys, unsigned char top_level); - -int write_pair(buffer_t buffer, const char* name, Py_ssize_t name_length, - PyObject* value, unsigned char check_keys, unsigned char allow_id); - -int decode_and_write_pair(buffer_t buffer, PyObject* key, PyObject* value, - unsigned char check_keys, unsigned char top_level); - -#endif diff -Nru pymongo-1.11/bson/_cbsonmodule.c pymongo-1.7/bson/_cbsonmodule.c --- pymongo-1.11/bson/_cbsonmodule.c 2011-04-08 21:30:20.000000000 +0000 +++ pymongo-1.7/bson/_cbsonmodule.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,1408 +0,0 @@ -/* - * Copyright 2009-2010 10gen, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * This file contains C implementations of some of the functions - * needed by the bson module. If possible, these implementations - * should be used to speed up BSON encoding and decoding. - */ - -#include -#include - -#include - -#include "_cbson.h" -#include "buffer.h" -#include "time64.h" -#include "encoding_helpers.h" - -static PyObject* Binary = NULL; -static PyObject* Code = NULL; -static PyObject* ObjectId = NULL; -static PyObject* DBRef = NULL; -static PyObject* RECompile = NULL; -static PyObject* UUID = NULL; -static PyObject* Timestamp = NULL; -static PyObject* MinKey = NULL; -static PyObject* MaxKey = NULL; -static PyObject* UTC = NULL; -static PyTypeObject* REType = NULL; - -#if PY_VERSION_HEX < 0x02050000 -#define WARN(category, message) \ - PyErr_Warn((category), (message)) -#else -#define WARN(category, message) \ - PyErr_WarnEx((category), (message), 1) -#endif - -/* Maximum number of regex flags */ -#define FLAGS_SIZE 7 - -#if defined(WIN32) || defined(_MSC_VER) -/* This macro is basically an implementation of asprintf for win32 - * We get the length of the int as string and malloc a buffer for it, - * returning -1 if that malloc fails. We then actually print to the - * buffer to get the string value as an int. Like asprintf, the result - * must be explicitly free'd when done being used. 
- */ -#if defined(_MSC_VER) && (_MSC_VER >= 1400) -#define INT2STRING(buffer, i) \ - *(buffer) = malloc(_scprintf("%d", (i)) + 1), \ - (!(buffer) ? \ - -1 : \ - _snprintf_s(*(buffer), \ - _scprintf("%d", (i)) + 1, \ - _scprintf("%d", (i)) + 1, \ - "%d", \ - (i))) -#define STRCAT(dest, n, src) strcat_s((dest), (n), (src)) -#else -#define INT2STRING(buffer, i) \ - *(buffer) = malloc(_scprintf("%d", (i)) + 1), \ - (!(buffer) ? \ - -1 : \ - _snprintf(*(buffer), \ - _scprintf("%d", (i)) + 1, \ - "%d", \ - (i))) -#define STRCAT(dest, n, src) strcat((dest), (src)) -#endif -#else -#define INT2STRING(buffer, i) asprintf((buffer), "%d", (i)) -#define STRCAT(dest, n, src) strcat((dest), (src)) -#endif - - -static PyObject* elements_to_dict(const char* string, int max, - PyObject* as_class, unsigned char tz_aware); - -static int _write_element_to_buffer(buffer_t buffer, int type_byte, PyObject* value, - unsigned char check_keys, unsigned char first_attempt); - -/* Date stuff */ -static PyObject* datetime_from_millis(long long millis) { - int microseconds = (millis % 1000) * 1000; - Time64_T seconds = millis / 1000; - struct TM timeinfo; - gmtime64_r(&seconds, &timeinfo); - - return PyDateTime_FromDateAndTime(timeinfo.tm_year + 1900, - timeinfo.tm_mon + 1, - timeinfo.tm_mday, - timeinfo.tm_hour, - timeinfo.tm_min, - timeinfo.tm_sec, - microseconds); -} - -static long long millis_from_datetime(PyObject* datetime) { - struct TM timeinfo; - long long millis; - - timeinfo.tm_year = PyDateTime_GET_YEAR(datetime) - 1900; - timeinfo.tm_mon = PyDateTime_GET_MONTH(datetime) - 1; - timeinfo.tm_mday = PyDateTime_GET_DAY(datetime); - timeinfo.tm_hour = PyDateTime_DATE_GET_HOUR(datetime); - timeinfo.tm_min = PyDateTime_DATE_GET_MINUTE(datetime); - timeinfo.tm_sec = PyDateTime_DATE_GET_SECOND(datetime); - - millis = timegm64(&timeinfo) * 1000; - millis += PyDateTime_DATE_GET_MICROSECOND(datetime) / 1000; - return millis; -} - -/* Just make this compatible w/ the old API. 
*/ -int buffer_write_bytes(buffer_t buffer, const char* data, int size) { - if (buffer_write(buffer, data, size)) { - PyErr_NoMemory(); - return 0; - } - return 1; -} - -/* returns 0 on failure */ -static int write_string(buffer_t buffer, PyObject* py_string) { - Py_ssize_t string_length; - const char* string = PyString_AsString(py_string); - if (!string) { - return 1; - } - string_length = PyString_Size(py_string) + 1; - - if (!buffer_write_bytes(buffer, (const char*)&string_length, 4)) { - return 0; - } - if (!buffer_write_bytes(buffer, string, string_length)) { - return 0; - } - return 1; -} - -/* Get an error class from the bson.errors module. - * - * Returns a new ref */ -static PyObject* _error(char* name) { - PyObject* error; - PyObject* errors = PyImport_ImportModule("bson.errors"); - if (!errors) { - return NULL; - } - error = PyObject_GetAttrString(errors, name); - Py_DECREF(errors); - return error; -} - -/* Reload a cached Python object. - * - * Returns non-zero on failure. */ -static int _reload_object(PyObject** object, char* module_name, char* object_name) { - PyObject* module; - - module = PyImport_ImportModule(module_name); - if (!module) { - return 1; - } - - *object = PyObject_GetAttrString(module, object_name); - Py_DECREF(module); - - return (*object) ? 0 : 2; -} - -/* Reload all cached Python objects. - * - * Returns non-zero on failure. */ -static int _reload_python_objects(void) { - if (_reload_object(&Binary, "bson.binary", "Binary") || - _reload_object(&Code, "bson.code", "Code") || - _reload_object(&ObjectId, "bson.objectid", "ObjectId") || - _reload_object(&DBRef, "bson.dbref", "DBRef") || - _reload_object(&Timestamp, "bson.timestamp", "Timestamp") || - _reload_object(&MinKey, "bson.min_key", "MinKey") || - _reload_object(&MaxKey, "bson.max_key", "MaxKey") || - _reload_object(&UTC, "bson.tz_util", "utc") || - _reload_object(&RECompile, "re", "compile")) { - return 1; - } - /* If we couldn't import uuid then we must be on 2.4. Just ignore. 
*/ - if (_reload_object(&UUID, "uuid", "UUID") == 1) { - UUID = NULL; - PyErr_Clear(); - } - /* Reload our REType hack too. */ - REType = PyObject_CallFunction(RECompile, "O", - PyString_FromString(""))->ob_type; - return 0; -} - -static int write_element_to_buffer(buffer_t buffer, int type_byte, - PyObject* value, unsigned char check_keys, - unsigned char first_attempt) { - int result; - if(Py_EnterRecursiveCall(" while encoding an object to BSON ")) - return 0; - result = _write_element_to_buffer(buffer, type_byte, value, - check_keys, first_attempt); - Py_LeaveRecursiveCall(); - return result; -} - -/* TODO our platform better be little-endian w/ 4-byte ints! */ -/* Write a single value to the buffer (also write it's type_byte, for which - * space has already been reserved. - * - * returns 0 on failure */ -static int _write_element_to_buffer(buffer_t buffer, int type_byte, PyObject* value, - unsigned char check_keys, unsigned char first_attempt) { - if (PyBool_Check(value)) { - const long bool = PyInt_AsLong(value); - const char c = bool ? 
0x01 : 0x00; - *(buffer_get_buffer(buffer) + type_byte) = 0x08; - return buffer_write_bytes(buffer, &c, 1); - } - else if (PyInt_Check(value)) { - const long long_value = PyInt_AsLong(value); - const int int_value = (int)long_value; - if (PyErr_Occurred() || long_value != int_value) { /* Overflow */ - long long long_long_value; - PyErr_Clear(); - long_long_value = PyLong_AsLongLong(value); - if (PyErr_Occurred()) { /* Overflow AGAIN */ - PyErr_SetString(PyExc_OverflowError, - "MongoDB can only handle up to 8-byte ints"); - return 0; - } - *(buffer_get_buffer(buffer) + type_byte) = 0x12; - return buffer_write_bytes(buffer, (const char*)&long_long_value, 8); - } - *(buffer_get_buffer(buffer) + type_byte) = 0x10; - return buffer_write_bytes(buffer, (const char*)&int_value, 4); - } else if (PyLong_Check(value)) { - const long long long_long_value = PyLong_AsLongLong(value); - if (PyErr_Occurred()) { /* Overflow */ - PyErr_SetString(PyExc_OverflowError, - "MongoDB can only handle up to 8-byte ints"); - return 0; - } - *(buffer_get_buffer(buffer) + type_byte) = 0x12; - return buffer_write_bytes(buffer, (const char*)&long_long_value, 8); - } else if (PyFloat_Check(value)) { - const double d = PyFloat_AsDouble(value); - *(buffer_get_buffer(buffer) + type_byte) = 0x01; - return buffer_write_bytes(buffer, (const char*)&d, 8); - } else if (value == Py_None) { - *(buffer_get_buffer(buffer) + type_byte) = 0x0A; - return 1; - } else if (PyDict_Check(value)) { - *(buffer_get_buffer(buffer) + type_byte) = 0x03; - return write_dict(buffer, value, check_keys, 0); - } else if (PyList_Check(value) || PyTuple_Check(value)) { - int start_position, - length_location, - items, - length, - i; - char zero = 0; - - *(buffer_get_buffer(buffer) + type_byte) = 0x04; - start_position = buffer_get_position(buffer); - - /* save space for length */ - length_location = buffer_save_space(buffer, 4); - if (length_location == -1) { - PyErr_NoMemory(); - return 0; - } - - items = PySequence_Size(value); 
- for(i = 0; i < items; i++) { - int list_type_byte = buffer_save_space(buffer, 1); - char* name; - PyObject* item_value; - - if (list_type_byte == -1) { - PyErr_NoMemory(); - return 0; - } - if (INT2STRING(&name, i) < 0 || !name) { - PyErr_NoMemory(); - return 0; - } - if (!buffer_write_bytes(buffer, name, strlen(name) + 1)) { - free(name); - return 0; - } - free(name); - - item_value = PySequence_GetItem(value, i); - if (!write_element_to_buffer(buffer, list_type_byte, item_value, check_keys, 1)) { - Py_DECREF(item_value); - return 0; - } - Py_DECREF(item_value); - } - - /* write null byte and fill in length */ - if (!buffer_write_bytes(buffer, &zero, 1)) { - return 0; - } - length = buffer_get_position(buffer) - start_position; - memcpy(buffer_get_buffer(buffer) + length_location, &length, 4); - return 1; - } else if (PyObject_IsInstance(value, Binary)) { - PyObject* subtype_object; - - *(buffer_get_buffer(buffer) + type_byte) = 0x05; - subtype_object = PyObject_GetAttrString(value, "subtype"); - if (!subtype_object) { - return 0; - } - { - const long long_subtype = PyInt_AsLong(subtype_object); - const char subtype = (const char)long_subtype; - const int length = PyString_Size(value); - - Py_DECREF(subtype_object); - if (subtype == 2) { - const int other_length = length + 4; - if (!buffer_write_bytes(buffer, (const char*)&other_length, 4)) { - return 0; - } - if (!buffer_write_bytes(buffer, &subtype, 1)) { - return 0; - } - } - if (!buffer_write_bytes(buffer, (const char*)&length, 4)) { - return 0; - } - if (subtype != 2) { - if (!buffer_write_bytes(buffer, &subtype, 1)) { - return 0; - } - } - { - const char* string = PyString_AsString(value); - if (!string) { - return 0; - } - if (!buffer_write_bytes(buffer, string, length)) { - return 0; - } - } - } - return 1; - } else if (UUID && PyObject_IsInstance(value, UUID)) { - // Just a special case of Binary above, but simpler to do as a separate case - - // UUID is always 16 bytes, subtype 3 - int length = 16; - 
const char subtype = 3; - - PyObject* bytes; - - *(buffer_get_buffer(buffer) + type_byte) = 0x05; - if (!buffer_write_bytes(buffer, (const char*)&length, 4)) { - return 0; - } - if (!buffer_write_bytes(buffer, &subtype, 1)) { - return 0; - } - - bytes = PyObject_GetAttrString(value, "bytes"); - if (!bytes) { - return 0; - } - if (!buffer_write_bytes(buffer, PyString_AsString(bytes), length)) { - Py_DECREF(bytes); - return 0; - } - Py_DECREF(bytes); - return 1; - } else if (PyObject_IsInstance(value, Code)) { - int start_position, - length_location, - length; - PyObject* scope; - - *(buffer_get_buffer(buffer) + type_byte) = 0x0F; - - start_position = buffer_get_position(buffer); - /* save space for length */ - length_location = buffer_save_space(buffer, 4); - if (length_location == -1) { - PyErr_NoMemory(); - return 0; - } - - if (!write_string(buffer, value)) { - return 0; - } - - scope = PyObject_GetAttrString(value, "scope"); - if (!scope) { - return 0; - } - if (!write_dict(buffer, scope, 0, 0)) { - Py_DECREF(scope); - return 0; - } - Py_DECREF(scope); - - length = buffer_get_position(buffer) - start_position; - memcpy(buffer_get_buffer(buffer) + length_location, &length, 4); - return 1; - } else if (PyString_Check(value)) { - int result; - result_t status; - - *(buffer_get_buffer(buffer) + type_byte) = 0x02; - status = check_string((const unsigned char*)PyString_AsString(value), - PyString_Size(value), 1, 0); - if (status == NOT_UTF_8) { - PyObject* InvalidStringData = _error("InvalidStringData"); - PyErr_SetString(InvalidStringData, - "strings in documents must be valid UTF-8"); - Py_DECREF(InvalidStringData); - return 0; - } - result = write_string(buffer, value); - return result; - } else if (PyUnicode_Check(value)) { - PyObject* encoded; - int result; - - *(buffer_get_buffer(buffer) + type_byte) = 0x02; - encoded = PyUnicode_AsUTF8String(value); - if (!encoded) { - return 0; - } - result = write_string(buffer, encoded); - Py_DECREF(encoded); - return 
result; - } else if (PyDateTime_Check(value)) { - long long millis; - PyObject* utcoffset = PyObject_CallMethod(value, "utcoffset", NULL); - if (utcoffset != Py_None) { - PyObject* result = PyNumber_Subtract(value, utcoffset); - Py_DECREF(utcoffset); - if (!result) { - return 0; - } - millis = millis_from_datetime(result); - Py_DECREF(result); - } else { - millis = millis_from_datetime(value); - } - *(buffer_get_buffer(buffer) + type_byte) = 0x09; - return buffer_write_bytes(buffer, (const char*)&millis, 8); - } else if (PyObject_IsInstance(value, ObjectId)) { - PyObject* pystring = PyObject_GetAttrString(value, "_ObjectId__id"); - if (!pystring) { - return 0; - } - { - const char* as_string = PyString_AsString(pystring); - if (!as_string) { - Py_DECREF(pystring); - return 0; - } - if (!buffer_write_bytes(buffer, as_string, 12)) { - Py_DECREF(pystring); - return 0; - } - Py_DECREF(pystring); - *(buffer_get_buffer(buffer) + type_byte) = 0x07; - } - return 1; - } else if (PyObject_IsInstance(value, DBRef)) { - PyObject* as_doc = PyObject_CallMethod(value, "as_doc", NULL); - if (!as_doc) { - return 0; - } - if (!write_dict(buffer, as_doc, 0, 0)) { - Py_DECREF(as_doc); - return 0; - } - Py_DECREF(as_doc); - *(buffer_get_buffer(buffer) + type_byte) = 0x03; - return 1; - } else if (PyObject_IsInstance(value, Timestamp)) { - PyObject* obj; - long i; - - obj = PyObject_GetAttrString(value, "inc"); - if (!obj) { - return 0; - } - i = PyInt_AsLong(obj); - Py_DECREF(obj); - if (!buffer_write_bytes(buffer, (const char*)&i, 4)) { - return 0; - } - - obj = PyObject_GetAttrString(value, "time"); - if (!obj) { - return 0; - } - i = PyInt_AsLong(obj); - Py_DECREF(obj); - if (!buffer_write_bytes(buffer, (const char*)&i, 4)) { - return 0; - } - - *(buffer_get_buffer(buffer) + type_byte) = 0x11; - return 1; - } - else if (PyObject_TypeCheck(value, REType)) { - PyObject* py_flags = PyObject_GetAttrString(value, "flags"); - PyObject* py_pattern; - PyObject* encoded_pattern; - long 
int_flags; - char flags[FLAGS_SIZE]; - char check_utf8 = 0; - int pattern_length, - flags_length; - result_t status; - - if (!py_flags) { - return 0; - } - int_flags = PyInt_AsLong(py_flags); - Py_DECREF(py_flags); - py_pattern = PyObject_GetAttrString(value, "pattern"); - if (!py_pattern) { - return 0; - } - - if (PyUnicode_Check(py_pattern)) { - encoded_pattern = PyUnicode_AsUTF8String(py_pattern); - Py_DECREF(py_pattern); - if (!encoded_pattern) { - return 0; - } - } else { - encoded_pattern = py_pattern; - check_utf8 = 1; - } - - status = check_string((const unsigned char*)PyString_AsString(encoded_pattern), - PyString_Size(encoded_pattern), check_utf8, 1); - if (status == NOT_UTF_8) { - PyObject* InvalidStringData = _error("InvalidStringData"); - PyErr_SetString(InvalidStringData, - "regex patterns must be valid UTF-8"); - Py_DECREF(InvalidStringData); - return 0; - } else if (status == HAS_NULL) { - PyObject* InvalidDocument = _error("InvalidDocument"); - PyErr_SetString(InvalidDocument, - "regex patterns must not contain the NULL byte"); - Py_DECREF(InvalidDocument); - return 0; - } - - { - const char* pattern = PyString_AsString(encoded_pattern); - pattern_length = strlen(pattern) + 1; - - if (!buffer_write_bytes(buffer, pattern, pattern_length)) { - Py_DECREF(encoded_pattern); - return 0; - } - } - Py_DECREF(encoded_pattern); - - flags[0] = 0; - /* TODO don't hardcode these */ - if (int_flags & 2) { - STRCAT(flags, FLAGS_SIZE, "i"); - } - if (int_flags & 4) { - STRCAT(flags, FLAGS_SIZE, "l"); - } - if (int_flags & 8) { - STRCAT(flags, FLAGS_SIZE, "m"); - } - if (int_flags & 16) { - STRCAT(flags, FLAGS_SIZE, "s"); - } - if (int_flags & 32) { - STRCAT(flags, FLAGS_SIZE, "u"); - } - if (int_flags & 64) { - STRCAT(flags, FLAGS_SIZE, "x"); - } - flags_length = strlen(flags) + 1; - if (!buffer_write_bytes(buffer, flags, flags_length)) { - return 0; - } - *(buffer_get_buffer(buffer) + type_byte) = 0x0B; - return 1; - } else if (PyObject_IsInstance(value, MinKey)) 
{ - *(buffer_get_buffer(buffer) + type_byte) = 0xFF; - return 1; - } else if (PyObject_IsInstance(value, MaxKey)) { - *(buffer_get_buffer(buffer) + type_byte) = 0x7F; - return 1; - } else if (first_attempt) { - /* Try reloading the modules and having one more go at it. */ - if (WARN(PyExc_RuntimeWarning, "couldn't encode - reloading python " - "modules and trying again. if you see this without getting " - "an InvalidDocument exception please see http://api.mongodb" - ".org/python/current/faq.html#does-pymongo-work-with-mod-" - "wsgi") == -1) { - return 0; - } - if (_reload_python_objects()) { - return 0; - } - return write_element_to_buffer(buffer, type_byte, value, check_keys, 0); - } - { - PyObject* errmsg = PyString_FromString("Cannot encode object: "); - PyObject* repr = PyObject_Repr(value); - PyObject* InvalidDocument = _error("InvalidDocument"); - PyString_ConcatAndDel(&errmsg, repr); - PyErr_SetString(InvalidDocument, PyString_AsString(errmsg)); - Py_DECREF(errmsg); - Py_DECREF(InvalidDocument); - return 0; - } -} - -static int check_key_name(const char* name, - const Py_ssize_t name_length) { - int i; - if (name_length > 0 && name[0] == '$') { - PyObject* InvalidDocument = _error("InvalidDocument"); - PyObject* errmsg = PyString_FromFormat("key '%s' must not start with '$'", name); - PyErr_SetString(InvalidDocument, PyString_AsString(errmsg)); - Py_DECREF(errmsg); - Py_DECREF(InvalidDocument); - return 0; - } - for (i = 0; i < name_length; i++) { - if (name[i] == '.') { - PyObject* InvalidDocument = _error("InvalidDocument"); - PyObject* errmsg = PyString_FromFormat("key '%s' must not contain '.'", name); - PyErr_SetString(InvalidDocument, PyString_AsString(errmsg)); - Py_DECREF(errmsg); - Py_DECREF(InvalidDocument); - return 0; - } - } - return 1; -} - -/* Write a (key, value) pair to the buffer. 
- * - * Returns 0 on failure */ -int write_pair(buffer_t buffer, const char* name, Py_ssize_t name_length, - PyObject* value, unsigned char check_keys, unsigned char allow_id) { - int type_byte; - - /* Don't write any _id elements unless we're explicitly told to - - * _id has to be written first so we do so, but don't bother - * deleting it from the dictionary being written. */ - if (!allow_id && strcmp(name, "_id") == 0) { - return 1; - } - - type_byte = buffer_save_space(buffer, 1); - if (type_byte == -1) { - PyErr_NoMemory(); - return 0; - } - if (check_keys && !check_key_name(name, name_length)) { - return 0; - } - if (!buffer_write_bytes(buffer, name, name_length + 1)) { - return 0; - } - if (!write_element_to_buffer(buffer, type_byte, value, check_keys, 1)) { - return 0; - } - return 1; -} - -int decode_and_write_pair(buffer_t buffer, - PyObject* key, PyObject* value, - unsigned char check_keys, unsigned char top_level) { - PyObject* encoded; - if (PyUnicode_Check(key)) { - result_t status; - encoded = PyUnicode_AsUTF8String(key); - if (!encoded) { - return 0; - } - status = check_string((const unsigned char*)PyString_AsString(encoded), - PyString_Size(encoded), 0, 1); - - if (status == HAS_NULL) { - PyObject* InvalidDocument = _error("InvalidDocument"); - PyErr_SetString(InvalidDocument, - "Key names must not contain the NULL byte"); - Py_DECREF(InvalidDocument); - return 0; - } - } else if (PyString_Check(key)) { - result_t status; - encoded = key; - Py_INCREF(encoded); - - status = check_string((const unsigned char*)PyString_AsString(encoded), - PyString_Size(encoded), 1, 1); - - if (status == NOT_UTF_8) { - PyObject* InvalidStringData = _error("InvalidStringData"); - PyErr_SetString(InvalidStringData, - "strings in documents must be valid UTF-8"); - Py_DECREF(InvalidStringData); - return 0; - } else if (status == HAS_NULL) { - PyObject* InvalidDocument = _error("InvalidDocument"); - PyErr_SetString(InvalidDocument, - "Key names must not contain the NULL 
byte"); - Py_DECREF(InvalidDocument); - return 0; - } - } else { - PyObject* InvalidDocument = _error("InvalidDocument"); - PyObject* errmsg = PyString_FromString("documents must have only string keys, key was "); - PyObject* repr = PyObject_Repr(key); - PyString_ConcatAndDel(&errmsg, repr); - PyErr_SetString(InvalidDocument, PyString_AsString(errmsg)); - Py_DECREF(InvalidDocument); - Py_DECREF(errmsg); - return 0; - } - - /* If top_level is True, don't allow writing _id here - it was already written. */ - if (!write_pair(buffer, PyString_AsString(encoded), - PyString_Size(encoded), value, check_keys, !top_level)) { - Py_DECREF(encoded); - return 0; - } - - Py_DECREF(encoded); - return 1; -} - -/* returns 0 on failure */ -int write_dict(buffer_t buffer, PyObject* dict, unsigned char check_keys, unsigned char top_level) { - PyObject* key; - PyObject* iter; - char zero = 0; - int length; - int length_location; - - if (!PyDict_Check(dict)) { - PyObject* errmsg = PyString_FromString("encoder expected a mapping type but got: "); - PyObject* repr = PyObject_Repr(dict); - PyString_ConcatAndDel(&errmsg, repr); - PyErr_SetString(PyExc_TypeError, PyString_AsString(errmsg)); - Py_DECREF(errmsg); - return 0; - } - - length_location = buffer_save_space(buffer, 4); - if (length_location == -1) { - PyErr_NoMemory(); - return 0; - } - - /* Write _id first if this is a top level doc. 
*/ - if (top_level) { - PyObject* _id = PyDict_GetItemString(dict, "_id"); - if (_id) { - /* Don't bother checking keys, but do make sure we're allowed to - * write _id */ - if (!write_pair(buffer, "_id", 3, _id, 0, 1)) { - return 0; - } - } - } - - iter = PyObject_GetIter(dict); - if (iter == NULL) { - return 0; - } - while ((key = PyIter_Next(iter)) != NULL) { - PyObject* value = PyDict_GetItem(dict, key); - if (!value) { - PyErr_SetObject(PyExc_KeyError, key); - Py_DECREF(key); - Py_DECREF(iter); - return 0; - } - if (!decode_and_write_pair(buffer, key, value, check_keys, top_level)) { - Py_DECREF(key); - Py_DECREF(iter); - return 0; - } - Py_DECREF(key); - } - Py_DECREF(iter); - - /* write null byte and fill in length */ - if (!buffer_write_bytes(buffer, &zero, 1)) { - return 0; - } - length = buffer_get_position(buffer) - length_location; - memcpy(buffer_get_buffer(buffer) + length_location, &length, 4); - return 1; -} - -static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { - PyObject* dict; - PyObject* result; - unsigned char check_keys; - buffer_t buffer; - - if (!PyArg_ParseTuple(args, "Ob", &dict, &check_keys)) { - return NULL; - } - - buffer = buffer_new(); - if (!buffer) { - PyErr_NoMemory(); - return NULL; - } - - if (!write_dict(buffer, dict, check_keys, 1)) { - buffer_free(buffer); - return NULL; - } - - /* objectify buffer */ - result = Py_BuildValue("s#", buffer_get_buffer(buffer), - buffer_get_position(buffer)); - buffer_free(buffer); - return result; -} - -static PyObject* get_value(const char* buffer, int* position, int type, - PyObject* as_class, unsigned char tz_aware) { - PyObject* value; - switch (type) { - case 1: - { - double d; - memcpy(&d, buffer + *position, 8); - value = PyFloat_FromDouble(d); - if (!value) { - return NULL; - } - *position += 8; - break; - } - case 2: - case 13: - case 14: - { - int value_length = ((int*)(buffer + *position))[0] - 1; - *position += 4; - value = PyUnicode_DecodeUTF8(buffer + *position, 
value_length, "strict"); - if (!value) { - return NULL; - } - *position += value_length + 1; - break; - } - case 3: - { - int size; - memcpy(&size, buffer + *position, 4); - value = elements_to_dict(buffer + *position + 4, size - 5, as_class, tz_aware); - if (!value) { - return NULL; - } - - /* Decoding for DBRefs */ - if (strcmp(buffer + *position + 5, "$ref") == 0) { /* DBRef */ - PyObject* dbref; - PyObject* collection = PyDict_GetItemString(value, "$ref"); - PyObject* id = PyDict_GetItemString(value, "$id"); - PyObject* database = PyDict_GetItemString(value, "$db"); - - Py_INCREF(collection); - PyDict_DelItemString(value, "$ref"); - Py_INCREF(id); - PyDict_DelItemString(value, "$id"); - - if (database == NULL) { - database = Py_None; - Py_INCREF(database); - } else { - Py_INCREF(database); - PyDict_DelItemString(value, "$db"); - } - - dbref = PyObject_CallFunctionObjArgs(DBRef, collection, id, database, value, NULL); - Py_DECREF(value); - value = dbref; - - Py_DECREF(id); - Py_DECREF(collection); - Py_DECREF(database); - if (!value) { - return NULL; - } - } - - *position += size; - break; - } - case 4: - { - int size, - end; - - memcpy(&size, buffer + *position, 4); - end = *position + size - 1; - *position += 4; - - value = PyList_New(0); - if (!value) { - return NULL; - } - while (*position < end) { - PyObject* to_append; - - int type = (int)buffer[(*position)++]; - int key_size = strlen(buffer + *position); - *position += key_size + 1; /* just skip the key, they're in order. 
*/ - to_append = get_value(buffer, position, type, as_class, tz_aware); - if (!to_append) { - return NULL; - } - PyList_Append(value, to_append); - Py_DECREF(to_append); - } - (*position)++; - break; - } - case 5: - { - PyObject* data; - PyObject* st; - int length, - subtype; - - memcpy(&length, buffer + *position, 4); - subtype = (unsigned char)buffer[*position + 4]; - - if (subtype == 2) { - data = PyString_FromStringAndSize(buffer + *position + 9, length - 4); - } else { - data = PyString_FromStringAndSize(buffer + *position + 5, length); - } - if (!data) { - return NULL; - } - - if (subtype == 3 && UUID) { // Encode as UUID, not Binary - PyObject* kwargs; - PyObject* args = PyTuple_New(0); - if (!args) { - return NULL; - } - kwargs = PyDict_New(); - if (!kwargs) { - Py_DECREF(args); - return NULL; - } - - assert(length == 16); // UUID should always be 16 bytes - - PyDict_SetItemString(kwargs, "bytes", data); - value = PyObject_Call(UUID, args, kwargs); - - Py_DECREF(args); - Py_DECREF(kwargs); - Py_DECREF(data); - if (!value) { - return NULL; - } - - *position += length + 5; - break; - } - - st = PyInt_FromLong(subtype); - if (!st) { - Py_DECREF(data); - return NULL; - } - value = PyObject_CallFunctionObjArgs(Binary, data, st, NULL); - Py_DECREF(st); - Py_DECREF(data); - if (!value) { - return NULL; - } - *position += length + 5; - break; - } - case 6: - case 10: - { - value = Py_None; - Py_INCREF(value); - break; - } - case 7: - { - value = PyObject_CallFunction(ObjectId, "s#", buffer + *position, 12); - if (!value) { - return NULL; - } - *position += 12; - break; - } - case 8: - { - value = buffer[(*position)++] ? Py_True : Py_False; - Py_INCREF(value); - break; - } - case 9: - { - PyObject* replace; - PyObject* args; - PyObject* kwargs; - PyObject* naive = datetime_from_millis(*(long long*)(buffer + *position)); - *position += 8; - if (!tz_aware) { /* In the naive case, we're done here. 
*/ - value = naive; - break; - } - - if (!naive) { - return NULL; - } - replace = PyObject_GetAttrString(naive, "replace"); - Py_DECREF(naive); - if (!replace) { - return NULL; - } - args = PyTuple_New(0); - if (!args) { - Py_DECREF(replace); - return NULL; - } - kwargs = PyDict_New(); - if (!kwargs) { - Py_DECREF(replace); - Py_DECREF(args); - return NULL; - } - if (PyDict_SetItemString(kwargs, "tzinfo", UTC) == -1) { - Py_DECREF(replace); - Py_DECREF(args); - Py_DECREF(kwargs); - return NULL; - } - value = PyObject_Call(replace, args, kwargs); - Py_DECREF(replace); - Py_DECREF(args); - Py_DECREF(kwargs); - break; - } - case 11: - { - int flags_length, - flags, - i; - - int pattern_length = strlen(buffer + *position); - PyObject* pattern = PyUnicode_DecodeUTF8(buffer + *position, pattern_length, "strict"); - if (!pattern) { - return NULL; - } - *position += pattern_length + 1; - flags_length = strlen(buffer + *position); - flags = 0; - for (i = 0; i < flags_length; i++) { - if (buffer[*position + i] == 'i') { - flags |= 2; - } else if (buffer[*position + i] == 'l') { - flags |= 4; - } else if (buffer[*position + i] == 'm') { - flags |= 8; - } else if (buffer[*position + i] == 's') { - flags |= 16; - } else if (buffer[*position + i] == 'u') { - flags |= 32; - } else if (buffer[*position + i] == 'x') { - flags |= 64; - } - } - *position += flags_length + 1; - value = PyObject_CallFunction(RECompile, "Oi", pattern, flags); - Py_DECREF(pattern); - break; - } - case 12: - { - int collection_length; - PyObject* collection; - PyObject* id; - - *position += 4; - collection_length = strlen(buffer + *position); - collection = PyUnicode_DecodeUTF8(buffer + *position, collection_length, "strict"); - if (!collection) { - return NULL; - } - *position += collection_length + 1; - id = PyObject_CallFunction(ObjectId, "s#", buffer + *position, 12); - if (!id) { - Py_DECREF(collection); - return NULL; - } - *position += 12; - value = PyObject_CallFunctionObjArgs(DBRef, collection, 
id, NULL); - Py_DECREF(collection); - Py_DECREF(id); - break; - } - case 15: - { - int code_length, - scope_size; - PyObject* code; - PyObject* scope; - - *position += 8; - code_length = strlen(buffer + *position); - code = PyUnicode_DecodeUTF8(buffer + *position, code_length, "strict"); - if (!code) { - return NULL; - } - *position += code_length + 1; - - memcpy(&scope_size, buffer + *position, 4); - scope = elements_to_dict(buffer + *position + 4, scope_size - 5, - (PyObject*)&PyDict_Type, tz_aware); - if (!scope) { - Py_DECREF(code); - return NULL; - } - *position += scope_size; - - value = PyObject_CallFunctionObjArgs(Code, code, scope, NULL); - Py_DECREF(code); - Py_DECREF(scope); - break; - } - case 16: - { - int i; - memcpy(&i, buffer + *position, 4); - value = PyInt_FromLong(i); - if (!value) { - return NULL; - } - *position += 4; - break; - } - case 17: - { - unsigned int time, inc; - memcpy(&inc, buffer + *position, 4); - memcpy(&time, buffer + *position + 4, 4); - value = PyObject_CallFunction(Timestamp, "II", time, inc); - if (!value) { - return NULL; - } - *position += 8; - break; - } - case 18: - { - long long ll; - memcpy(&ll, buffer + *position, 8); - value = PyLong_FromLongLong(ll); - if (!value) { - return NULL; - } - *position += 8; - break; - } - case -1: - { - value = PyObject_CallFunctionObjArgs(MinKey, NULL); - break; - } - case 127: - { - value = PyObject_CallFunctionObjArgs(MaxKey, NULL); - break; - } - default: - { - PyObject* InvalidDocument = _error("InvalidDocument"); - PyErr_SetString(InvalidDocument, "no c decoder for this type yet"); - Py_DECREF(InvalidDocument); - return NULL; - } - } - return value; -} - -static PyObject* elements_to_dict(const char* string, int max, - PyObject* as_class, unsigned char tz_aware) { - int position = 0; - PyObject* dict = PyObject_CallObject(as_class, NULL); - if (!dict) { - return NULL; - } - while (position < max) { - int type = (int)string[position++]; - int name_length = strlen(string + position); 
- PyObject* name = PyUnicode_DecodeUTF8(string + position, name_length, "strict"); - PyObject* value; - if (!name) { - return NULL; - } - position += name_length + 1; - value = get_value(string, &position, type, as_class, tz_aware); - if (!value) { - return NULL; - } - - PyObject_SetItem(dict, name, value); - Py_DECREF(name); - Py_DECREF(value); - } - return dict; -} - -static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { - unsigned int size; - Py_ssize_t total_size; - const char* string; - PyObject* bson; - PyObject* as_class; - unsigned char tz_aware; - PyObject* dict; - PyObject* remainder; - PyObject* result; - - if (!PyArg_ParseTuple(args, "OOb", &bson, &as_class, &tz_aware)) { - return NULL; - } - - if (!PyString_Check(bson)) { - PyErr_SetString(PyExc_TypeError, "argument to _bson_to_dict must be a string"); - return NULL; - } - total_size = PyString_Size(bson); - if (total_size < 5) { - PyObject* InvalidBSON = _error("InvalidBSON"); - PyErr_SetString(InvalidBSON, - "not enough data for a BSON document"); - Py_DECREF(InvalidBSON); - return NULL; - } - - string = PyString_AsString(bson); - if (!string) { - return NULL; - } - memcpy(&size, string, 4); - - if (total_size < size) { - PyObject* InvalidBSON = _error("InvalidBSON"); - PyErr_SetString(InvalidBSON, - "objsize too large"); - Py_DECREF(InvalidBSON); - return NULL; - } - - if (string[size - 1]) { - PyObject* InvalidBSON = _error("InvalidBSON"); - PyErr_SetString(InvalidBSON, - "bad eoo"); - Py_DECREF(InvalidBSON); - return NULL; - } - - dict = elements_to_dict(string + 4, size - 5, as_class, tz_aware); - if (!dict) { - return NULL; - } - remainder = PyString_FromStringAndSize(string + size, total_size - size); - if (!remainder) { - Py_DECREF(dict); - return NULL; - } - result = Py_BuildValue("OO", dict, remainder); - Py_DECREF(dict); - Py_DECREF(remainder); - return result; -} - -static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { - unsigned int size; - Py_ssize_t 
total_size; - const char* string; - PyObject* bson; - PyObject* dict; - PyObject* result; - PyObject* as_class = (PyObject*)&PyDict_Type; - unsigned char tz_aware = 1; - - if (!PyArg_ParseTuple(args, "O|Ob", &bson, &as_class, &tz_aware)) { - return NULL; - } - - if (!PyString_Check(bson)) { - PyErr_SetString(PyExc_TypeError, "argument to decode_all must be a string"); - return NULL; - } - total_size = PyString_Size(bson); - string = PyString_AsString(bson); - if (!string) { - return NULL; - } - - result = PyList_New(0); - - while (total_size > 0) { - if (total_size < 5) { - PyObject* InvalidBSON = _error("InvalidBSON"); - PyErr_SetString(InvalidBSON, - "not enough data for a BSON document"); - Py_DECREF(InvalidBSON); - return NULL; - } - - memcpy(&size, string, 4); - - if (total_size < size) { - PyObject* InvalidBSON = _error("InvalidBSON"); - PyErr_SetString(InvalidBSON, - "objsize too large"); - Py_DECREF(InvalidBSON); - return NULL; - } - - if (string[size - 1]) { - PyObject* InvalidBSON = _error("InvalidBSON"); - PyErr_SetString(InvalidBSON, - "bad eoo"); - Py_DECREF(InvalidBSON); - return NULL; - } - - dict = elements_to_dict(string + 4, size - 5, as_class, tz_aware); - if (!dict) { - return NULL; - } - PyList_Append(result, dict); - Py_DECREF(dict); - string += size; - total_size -= size; - } - - return result; -} - -static PyMethodDef _CBSONMethods[] = { - {"_dict_to_bson", _cbson_dict_to_bson, METH_VARARGS, - "convert a dictionary to a string containing it's BSON representation."}, - {"_bson_to_dict", _cbson_bson_to_dict, METH_VARARGS, - "convert a BSON string to a SON object."}, - {"decode_all", _cbson_decode_all, METH_VARARGS, - "convert binary data to a sequence of documents."}, - {NULL, NULL, 0, NULL} -}; - -PyMODINIT_FUNC init_cbson(void) { - PyObject *m; - - PyDateTime_IMPORT; - m = Py_InitModule("_cbson", _CBSONMethods); - if (m == NULL) { - return; - } - - // TODO we don't do any error checking here, should we be? 
- _reload_python_objects(); -} diff -Nru pymongo-1.11/bson/code.py pymongo-1.7/bson/code.py --- pymongo-1.11/bson/code.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/bson/code.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,78 +0,0 @@ -# Copyright 2009-2010 10gen, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tools for representing JavaScript code in BSON. -""" - - -class Code(str): - """BSON's JavaScript code type. - - Raises :class:`TypeError` if `code` is not an instance of - :class:`basestring` or `scope` is not ``None`` or an instance of - :class:`dict`. - - Scope variables can be set by passing a dictionary as the `scope` - argument or by using keyword arguments. If a variable is set as a - keyword argument it will override any setting for that variable in - the `scope` dictionary. - - :Parameters: - - `code`: string containing JavaScript code to be evaluated - - `scope` (optional): dictionary representing the scope in which - `code` should be evaluated - a mapping from identifiers (as - strings) to values - - `**kwargs` (optional): scope variables can also be passed as - keyword arguments - - .. versionadded:: 1.9 - Ability to pass scope values using keyword arguments. 
- """ - - def __new__(cls, code, scope=None, **kwargs): - if not isinstance(code, basestring): - raise TypeError("code must be an instance of basestring") - - self = str.__new__(cls, code) - - try: - self.__scope = code.scope - except AttributeError: - self.__scope = {} - - if scope is not None: - if not isinstance(scope, dict): - raise TypeError("scope must be an instance of dict") - self.__scope.update(scope) - - self.__scope.update(kwargs) - - return self - - @property - def scope(self): - """Scope dictionary for this instance. - """ - return self.__scope - - def __repr__(self): - return "Code(%s, %r)" % (str.__repr__(self), self.__scope) - - def __eq__(self, other): - if isinstance(other, Code): - return (self.__scope, str(self)) == (other.__scope, str(other)) - return False - - def __ne__(self, other): - return not self == other diff -Nru pymongo-1.11/bson/dbref.py pymongo-1.7/bson/dbref.py --- pymongo-1.11/bson/dbref.py 2011-04-05 17:09:10.000000000 +0000 +++ pymongo-1.7/bson/dbref.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,137 +0,0 @@ -# Copyright 2009-2010 10gen, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tools for manipulating DBRefs (references to MongoDB documents).""" - -from bson.son import SON -from copy import deepcopy - - -class DBRef(object): - """A reference to a document stored in MongoDB. - """ - - def __init__(self, collection, id, database=None, _extra={}, **kwargs): - """Initialize a new :class:`DBRef`. 
- - Raises :class:`TypeError` if `collection` or `database` is not - an instance of :class:`basestring`. `database` is optional and - allows references to documents to work across databases. Any - additional keyword arguments will create additional fields in - the resultant embedded document. - - :Parameters: - - `collection`: name of the collection the document is stored in - - `id`: the value of the document's ``"_id"`` field - - `database` (optional): name of the database to reference - - `**kwargs` (optional): additional keyword arguments will - create additional, custom fields - - .. versionchanged:: 1.8 - Now takes keyword arguments to specify additional fields. - .. versionadded:: 1.1.1 - The `database` parameter. - - .. mongodoc:: dbrefs - """ - if not isinstance(collection, basestring): - raise TypeError("collection must be an instance of basestring") - if database is not None and not isinstance(database, basestring): - raise TypeError("database must be an instance of basestring") - - self.__collection = collection - self.__id = id - self.__database = database - kwargs.update(_extra) - self.__kwargs = kwargs - - @property - def collection(self): - """Get the name of this DBRef's collection as unicode. - """ - return self.__collection - - @property - def id(self): - """Get this DBRef's _id. - """ - return self.__id - - @property - def database(self): - """Get the name of this DBRef's database. - - Returns None if this DBRef doesn't specify a database. - - .. versionadded:: 1.1.1 - """ - return self.__database - - def __getattr__(self, key): - try: - return self.__kwargs[key] - except KeyError: - raise AttributeError(key) - - # Have to provide __setstate__ to avoid - # infinite recursion since we override - # __getattr__. - def __setstate__(self, state): - self.__dict__.update(state) - - def as_doc(self): - """Get the SON document representation of this DBRef. 
- - Generally not needed by application developers - """ - doc = SON([("$ref", self.collection), - ("$id", self.id)]) - if self.database is not None: - doc["$db"] = self.database - doc.update(self.__kwargs) - return doc - - def __repr__(self): - extra = "".join([", %s=%r" % (k, v) - for k, v in self.__kwargs.iteritems()]) - if self.database is None: - return "DBRef(%r, %r%s)" % (self.collection, self.id, extra) - return "DBRef(%r, %r, %r%s)" % (self.collection, self.id, - self.database, extra) - - def __cmp__(self, other): - if isinstance(other, DBRef): - return cmp([self.__database, self.__collection, - self.__id, self.__kwargs], - [other.__database, other.__collection, - other.__id, other.__kwargs]) - return NotImplemented - - def __hash__(self): - """Get a hash value for this :class:`DBRef`. - - .. versionadded:: 1.1 - """ - return hash((self.__collection, self.__id, self.__database, - tuple(sorted(self.__kwargs.items())))) - - def __deepcopy__(self, memo): - """Support function for `copy.deepcopy()`. - - .. versionadded:: 1.10 - """ - return DBRef(deepcopy(self.__collection, memo), - deepcopy(self.__id, memo), - deepcopy(self.__database, memo), - deepcopy(self.__kwargs, memo)) diff -Nru pymongo-1.11/bson/encoding_helpers.c pymongo-1.7/bson/encoding_helpers.c --- pymongo-1.11/bson/encoding_helpers.c 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/bson/encoding_helpers.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,118 +0,0 @@ -/* - * Copyright 2009-2010 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "encoding_helpers.h" - -/* - * Portions Copyright 2001 Unicode, Inc. - * - * Disclaimer - * - * This source code is provided as is by Unicode, Inc. No claims are - * made as to fitness for any particular purpose. No warranties of any - * kind are expressed or implied. The recipient agrees to determine - * applicability of information provided. If this file has been - * purchased on magnetic or optical media from Unicode, Inc., the - * sole remedy for any claim will be exchange of defective media - * within 90 days of receipt. - * - * Limitations on Rights to Redistribute This Code - * - * Unicode, Inc. hereby grants the right to freely use the information - * supplied in this file in the creation of products supporting the - * Unicode Standard, and to make copies of this file in any form - * for internal or external distribution as long as this notice - * remains attached. - */ - -/* - * Index into the table below with the first byte of a UTF-8 sequence to - * get the number of trailing bytes that are supposed to follow it. - */ -static const char trailingBytesForUTF8[256] = { - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 -}; - -/* --------------------------------------------------------------------- */ - -/* - * Utility routine to tell whether a sequence of bytes is legal UTF-8. - * This must be called with the length pre-determined by the first byte. 
- * The length can be set by: - * length = trailingBytesForUTF8[*source]+1; - * and the sequence is illegal right away if there aren't that many bytes - * available. - * If presented with a length > 4, this returns 0. The Unicode - * definition of UTF-8 goes up to 4-byte sequences. - */ -static unsigned char isLegalUTF8(const unsigned char* source, int length) { - unsigned char a; - const unsigned char* srcptr = source + length; - switch (length) { - default: return 0; - /* Everything else falls through when "true"... */ - case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; - case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; - case 2: if ((a = (*--srcptr)) > 0xBF) return 0; - switch (*source) { - /* no fall-through in this inner switch */ - case 0xE0: if (a < 0xA0) return 0; break; - case 0xF0: if (a < 0x90) return 0; break; - case 0xF4: if (a > 0x8F) return 0; break; - default: if (a < 0x80) return 0; - } - case 1: if (*source >= 0x80 && *source < 0xC2) return 0; - if (*source > 0xF4) return 0; - } - return 1; -} - -result_t check_string(const unsigned char* string, const int length, - const char check_utf8, const char check_null) { - int position = 0; - /* By default we go character by character. 
Will be different for checking - * UTF-8 */ - int sequence_length = 1; - - if (!check_utf8 && !check_null) { - return VALID; - } - - while (position < length) { - if (check_null && *(string + position) == 0) { - return HAS_NULL; - } - if (check_utf8) { - sequence_length = trailingBytesForUTF8[*(string + position)] + 1; - if ((position + sequence_length) > length) { - return NOT_UTF_8; - } - if (!isLegalUTF8(string + position, sequence_length)) { - return NOT_UTF_8; - } - } - position += sequence_length; - } - - return VALID; -} diff -Nru pymongo-1.11/bson/encoding_helpers.h pymongo-1.7/bson/encoding_helpers.h --- pymongo-1.11/bson/encoding_helpers.h 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/bson/encoding_helpers.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -/* - * Copyright 2009-2010 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ENCODING_HELPERS_H -#define ENCODING_HELPERS_H - -typedef enum { - VALID, - NOT_UTF_8, - HAS_NULL -} result_t; - -result_t check_string(const unsigned char* string, const int length, - const char check_utf8, const char check_null); - -#endif diff -Nru pymongo-1.11/bson/errors.py pymongo-1.7/bson/errors.py --- pymongo-1.11/bson/errors.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/bson/errors.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ -# Copyright 2009-2010 10gen, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Exceptions raised by the BSON package.""" - - -class BSONError(Exception): - """Base class for all BSON exceptions. - """ - - -class InvalidBSON(BSONError): - """Raised when trying to create a BSON object from invalid data. - """ - - -class InvalidStringData(BSONError): - """Raised when trying to encode a string containing non-UTF8 data. - """ - - -class InvalidDocument(BSONError): - """Raised when trying to create a BSON object from an invalid document. - """ - - -class InvalidId(BSONError): - """Raised when trying to create an ObjectId from invalid data. - """ diff -Nru pymongo-1.11/bson/__init__.py pymongo-1.7/bson/__init__.py --- pymongo-1.11/bson/__init__.py 2011-04-06 17:51:41.000000000 +0000 +++ pymongo-1.7/bson/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,518 +0,0 @@ -# Copyright 2009-2010 10gen, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""BSON (Binary JSON) encoding and decoding. 
-""" - -import calendar -import datetime -import re -import struct -import warnings - -from bson.binary import Binary -from bson.code import Code -from bson.dbref import DBRef -from bson.errors import (InvalidBSON, - InvalidDocument, - InvalidStringData) -from bson.max_key import MaxKey -from bson.min_key import MinKey -from bson.objectid import ObjectId -from bson.son import SON -from bson.timestamp import Timestamp -from bson.tz_util import utc - - -try: - import _cbson - _use_c = True -except ImportError: - _use_c = False - -try: - import uuid - _use_uuid = True -except ImportError: - _use_uuid = False - - -# This sort of sucks, but seems to be as good as it gets... -RE_TYPE = type(re.compile("")) - -MAX_INT32 = 2147483647 -MIN_INT32 = -2147483648 -MAX_INT64 = 9223372036854775807 -MIN_INT64 = -9223372036854775808 - - -def _get_int(data, as_class=None, tz_aware=False, unsigned=False): - format = unsigned and "I" or "i" - try: - value = struct.unpack("<%s" % format, data[:4])[0] - except struct.error: - raise InvalidBSON() - - return (value, data[4:]) - - -def _get_c_string(data, length=None): - if length is None: - try: - length = data.index("\x00") - except ValueError: - raise InvalidBSON() - - return (unicode(data[:length], "utf-8"), data[length + 1:]) - - -def _make_c_string(string, check_null=False): - if check_null and "\x00" in string: - raise InvalidDocument("BSON keys / regex patterns must not " - "contain a NULL character") - if isinstance(string, unicode): - return string.encode("utf-8") + "\x00" - else: - try: - string.decode("utf-8") - return string + "\x00" - except: - raise InvalidStringData("strings in documents must be valid " - "UTF-8: %r" % string) - - -def _get_number(data, as_class, tz_aware): - return (struct.unpack(" MAX_INT64 or value < MIN_INT64: - raise OverflowError("BSON can only handle up to 8-byte ints") - if value > MAX_INT32 or value < MIN_INT32: - return "\x12" + name + struct.pack(" MAX_INT64 or value < MIN_INT64: - raise 
OverflowError("BSON can only handle up to 8-byte ints") - return "\x12" + name + struct.pack("`_. They allow for -specialized encoding and decoding of BSON documents into `Mongo -Extended JSON -`_'s *Strict* -mode. This lets you encode / decode BSON documents to JSON even when -they use special BSON types. - -Example usage (serialization):: - ->>> json.dumps(..., default=json_util.default) - -Example usage (deserialization):: - ->>> json.loads(..., object_hook=json_util.object_hook) - -Currently this does not handle special encoding and decoding for -:class:`~bson.binary.Binary` and :class:`~bson.code.Code` instances. - -.. versionchanged:: 1.9 - Handle :class:`uuid.UUID` instances, whenever possible. - -.. versionchanged:: 1.8 - Handle timezone aware datetime instances on encode, decode to - timezone aware datetime instances. - -.. versionchanged:: 1.8 - Added support for encoding/decoding :class:`~bson.max_key.MaxKey` - and :class:`~bson.min_key.MinKey`, and for encoding - :class:`~bson.timestamp.Timestamp`. - -.. versionchanged:: 1.2 - Added support for encoding/decoding datetimes and regular expressions. -""" - -import calendar -import datetime -import re -try: - import uuid - _use_uuid = True -except ImportError: - _use_uuid = False - -from bson.dbref import DBRef -from bson.max_key import MaxKey -from bson.min_key import MinKey -from bson.objectid import ObjectId -from bson.timestamp import Timestamp -from bson.tz_util import utc - -# TODO support Binary and Code -# Binary and Code are tricky because they subclass str so json thinks it can -# handle them. Not sure what the proper way to get around this is... -# -# One option is to just add some other method that users need to call _before_ -# calling json.dumps or json.loads. That is pretty terrible though... - -# TODO share this with bson.py? 
-_RE_TYPE = type(re.compile("foo")) - - -def object_hook(dct): - if "$oid" in dct: - return ObjectId(str(dct["$oid"])) - if "$ref" in dct: - return DBRef(dct["$ref"], dct["$id"], dct.get("$db", None)) - if "$date" in dct: - return datetime.datetime.fromtimestamp(float(dct["$date"]) / 1000.0, - utc) - if "$regex" in dct: - flags = 0 - if "i" in dct["$options"]: - flags |= re.IGNORECASE - if "m" in dct["$options"]: - flags |= re.MULTILINE - return re.compile(dct["$regex"], flags) - if "$minKey" in dct: - return MinKey() - if "$maxKey" in dct: - return MaxKey() - if _use_uuid and "$uuid" in dct: - return uuid.UUID(dct["$uuid"]) - return dct - - -def default(obj): - if isinstance(obj, ObjectId): - return {"$oid": str(obj)} - if isinstance(obj, DBRef): - return obj.as_doc() - if isinstance(obj, datetime.datetime): - # TODO share this code w/ bson.py? - if obj.utcoffset() is not None: - obj = obj - obj.utcoffset() - millis = int(calendar.timegm(obj.timetuple()) * 1000 + - obj.microsecond / 1000) - return {"$date": millis} - if isinstance(obj, _RE_TYPE): - flags = "" - if obj.flags & re.IGNORECASE: - flags += "i" - if obj.flags & re.MULTILINE: - flags += "m" - return {"$regex": obj.pattern, - "$options": flags} - if isinstance(obj, MinKey): - return {"$minKey": 1} - if isinstance(obj, MaxKey): - return {"$maxKey": 1} - if isinstance(obj, Timestamp): - return {"t": obj.time, "i": obj.inc} - if _use_uuid and isinstance(obj, uuid.UUID): - return {"$uuid": obj.hex} - raise TypeError("%r is not JSON serializable" % obj) diff -Nru pymongo-1.11/bson/max_key.py pymongo-1.7/bson/max_key.py --- pymongo-1.11/bson/max_key.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/bson/max_key.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -# Copyright 2010 10gen, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Representation for the MongoDB internal MaxKey type. -""" - - -class MaxKey(object): - """MongoDB internal MaxKey type. - """ - - def __eq__(self, other): - if isinstance(other, MaxKey): - return True - return NotImplemented - - def __ne__(self, other): - return not self == other - - def __repr__(self): - return "MaxKey()" diff -Nru pymongo-1.11/bson/min_key.py pymongo-1.7/bson/min_key.py --- pymongo-1.11/bson/min_key.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/bson/min_key.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -# Copyright 2010 10gen, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Representation for the MongoDB internal MinKey type. -""" - - -class MinKey(object): - """MongoDB internal MinKey type. 
- """ - - def __eq__(self, other): - if isinstance(other, MinKey): - return True - return NotImplemented - - def __ne__(self, other): - return not self == other - - def __repr__(self): - return "MinKey()" diff -Nru pymongo-1.11/bson/objectid.py pymongo-1.7/bson/objectid.py --- pymongo-1.11/bson/objectid.py 2011-04-05 16:24:09.000000000 +0000 +++ pymongo-1.7/bson/objectid.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,222 +0,0 @@ -# Copyright 2009-2010 10gen, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tools for working with MongoDB `ObjectIds -`_. -""" - -import calendar -import datetime -try: - import hashlib - _md5func = hashlib.md5 -except ImportError: # for Python < 2.5 - import md5 - _md5func = md5.new -import os -import socket -import struct -import threading -import time - -from bson.errors import InvalidId -from bson.tz_util import utc - - -def _machine_bytes(): - """Get the machine portion of an ObjectId. - """ - machine_hash = _md5func() - machine_hash.update(socket.gethostname()) - return machine_hash.digest()[0:3] - - -class ObjectId(object): - """A MongoDB ObjectId. - """ - - _inc = 0 - _inc_lock = threading.Lock() - - _machine_bytes = _machine_bytes() - - __slots__ = ('__id') - - def __init__(self, oid=None): - """Initialize a new ObjectId. - - If `oid` is ``None``, create a new (unique) ObjectId. If `oid` - is an instance of (``basestring``, :class:`ObjectId`) validate - it and use that. Otherwise, a :class:`TypeError` is - raised. 
If `oid` is invalid, - :class:`~bson.errors.InvalidId` is raised. - - :Parameters: - - `oid` (optional): a valid ObjectId (12 byte binary or 24 character - hex string) - - .. versionadded:: 1.2.1 - The `oid` parameter can be a ``unicode`` instance (that contains - only hexadecimal digits). - - .. mongodoc:: objectids - """ - if oid is None: - self.__generate() - else: - self.__validate(oid) - - @classmethod - def from_datetime(cls, generation_time): - """Create a dummy ObjectId instance with a specific generation time. - - This method is useful for doing range queries on a field - containing :class:`ObjectId` instances. - - .. warning:: - It is not safe to insert a document containing an ObjectId - generated using this method. This method deliberately - eliminates the uniqueness guarantee that ObjectIds - generally provide. ObjectIds generated with this method - should be used exclusively in queries. - - `generation_time` will be converted to UTC. Naive datetime - instances will be treated as though they already contain UTC. - - An example using this helper to get documents where ``"_id"`` - was generated before January 1, 2010 would be: - - >>> gen_time = datetime.datetime(2010, 1, 1) - >>> dummy_id = ObjectId.from_datetime(gen_time) - >>> result = collection.find({"_id": {"$lt": dummy_id}}) - - :Parameters: - - `generation_time`: :class:`~datetime.datetime` to be used - as the generation time for the resulting ObjectId. - - .. versionchanged:: 1.8 - Properly handle timezone aware values for - `generation_time`. - - .. versionadded:: 1.6 - """ - if generation_time.utcoffset() is not None: - generation_time = generation_time - generation_time.utcoffset() - ts = calendar.timegm(generation_time.timetuple()) - oid = struct.pack(">i", int(ts)) + "\x00" * 8 - return cls(oid) - - def __generate(self): - """Generate a new value for this ObjectId. 
- """ - oid = "" - - # 4 bytes current time - oid += struct.pack(">i", int(time.time())) - - # 3 bytes machine - oid += ObjectId._machine_bytes - - # 2 bytes pid - oid += struct.pack(">H", os.getpid() % 0xFFFF) - - # 3 bytes inc - ObjectId._inc_lock.acquire() - oid += struct.pack(">i", ObjectId._inc)[1:4] - ObjectId._inc = (ObjectId._inc + 1) % 0xFFFFFF - ObjectId._inc_lock.release() - - self.__id = oid - - def __validate(self, oid): - """Validate and use the given id for this ObjectId. - - Raises TypeError if id is not an instance of (str, ObjectId) and - InvalidId if it is not a valid ObjectId. - - :Parameters: - - `oid`: a valid ObjectId - """ - if isinstance(oid, ObjectId): - self.__id = oid.__id - elif isinstance(oid, basestring): - if len(oid) == 12: - self.__id = oid - elif len(oid) == 24: - try: - self.__id = oid.decode("hex") - except TypeError: - raise InvalidId("%s is not a valid ObjectId" % oid) - else: - raise InvalidId("%s is not a valid ObjectId" % oid) - else: - raise TypeError("id must be an instance of (str, ObjectId), " - "not %s" % type(oid)) - - @property - def binary(self): - """12-byte binary representation of this ObjectId. - """ - return self.__id - - @property - def generation_time(self): - """A :class:`datetime.datetime` instance representing the time of - generation for this :class:`ObjectId`. - - The :class:`datetime.datetime` is timezone aware, and - represents the generation time in UTC. It is precise to the - second. - - .. versionchanged:: 1.8 - Now return an aware datetime instead of a naive one. - - .. versionadded:: 1.2 - """ - t = struct.unpack(">i", self.__id[0:4])[0] - return datetime.datetime.fromtimestamp(t, utc) - - def __getstate__(self): - """return value of object for pickling. - needed explicitly because __slots__() defined. - """ - return self.__id - - def __setstate__(self, value): - """explicit state set from pickling - """ - # Provide backwards compatability with OIDs - # pickled with pymongo-1.9. 
- if isinstance(value, dict): - self.__id = value['_ObjectId__id'] - else: - self.__id = value - - def __str__(self): - return self.__id.encode("hex") - - def __repr__(self): - return "ObjectId('%s')" % self.__id.encode("hex") - - def __cmp__(self, other): - if isinstance(other, ObjectId): - return cmp(self.__id, other.__id) - return NotImplemented - - def __hash__(self): - """Get a hash value for this :class:`ObjectId`. - - .. versionadded:: 1.1 - """ - return hash(self.__id) diff -Nru pymongo-1.11/bson/son.py pymongo-1.7/bson/son.py --- pymongo-1.11/bson/son.py 2011-04-06 17:52:30.000000000 +0000 +++ pymongo-1.7/bson/son.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,212 +0,0 @@ -# Copyright 2009-2010 10gen, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tools for creating and manipulating SON, the Serialized Ocument Notation. - -Regular dictionaries can be used instead of SON objects, but not when the order -of keys is important. A SON object can be used just like a normal Python -dictionary.""" - -import copy - - -class SON(dict): - """SON data. - - A subclass of dict that maintains ordering of keys and provides a - few extra niceties for dealing with SON. SON objects can be - converted to and from BSON. 
- - The mapping from Python types to BSON types is as follows: - - =================================== ============= =================== - Python Type BSON Type Supported Direction - =================================== ============= =================== - None null both - bool boolean both - int number (int) both - float number (real) both - string string py -> bson - unicode string both - list array both - dict / `SON` object both - datetime.datetime [#dt]_ [#dt2]_ date both - compiled re regex both - `bson.binary.Binary` binary both - `bson.objectid.ObjectId` oid both - `bson.dbref.DBRef` dbref both - None undefined bson -> py - unicode code bson -> py - `bson.code.Code` code py -> bson - unicode symbol bson -> py - =================================== ============= =================== - - Note that to save binary data it must be wrapped as an instance of - `bson.binary.Binary`. Otherwise it will be saved as a BSON string - and retrieved as unicode. - - .. [#dt] datetime.datetime instances will be rounded to the nearest - millisecond when saved - .. [#dt2] all datetime.datetime instances are treated as *naive*. clients - should always use UTC. - """ - - def __init__(self, data=None, **kwargs): - self.__keys = [] - dict.__init__(self) - self.update(data) - self.update(kwargs) - - def __repr__(self): - result = [] - for key in self.__keys: - result.append("(%r, %r)" % (key, self[key])) - return "SON([%s])" % ", ".join(result) - - def __setitem__(self, key, value): - if key not in self: - self.__keys.append(key) - dict.__setitem__(self, key, value) - - def __delitem__(self, key): - self.__keys.remove(key) - dict.__delitem__(self, key) - - def keys(self): - return list(self.__keys) - - def copy(self): - other = SON() - other.update(self) - return other - - # TODO this is all from UserDict.DictMixin. it could probably be made more - # efficient. 
- # second level definitions support higher levels - def __iter__(self): - for k in self.keys(): - yield k - - def has_key(self, key): - return key in self.keys() - - def __contains__(self, key): - return key in self.keys() - - # third level takes advantage of second level definitions - def iteritems(self): - for k in self: - yield (k, self[k]) - - def iterkeys(self): - return self.__iter__() - - # fourth level uses definitions from lower levels - def itervalues(self): - for _, v in self.iteritems(): - yield v - - def values(self): - return [v for _, v in self.iteritems()] - - def items(self): - return list(self.iteritems()) - - def clear(self): - for key in self.keys(): - del self[key] - - def setdefault(self, key, default=None): - try: - return self[key] - except KeyError: - self[key] = default - return default - - def pop(self, key, *args): - if len(args) > 1: - raise TypeError("pop expected at most 2 arguments, got "\ - + repr(1 + len(args))) - try: - value = self[key] - except KeyError: - if args: - return args[0] - raise - del self[key] - return value - - def popitem(self): - try: - k, v = self.iteritems().next() - except StopIteration: - raise KeyError('container is empty') - del self[k] - return (k, v) - - def update(self, other=None, **kwargs): - # Make progressively weaker assumptions about "other" - if other is None: - pass - elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups - for k, v in other.iteritems(): - self[k] = v - elif hasattr(other, 'keys'): - for k in other.keys(): - self[k] = other[k] - else: - for k, v in other: - self[k] = v - if kwargs: - self.update(kwargs) - - def get(self, key, default=None): - try: - return self[key] - except KeyError: - return default - - def __cmp__(self, other): - if isinstance(other, SON): - return cmp((dict(self.iteritems()), self.keys()), - (dict(other.iteritems()), other.keys())) - return cmp(dict(self.iteritems()), other) - - def __len__(self): - return len(self.keys()) - - def 
to_dict(self): - """Convert a SON document to a normal Python dictionary instance. - - This is trickier than just *dict(...)* because it needs to be - recursive. - """ - - def transform_value(value): - if isinstance(value, list): - return [transform_value(v) for v in value] - if isinstance(value, SON): - value = dict(value) - if isinstance(value, dict): - for k, v in value.iteritems(): - value[k] = transform_value(v) - return value - - return transform_value(dict(self)) - - def __deepcopy__(self, memo): - out = SON() - for k, v in self.iteritems(): - out[k] = copy.deepcopy(v, memo) - return out diff -Nru pymongo-1.11/bson/time64.c pymongo-1.7/bson/time64.c --- pymongo-1.11/bson/time64.c 2011-04-12 18:04:59.000000000 +0000 +++ pymongo-1.7/bson/time64.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,835 +0,0 @@ -/* - -Copyright (c) 2007-2010 Michael G Schwern - -This software originally derived from Paul Sheer's pivotal_gmtime_r.c. - -The MIT License: - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -*/ - -/* - -Programmers who have available to them 64-bit time values as a 'long -long' type can use localtime64_r() and gmtime64_r() which correctly -converts the time even on 32-bit systems. Whether you have 64-bit time -values will depend on the operating system. - -localtime64_r() is a 64-bit equivalent of localtime_r(). - -gmtime64_r() is a 64-bit equivalent of gmtime_r(). - -*/ - -#ifdef _MSC_VER - #define _CRT_SECURE_NO_WARNINGS -#endif - -#include -#include -#include -#include -#include -#include -#include "time64.h" -#include "time64_limits.h" - - -/* Spec says except for stftime() and the _r() functions, these - all return static memory. Stabbings! */ -static struct TM Static_Return_Date; -static char Static_Return_String[35]; - -static const int days_in_month[2][12] = { - {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}, - {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}, -}; - -static const int julian_days_by_month[2][12] = { - {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334}, - {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335}, -}; - -static char wday_name[7][4] = { - "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" -}; - -static char mon_name[12][4] = { - "Jan", "Feb", "Mar", "Apr", "May", "Jun", - "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" -}; - -static const int length_of_year[2] = { 365, 366 }; - -/* Some numbers relating to the gregorian cycle */ -static const Year years_in_gregorian_cycle = 400; -#define days_in_gregorian_cycle ((365 * 400) + 100 - 4 + 1) -static const Time64_T seconds_in_gregorian_cycle = days_in_gregorian_cycle * 60LL * 60LL * 24LL; - -/* Year range we can trust the time funcitons with */ -#define MAX_SAFE_YEAR 2037 -#define MIN_SAFE_YEAR 1971 - -/* 28 year 
Julian calendar cycle */ -#define SOLAR_CYCLE_LENGTH 28 - -/* Year cycle from MAX_SAFE_YEAR down. */ -static const int safe_years_high[SOLAR_CYCLE_LENGTH] = { - 2016, 2017, 2018, 2019, - 2020, 2021, 2022, 2023, - 2024, 2025, 2026, 2027, - 2028, 2029, 2030, 2031, - 2032, 2033, 2034, 2035, - 2036, 2037, 2010, 2011, - 2012, 2013, 2014, 2015 -}; - -/* Year cycle from MIN_SAFE_YEAR up */ -static const int safe_years_low[SOLAR_CYCLE_LENGTH] = { - 1996, 1997, 1998, 1971, - 1972, 1973, 1974, 1975, - 1976, 1977, 1978, 1979, - 1980, 1981, 1982, 1983, - 1984, 1985, 1986, 1987, - 1988, 1989, 1990, 1991, - 1992, 1993, 1994, 1995, -}; - -/* This isn't used, but it's handy to look at */ -static const int dow_year_start[SOLAR_CYCLE_LENGTH] = { - 5, 0, 1, 2, /* 0 2016 - 2019 */ - 3, 5, 6, 0, /* 4 */ - 1, 3, 4, 5, /* 8 1996 - 1998, 1971*/ - 6, 1, 2, 3, /* 12 1972 - 1975 */ - 4, 6, 0, 1, /* 16 */ - 2, 4, 5, 6, /* 20 2036, 2037, 2010, 2011 */ - 0, 2, 3, 4 /* 24 2012, 2013, 2014, 2015 */ -}; - -/* Let's assume people are going to be looking for dates in the future. - Let's provide some cheats so you can skip ahead. - This has a 4x speed boost when near 2008. -*/ -/* Number of days since epoch on Jan 1st, 2008 GMT */ -#define CHEAT_DAYS (1199145600 / 24 / 60 / 60) -#define CHEAT_YEARS 108 - -#define IS_LEAP(n) ((!(((n) + 1900) % 400) || (!(((n) + 1900) % 4) && (((n) + 1900) % 100))) != 0) -#define WRAP(a,b,m) ((a) = ((a) < 0 ) ? 
((b)--, (a) + (m)) : (a)) - -#ifdef USE_SYSTEM_LOCALTIME -# define SHOULD_USE_SYSTEM_LOCALTIME(a) ( \ - (a) <= SYSTEM_LOCALTIME_MAX && \ - (a) >= SYSTEM_LOCALTIME_MIN \ -) -#else -# define SHOULD_USE_SYSTEM_LOCALTIME(a) (0) -#endif - -#ifdef USE_SYSTEM_GMTIME -# define SHOULD_USE_SYSTEM_GMTIME(a) ( \ - (a) <= SYSTEM_GMTIME_MAX && \ - (a) >= SYSTEM_GMTIME_MIN \ -) -#else -# define SHOULD_USE_SYSTEM_GMTIME(a) (0) -#endif - -/* Multi varadic macros are a C99 thing, alas */ -#ifdef TIME_64_DEBUG -# define TIME64_TRACE(format) (fprintf(stderr, format)) -# define TIME64_TRACE1(format, var1) (fprintf(stderr, format, var1)) -# define TIME64_TRACE2(format, var1, var2) (fprintf(stderr, format, var1, var2)) -# define TIME64_TRACE3(format, var1, var2, var3) (fprintf(stderr, format, var1, var2, var3)) -#else -# define TIME64_TRACE(format) ((void)0) -# define TIME64_TRACE1(format, var1) ((void)0) -# define TIME64_TRACE2(format, var1, var2) ((void)0) -# define TIME64_TRACE3(format, var1, var2, var3) ((void)0) -#endif - - -static int is_exception_century(Year year) -{ - int is_exception = ((year % 100 == 0) && !(year % 400 == 0)); - TIME64_TRACE1("# is_exception_century: %s\n", is_exception ? "yes" : "no"); - - return(is_exception); -} - - -/* Compare two dates. - The result is like cmp. 
- Ignores things like gmtoffset and dst -*/ -int cmp_date( const struct TM* left, const struct tm* right ) { - if( left->tm_year > right->tm_year ) - return 1; - else if( left->tm_year < right->tm_year ) - return -1; - - if( left->tm_mon > right->tm_mon ) - return 1; - else if( left->tm_mon < right->tm_mon ) - return -1; - - if( left->tm_mday > right->tm_mday ) - return 1; - else if( left->tm_mday < right->tm_mday ) - return -1; - - if( left->tm_hour > right->tm_hour ) - return 1; - else if( left->tm_hour < right->tm_hour ) - return -1; - - if( left->tm_min > right->tm_min ) - return 1; - else if( left->tm_min < right->tm_min ) - return -1; - - if( left->tm_sec > right->tm_sec ) - return 1; - else if( left->tm_sec < right->tm_sec ) - return -1; - - return 0; -} - - -/* Check if a date is safely inside a range. - The intention is to check if its a few days inside. -*/ -int date_in_safe_range( const struct TM* date, const struct tm* min, const struct tm* max ) { - if( cmp_date(date, min) == -1 ) - return 0; - - if( cmp_date(date, max) == 1 ) - return 0; - - return 1; -} - - -/* timegm() is not in the C or POSIX spec, but it is such a useful - extension I would be remiss in leaving it out. 
Also I need it - for localtime64() -*/ -Time64_T timegm64(const struct TM *date) { - Time64_T days = 0; - Time64_T seconds = 0; - Year year; - Year orig_year = (Year)date->tm_year; - int cycles = 0; - - if( orig_year > 100 ) { - cycles = (int)((orig_year - 100) / 400); - orig_year -= cycles * 400; - days += (Time64_T)cycles * days_in_gregorian_cycle; - } - else if( orig_year < -300 ) { - cycles = (int)((orig_year - 100) / 400); - orig_year -= cycles * 400; - days += (Time64_T)cycles * days_in_gregorian_cycle; - } - TIME64_TRACE3("# timegm/ cycles: %d, days: %lld, orig_year: %lld\n", cycles, days, orig_year); - - if( orig_year > 70 ) { - year = 70; - while( year < orig_year ) { - days += length_of_year[IS_LEAP(year)]; - year++; - } - } - else if ( orig_year < 70 ) { - year = 69; - do { - days -= length_of_year[IS_LEAP(year)]; - year--; - } while( year >= orig_year ); - } - - days += julian_days_by_month[IS_LEAP(orig_year)][date->tm_mon]; - days += date->tm_mday - 1; - - seconds = days * 60 * 60 * 24; - - seconds += date->tm_hour * 60 * 60; - seconds += date->tm_min * 60; - seconds += date->tm_sec; - - return(seconds); -} - - -static int check_tm(struct TM *tm) -{ - /* Don't forget leap seconds */ - assert(tm->tm_sec >= 0); - assert(tm->tm_sec <= 61); - - assert(tm->tm_min >= 0); - assert(tm->tm_min <= 59); - - assert(tm->tm_hour >= 0); - assert(tm->tm_hour <= 23); - - assert(tm->tm_mday >= 1); - assert(tm->tm_mday <= days_in_month[IS_LEAP(tm->tm_year)][tm->tm_mon]); - - assert(tm->tm_mon >= 0); - assert(tm->tm_mon <= 11); - - assert(tm->tm_wday >= 0); - assert(tm->tm_wday <= 6); - - assert(tm->tm_yday >= 0); - assert(tm->tm_yday <= length_of_year[IS_LEAP(tm->tm_year)]); - -#ifdef HAS_TM_TM_GMTOFF - assert(tm->tm_gmtoff >= -24 * 60 * 60); - assert(tm->tm_gmtoff <= 24 * 60 * 60); -#endif - - return 1; -} - - -/* The exceptional centuries without leap years cause the cycle to - shift by 16 -*/ -static Year cycle_offset(Year year) -{ - const Year start_year = 2000; - 
Year year_diff = year - start_year; - Year exceptions; - - if( year > start_year ) - year_diff--; - - exceptions = year_diff / 100; - exceptions -= year_diff / 400; - - TIME64_TRACE3("# year: %lld, exceptions: %lld, year_diff: %lld\n", - year, exceptions, year_diff); - - return exceptions * 16; -} - -/* For a given year after 2038, pick the latest possible matching - year in the 28 year calendar cycle. - - A matching year... - 1) Starts on the same day of the week. - 2) Has the same leap year status. - - This is so the calendars match up. - - Also the previous year must match. When doing Jan 1st you might - wind up on Dec 31st the previous year when doing a -UTC time zone. - - Finally, the next year must have the same start day of week. This - is for Dec 31st with a +UTC time zone. - It doesn't need the same leap year status since we only care about - January 1st. -*/ -static int safe_year(const Year year) -{ - int safe_year = 0; - Year year_cycle; - - if( year >= MIN_SAFE_YEAR && year <= MAX_SAFE_YEAR ) { - return (int)year; - } - - year_cycle = year + cycle_offset(year); - - /* safe_years_low is off from safe_years_high by 8 years */ - if( year < MIN_SAFE_YEAR ) - year_cycle -= 8; - - /* Change non-leap xx00 years to an equivalent */ - if( is_exception_century(year) ) - year_cycle += 11; - - /* Also xx01 years, since the previous year will be wrong */ - if( is_exception_century(year - 1) ) - year_cycle += 17; - - year_cycle %= SOLAR_CYCLE_LENGTH; - if( year_cycle < 0 ) - year_cycle = SOLAR_CYCLE_LENGTH + year_cycle; - - assert( year_cycle >= 0 ); - assert( year_cycle < SOLAR_CYCLE_LENGTH ); - if( year < MIN_SAFE_YEAR ) - safe_year = safe_years_low[year_cycle]; - else if( year > MAX_SAFE_YEAR ) - safe_year = safe_years_high[year_cycle]; - else - assert(0); - - TIME64_TRACE3("# year: %lld, year_cycle: %lld, safe_year: %d\n", - year, year_cycle, safe_year); - - assert(safe_year <= MAX_SAFE_YEAR && safe_year >= MIN_SAFE_YEAR); - - return safe_year; -} - - -void 
copy_tm_to_TM64(const struct tm *src, struct TM *dest) { - if( src == NULL ) { - memset(dest, 0, sizeof(*dest)); - } - else { -# ifdef USE_TM64 - dest->tm_sec = src->tm_sec; - dest->tm_min = src->tm_min; - dest->tm_hour = src->tm_hour; - dest->tm_mday = src->tm_mday; - dest->tm_mon = src->tm_mon; - dest->tm_year = (Year)src->tm_year; - dest->tm_wday = src->tm_wday; - dest->tm_yday = src->tm_yday; - dest->tm_isdst = src->tm_isdst; - -# ifdef HAS_TM_TM_GMTOFF - dest->tm_gmtoff = src->tm_gmtoff; -# endif - -# ifdef HAS_TM_TM_ZONE - dest->tm_zone = src->tm_zone; -# endif - -# else - /* They're the same type */ - memcpy(dest, src, sizeof(*dest)); -# endif - } -} - - -void copy_TM64_to_tm(const struct TM *src, struct tm *dest) { - if( src == NULL ) { - memset(dest, 0, sizeof(*dest)); - } - else { -# ifdef USE_TM64 - dest->tm_sec = src->tm_sec; - dest->tm_min = src->tm_min; - dest->tm_hour = src->tm_hour; - dest->tm_mday = src->tm_mday; - dest->tm_mon = src->tm_mon; - dest->tm_year = (int)src->tm_year; - dest->tm_wday = src->tm_wday; - dest->tm_yday = src->tm_yday; - dest->tm_isdst = src->tm_isdst; - -# ifdef HAS_TM_TM_GMTOFF - dest->tm_gmtoff = src->tm_gmtoff; -# endif - -# ifdef HAS_TM_TM_ZONE - dest->tm_zone = src->tm_zone; -# endif - -# else - /* They're the same type */ - memcpy(dest, src, sizeof(*dest)); -# endif - } -} - - -/* Simulate localtime_r() to the best of our ability */ -struct tm * fake_localtime_r(const time_t *time, struct tm *result) { - const struct tm *static_result = localtime(time); - - assert(result != NULL); - - if( static_result == NULL ) { - memset(result, 0, sizeof(*result)); - return NULL; - } - else { - memcpy(result, static_result, sizeof(*result)); - return result; - } -} - - -/* Simulate gmtime_r() to the best of our ability */ -struct tm * fake_gmtime_r(const time_t *time, struct tm *result) { - const struct tm *static_result = gmtime(time); - - assert(result != NULL); - - if( static_result == NULL ) { - memset(result, 0, 
sizeof(*result)); - return NULL; - } - else { - memcpy(result, static_result, sizeof(*result)); - return result; - } -} - - -static Time64_T seconds_between_years(Year left_year, Year right_year) { - int increment = (left_year > right_year) ? 1 : -1; - Time64_T seconds = 0; - int cycles; - - if( left_year > 2400 ) { - cycles = (int)((left_year - 2400) / 400); - left_year -= cycles * 400; - seconds += cycles * seconds_in_gregorian_cycle; - } - else if( left_year < 1600 ) { - cycles = (int)((left_year - 1600) / 400); - left_year += cycles * 400; - seconds += cycles * seconds_in_gregorian_cycle; - } - - while( left_year != right_year ) { - seconds += length_of_year[IS_LEAP(right_year - 1900)] * 60 * 60 * 24; - right_year += increment; - } - - return seconds * increment; -} - - -Time64_T mktime64(const struct TM *input_date) { - struct tm safe_date; - struct TM date; - Time64_T time; - Year year = input_date->tm_year + 1900; - - if( date_in_safe_range(input_date, &SYSTEM_MKTIME_MIN, &SYSTEM_MKTIME_MAX) ) - { - copy_TM64_to_tm(input_date, &safe_date); - return (Time64_T)mktime(&safe_date); - } - - /* Have to make the year safe in date else it won't fit in safe_date */ - date = *input_date; - date.tm_year = safe_year(year) - 1900; - copy_TM64_to_tm(&date, &safe_date); - - time = (Time64_T)mktime(&safe_date); - - time += seconds_between_years(year, (Year)(safe_date.tm_year + 1900)); - - return time; -} - - -/* Because I think mktime() is a crappy name */ -Time64_T timelocal64(const struct TM *date) { - return mktime64(date); -} - - -struct TM *gmtime64_r (const Time64_T *in_time, struct TM *p) -{ - int v_tm_sec, v_tm_min, v_tm_hour, v_tm_mon, v_tm_wday; - Time64_T v_tm_tday; - int leap; - Time64_T m; - Time64_T time = *in_time; - Year year = 70; - int cycles = 0; - - assert(p != NULL); - - /* Use the system gmtime() if time_t is small enough */ - if( SHOULD_USE_SYSTEM_GMTIME(*in_time) ) { - time_t safe_time = (time_t)*in_time; - struct tm safe_date; - GMTIME_R(&safe_time, 
&safe_date); - - copy_tm_to_TM64(&safe_date, p); - assert(check_tm(p)); - - return p; - } - -#ifdef HAS_TM_TM_GMTOFF - p->tm_gmtoff = 0; -#endif - p->tm_isdst = 0; - -#ifdef HAS_TM_TM_ZONE - p->tm_zone = "UTC"; -#endif - - v_tm_sec = (int)(time % 60); - time /= 60; - v_tm_min = (int)(time % 60); - time /= 60; - v_tm_hour = (int)(time % 24); - time /= 24; - v_tm_tday = time; - - WRAP (v_tm_sec, v_tm_min, 60); - WRAP (v_tm_min, v_tm_hour, 60); - WRAP (v_tm_hour, v_tm_tday, 24); - - v_tm_wday = (int)((v_tm_tday + 4) % 7); - if (v_tm_wday < 0) - v_tm_wday += 7; - m = v_tm_tday; - - if (m >= CHEAT_DAYS) { - year = CHEAT_YEARS; - m -= CHEAT_DAYS; - } - - if (m >= 0) { - /* Gregorian cycles, this is huge optimization for distant times */ - cycles = (int)(m / (Time64_T) days_in_gregorian_cycle); - if( cycles ) { - m -= (cycles * (Time64_T) days_in_gregorian_cycle); - year += (cycles * years_in_gregorian_cycle); - } - - /* Years */ - leap = IS_LEAP (year); - while (m >= (Time64_T) length_of_year[leap]) { - m -= (Time64_T) length_of_year[leap]; - year++; - leap = IS_LEAP (year); - } - - /* Months */ - v_tm_mon = 0; - while (m >= (Time64_T) days_in_month[leap][v_tm_mon]) { - m -= (Time64_T) days_in_month[leap][v_tm_mon]; - v_tm_mon++; - } - } else { - year--; - - /* Gregorian cycles */ - cycles = (int)((m / (Time64_T) days_in_gregorian_cycle) + 1); - if( cycles ) { - m -= (cycles * (Time64_T) days_in_gregorian_cycle); - year += (cycles * years_in_gregorian_cycle); - } - - /* Years */ - leap = IS_LEAP (year); - while (m < (Time64_T) -length_of_year[leap]) { - m += (Time64_T) length_of_year[leap]; - year--; - leap = IS_LEAP (year); - } - - /* Months */ - v_tm_mon = 11; - while (m < (Time64_T) -days_in_month[leap][v_tm_mon]) { - m += (Time64_T) days_in_month[leap][v_tm_mon]; - v_tm_mon--; - } - m += (Time64_T) days_in_month[leap][v_tm_mon]; - } - - p->tm_year = (int)year; - if( p->tm_year != year ) { -#ifdef EOVERFLOW - errno = EOVERFLOW; -#endif - return NULL; - } - - /* At 
this point m is less than a year so casting to an int is safe */ - p->tm_mday = (int) m + 1; - p->tm_yday = julian_days_by_month[leap][v_tm_mon] + (int)m; - p->tm_sec = v_tm_sec; - p->tm_min = v_tm_min; - p->tm_hour = v_tm_hour; - p->tm_mon = v_tm_mon; - p->tm_wday = v_tm_wday; - - assert(check_tm(p)); - - return p; -} - - -struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) -{ - time_t safe_time; - struct tm safe_date; - struct TM gm_tm; - Year orig_year; - int month_diff; - - assert(local_tm != NULL); - - /* Use the system localtime() if time_t is small enough */ - if( SHOULD_USE_SYSTEM_LOCALTIME(*time) ) { - safe_time = (time_t)*time; - - TIME64_TRACE1("Using system localtime for %lld\n", *time); - - LOCALTIME_R(&safe_time, &safe_date); - - copy_tm_to_TM64(&safe_date, local_tm); - assert(check_tm(local_tm)); - - return local_tm; - } - - if( gmtime64_r(time, &gm_tm) == NULL ) { - TIME64_TRACE1("gmtime64_r returned null for %lld\n", *time); - return NULL; - } - - orig_year = gm_tm.tm_year; - - if (gm_tm.tm_year > (2037 - 1900) || - gm_tm.tm_year < (1970 - 1900) - ) - { - TIME64_TRACE1("Mapping tm_year %lld to safe_year\n", (Year)gm_tm.tm_year); - gm_tm.tm_year = safe_year((Year)(gm_tm.tm_year + 1900)) - 1900; - } - - safe_time = (time_t)timegm64(&gm_tm); - if( LOCALTIME_R(&safe_time, &safe_date) == NULL ) { - TIME64_TRACE1("localtime_r(%d) returned NULL\n", (int)safe_time); - return NULL; - } - - copy_tm_to_TM64(&safe_date, local_tm); - - local_tm->tm_year = (int)orig_year; - if( local_tm->tm_year != orig_year ) { - TIME64_TRACE2("tm_year overflow: tm_year %lld, orig_year %lld\n", - (Year)local_tm->tm_year, (Year)orig_year); - -#ifdef EOVERFLOW - errno = EOVERFLOW; -#endif - return NULL; - } - - - month_diff = local_tm->tm_mon - gm_tm.tm_mon; - - /* When localtime is Dec 31st previous year and - gmtime is Jan 1st next year. 
- */ - if( month_diff == 11 ) { - local_tm->tm_year--; - } - - /* When localtime is Jan 1st, next year and - gmtime is Dec 31st, previous year. - */ - if( month_diff == -11 ) { - local_tm->tm_year++; - } - - /* GMT is Jan 1st, xx01 year, but localtime is still Dec 31st - in a non-leap xx00. There is one point in the cycle - we can't account for which the safe xx00 year is a leap - year. So we need to correct for Dec 31st comming out as - the 366th day of the year. - */ - if( !IS_LEAP(local_tm->tm_year) && local_tm->tm_yday == 365 ) - local_tm->tm_yday--; - - assert(check_tm(local_tm)); - - return local_tm; -} - - -int valid_tm_wday( const struct TM* date ) { - if( 0 <= date->tm_wday && date->tm_wday <= 6 ) - return 1; - else - return 0; -} - -int valid_tm_mon( const struct TM* date ) { - if( 0 <= date->tm_mon && date->tm_mon <= 11 ) - return 1; - else - return 0; -} - - -char *asctime64_r( const struct TM* date, char *result ) { - /* I figure everything else can be displayed, even hour 25, but if - these are out of range we walk off the name arrays */ - if( !valid_tm_wday(date) || !valid_tm_mon(date) ) - return NULL; - - sprintf(result, TM64_ASCTIME_FORMAT, - wday_name[date->tm_wday], - mon_name[date->tm_mon], - date->tm_mday, date->tm_hour, - date->tm_min, date->tm_sec, - 1900 + date->tm_year); - - return result; -} - - -char *ctime64_r( const Time64_T* time, char* result ) { - struct TM date; - - localtime64_r( time, &date ); - return asctime64_r( &date, result ); -} - - -/* Non-thread safe versions of the above */ -struct TM *localtime64(const Time64_T *time) { -#ifdef _MSC_VER - _tzset(); -#else - tzset(); -#endif - return localtime64_r(time, &Static_Return_Date); -} - -struct TM *gmtime64(const Time64_T *time) { - return gmtime64_r(time, &Static_Return_Date); -} - -char *asctime64( const struct TM* date ) { - return asctime64_r( date, Static_Return_String ); -} - -char *ctime64( const Time64_T* time ) { -#ifdef _MSC_VER - _tzset(); -#else - tzset(); -#endif - 
return asctime64(localtime64(time)); -} diff -Nru pymongo-1.11/bson/time64_config.h pymongo-1.7/bson/time64_config.h --- pymongo-1.11/bson/time64_config.h 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/bson/time64_config.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,78 +0,0 @@ -/* Configuration - ------------- - Define as appropriate for your system. - Sensible defaults provided. -*/ - - -#ifndef TIME64_CONFIG_H -# define TIME64_CONFIG_H - -/* Debugging - TIME_64_DEBUG - Define if you want debugging messages -*/ -/* #define TIME_64_DEBUG */ - - -/* INT_64_T - A 64 bit integer type to use to store time and others. - Must be defined. -*/ -#define INT_64_T long long - - -/* USE_TM64 - Should we use a 64 bit safe replacement for tm? This will - let you go past year 2 billion but the struct will be incompatible - with tm. Conversion functions will be provided. -*/ -/* #define USE_TM64 */ - - -/* Availability of system functions. - - HAS_GMTIME_R - Define if your system has gmtime_r() - - HAS_LOCALTIME_R - Define if your system has localtime_r() - - HAS_TIMEGM - Define if your system has timegm(), a GNU extension. -*/ -#if !defined(WIN32) && !defined(_MSC_VER) -#define HAS_GMTIME_R -#define HAS_LOCALTIME_R -#endif -/* #define HAS_TIMEGM */ - - -/* Details of non-standard tm struct elements. - - HAS_TM_TM_GMTOFF - True if your tm struct has a "tm_gmtoff" element. - A BSD extension. - - HAS_TM_TM_ZONE - True if your tm struct has a "tm_zone" element. - A BSD extension. -*/ -/* #define HAS_TM_TM_GMTOFF */ -/* #define HAS_TM_TM_ZONE */ - - -/* USE_SYSTEM_LOCALTIME - USE_SYSTEM_GMTIME - USE_SYSTEM_MKTIME - USE_SYSTEM_TIMEGM - Should we use the system functions if the time is inside their range? - Your system localtime() is probably more accurate, but our gmtime() is - fast and safe. 
-*/ -#define USE_SYSTEM_LOCALTIME -/* #define USE_SYSTEM_GMTIME */ -#define USE_SYSTEM_MKTIME -/* #define USE_SYSTEM_TIMEGM */ - -#endif /* TIME64_CONFIG_H */ diff -Nru pymongo-1.11/bson/time64.h pymongo-1.7/bson/time64.h --- pymongo-1.11/bson/time64.h 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/bson/time64.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,81 +0,0 @@ -#ifndef TIME64_H -# define TIME64_H - -#include -#include "time64_config.h" - -/* Set our custom types */ -typedef INT_64_T Int64; -typedef Int64 Time64_T; -typedef Int64 Year; - - -/* A copy of the tm struct but with a 64 bit year */ -struct TM64 { - int tm_sec; - int tm_min; - int tm_hour; - int tm_mday; - int tm_mon; - Year tm_year; - int tm_wday; - int tm_yday; - int tm_isdst; - -#ifdef HAS_TM_TM_GMTOFF - long tm_gmtoff; -#endif - -#ifdef HAS_TM_TM_ZONE - char *tm_zone; -#endif -}; - - -/* Decide which tm struct to use */ -#ifdef USE_TM64 -#define TM TM64 -#else -#define TM tm -#endif - - -/* Declare public functions */ -struct TM *gmtime64_r (const Time64_T *, struct TM *); -struct TM *localtime64_r (const Time64_T *, struct TM *); -struct TM *gmtime64 (const Time64_T *); -struct TM *localtime64 (const Time64_T *); - -char *asctime64 (const struct TM *); -char *asctime64_r (const struct TM *, char *); - -char *ctime64 (const Time64_T*); -char *ctime64_r (const Time64_T*, char*); - -Time64_T timegm64 (const struct TM *); -Time64_T mktime64 (const struct TM *); -Time64_T timelocal64 (const struct TM *); - - -/* Not everyone has gm/localtime_r(), provide a replacement */ -#ifdef HAS_LOCALTIME_R -# define LOCALTIME_R(clock, result) localtime_r(clock, result) -#else -# define LOCALTIME_R(clock, result) fake_localtime_r(clock, result) -#endif -#ifdef HAS_GMTIME_R -# define GMTIME_R(clock, result) gmtime_r(clock, result) -#else -# define GMTIME_R(clock, result) fake_gmtime_r(clock, result) -#endif - - -/* Use a different asctime format depending on how big the year is */ -#ifdef USE_TM64 - #define 
TM64_ASCTIME_FORMAT "%.3s %.3s%3d %.2d:%.2d:%.2d %lld\n" -#else - #define TM64_ASCTIME_FORMAT "%.3s %.3s%3d %.2d:%.2d:%.2d %d\n" -#endif - - -#endif diff -Nru pymongo-1.11/bson/time64_limits.h pymongo-1.7/bson/time64_limits.h --- pymongo-1.11/bson/time64_limits.h 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/bson/time64_limits.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,95 +0,0 @@ -/* - Maximum and minimum inputs your system's respective time functions - can correctly handle. time64.h will use your system functions if - the input falls inside these ranges and corresponding USE_SYSTEM_* - constant is defined. -*/ - -#ifndef TIME64_LIMITS_H -#define TIME64_LIMITS_H - -/* Max/min for localtime() */ -#define SYSTEM_LOCALTIME_MAX 2147483647 -#define SYSTEM_LOCALTIME_MIN -2147483647-1 - -/* Max/min for gmtime() */ -#define SYSTEM_GMTIME_MAX 2147483647 -#define SYSTEM_GMTIME_MIN -2147483647-1 - -/* Max/min for mktime() */ -static const struct tm SYSTEM_MKTIME_MAX = { - 7, - 14, - 19, - 18, - 0, - 138, - 1, - 17, - 0 -#ifdef HAS_TM_TM_GMTOFF - ,-28800 -#endif -#ifdef HAS_TM_TM_ZONE - ,"PST" -#endif -}; - -static const struct tm SYSTEM_MKTIME_MIN = { - 52, - 45, - 12, - 13, - 11, - 1, - 5, - 346, - 0 -#ifdef HAS_TM_TM_GMTOFF - ,-28800 -#endif -#ifdef HAS_TM_TM_ZONE - ,"PST" -#endif -}; - -/* Max/min for timegm() */ -#ifdef HAS_TIMEGM -static const struct tm SYSTEM_TIMEGM_MAX = { - 7, - 14, - 3, - 19, - 0, - 138, - 2, - 18, - 0 - #ifdef HAS_TM_TM_GMTOFF - ,0 - #endif - #ifdef HAS_TM_TM_ZONE - ,"UTC" - #endif -}; - -static const struct tm SYSTEM_TIMEGM_MIN = { - 52, - 45, - 20, - 13, - 11, - 1, - 5, - 346, - 0 - #ifdef HAS_TM_TM_GMTOFF - ,0 - #endif - #ifdef HAS_TM_TM_ZONE - ,"UTC" - #endif -}; -#endif /* HAS_TIMEGM */ - -#endif /* TIME64_LIMITS_H */ diff -Nru pymongo-1.11/bson/timestamp.py pymongo-1.7/bson/timestamp.py --- pymongo-1.11/bson/timestamp.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/bson/timestamp.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,96 
+0,0 @@ -# Copyright 2010 10gen, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tools for representing MongoDB internal Timestamps. -""" - -import calendar -import datetime - -from bson.tz_util import utc - - -class Timestamp(object): - """MongoDB internal timestamps used in the opLog. - """ - - def __init__(self, time, inc): - """Create a new :class:`Timestamp`. - - This class is only for use with the MongoDB opLog. If you need - to store a regular timestamp, please use a - :class:`~datetime.datetime`. - - Raises :class:`TypeError` if `time` is not an instance of - :class: `int` or :class:`~datetime.datetime`, or `inc` is not - an instance of :class:`int`. Raises :class:`ValueError` if - `time` or `inc` is not in [0, 2**32). - - :Parameters: - - `time`: time in seconds since epoch UTC, or a naive UTC - :class:`~datetime.datetime`, or an aware - :class:`~datetime.datetime` - - `inc`: the incrementing counter - - .. versionchanged:: 1.7 - `time` can now be a :class:`~datetime.datetime` instance. 
- """ - if isinstance(time, datetime.datetime): - if time.utcoffset() is not None: - time = time - time.utcoffset() - time = int(calendar.timegm(time.timetuple())) - if not isinstance(time, (int, long)): - raise TypeError("time must be an instance of int") - if not isinstance(inc, (int, long)): - raise TypeError("inc must be an instance of int") - if not 0 <= time < 2 ** 32: - raise ValueError("time must be contained in [0, 2**32)") - if not 0 <= inc < 2 ** 32: - raise ValueError("inc must be contained in [0, 2**32)") - - self.__time = time - self.__inc = inc - - @property - def time(self): - """Get the time portion of this :class:`Timestamp`. - """ - return self.__time - - @property - def inc(self): - """Get the inc portion of this :class:`Timestamp`. - """ - return self.__inc - - def __eq__(self, other): - if isinstance(other, Timestamp): - return (self.__time == other.time and self.__inc == other.inc) - else: - return NotImplemented - - def __ne__(self, other): - return not self == other - - def __repr__(self): - return "Timestamp(%s, %s)" % (self.__time, self.__inc) - - def as_datetime(self): - """Return a :class:`~datetime.datetime` instance corresponding - to the time portion of this :class:`Timestamp`. - - .. versionchanged:: 1.8 - The returned datetime is now timezone aware. - """ - return datetime.datetime.fromtimestamp(self.__time, utc) diff -Nru pymongo-1.11/bson/tz_util.py pymongo-1.7/bson/tz_util.py --- pymongo-1.11/bson/tz_util.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/bson/tz_util.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -# Copyright 2010 10gen, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Timezone related utilities for BSON.""" - -from datetime import (timedelta, - tzinfo) - -ZERO = timedelta(0) - - -class FixedOffset(tzinfo): - """Fixed offset timezone, in minutes east from UTC. - - Implementation from the Python `standard library documentation - `_. - """ - - def __init__(self, offset, name): - self.__offset = timedelta(minutes=offset) - self.__name = name - - def utcoffset(self, dt): - return self.__offset - - def tzname(self, dt): - return self.__name - - def dst(self, dt): - return ZERO - - -utc = FixedOffset(0, "UTC") -"""Fixed offset timezone representing UTC.""" diff -Nru pymongo-1.11/debian/changelog pymongo-1.7/debian/changelog --- pymongo-1.11/debian/changelog 2011-06-14 15:38:12.000000000 +0000 +++ pymongo-1.7/debian/changelog 2011-08-25 17:26:53.000000000 +0000 @@ -1,47 +1,8 @@ -pymongo (1.11-1) unstable; urgency=low +pymongo (1.7-1~openstack~lucid1) lucid; urgency=low - * New upstream release, http://api.mongodb.org/python/1.11/changelog.html + * Backported pymongo to lucid. - -- Federico Ceratto Tue, 14 Jun 2011 15:10:53 +0000 - -pymongo (1.10.1-1) unstable; urgency=low - - [ Federico Ceratto ] - * New upstream release. - * Standards-Version bumped to 3.9.2 - * Minor updates. - - -- Federico Ceratto Wed, 13 Apr 2011 22:54:38 +0200 - -pymongo (1.10-1) unstable; urgency=low - - [ Janos Guljas ] - * New upstream release. - - [ Federico Ceratto ] - * FTBFS fixed (Closes: #620069). 
- - -- Federico Ceratto Thu, 31 Mar 2011 20:17:41 +0000 - -pymongo (1.9-1) unstable; urgency=low - - [ Federico Ceratto ] - * New upstream release (Closes: #618271). - - [ Janos Guljas ] - * Fix debian/python-pymongo.install (Closes: #597085). - * Switch to dh_python2. - * Add binary package python-pymongo-doc. - * Add binary package python-bson. - - -- Federico Ceratto Sat, 26 Mar 2011 11:58:19 +0100 - -pymongo (1.8.1-1) unstable; urgency=low - - * New upstream release. - * Standards-Version bumped to 3.9.1 - - -- Federico Ceratto Sun, 22 Aug 2010 15:12:23 +0100 + -- Monty Taylor Thu, 25 Aug 2011 10:26:36 -0700 pymongo (1.7-1) unstable; urgency=low diff -Nru pymongo-1.11/debian/compat pymongo-1.7/debian/compat --- pymongo-1.11/debian/compat 2011-06-14 15:38:12.000000000 +0000 +++ pymongo-1.7/debian/compat 2010-06-24 18:22:06.000000000 +0000 @@ -1 +1 @@ -8 +7 diff -Nru pymongo-1.11/debian/control pymongo-1.7/debian/control --- pymongo-1.11/debian/control 2011-06-14 15:38:12.000000000 +0000 +++ pymongo-1.7/debian/control 2010-06-24 18:22:06.000000000 +0000 @@ -2,52 +2,26 @@ Section: python Priority: optional Maintainer: Federico Ceratto -Uploaders: Janos Guljas -DM-Upload-Allowed: yes -Build-Depends: debhelper (>= 8.0.0~), - python-all-dev, - python-setuptools (>= 0.6.14), - python-sphinx -Standards-Version: 3.9.2 +Build-Depends: debhelper (>= 7.0.50~), python-all-dev, python-support (>= 0.90.0~), python-setuptools +Standards-Version: 3.8.4 Homepage: http://api.mongodb.org/python/ Vcs-Git: git://git.debian.org/git/collab-maint/pymongo.git Vcs-Browser: http://git.debian.org/?p=collab-maint/pymongo.git Package: python-pymongo Architecture: any -Depends: python-bson (= ${binary:Version}), ${shlibs:Depends}, ${python:Depends}, ${misc:Depends} -Recommends: python-gridfs (>= ${source:Version}) -Provides: ${python:Provides} +Depends: ${shlibs:Depends}, ${python:Depends}, ${misc:Depends} Description: Python interface to the MongoDB document-oriented database MongoDB is a 
high-performance, open source, schema-free document-oriented data store. Pymongo provides an interface to easily access it from Python. -Package: python-pymongo-doc -Architecture: all -Depends: ${misc:Depends}, libjs-jquery -Section: doc -Description: Python interface to the MongoDB document-oriented database (documentation) - MongoDB is a high-performance, open source, schema-free - document-oriented data store. Pymongo provides an interface - to easily access it from Python. - . - This package contains the HTML documentation. - Package: python-gridfs Architecture: all -Depends: ${python:Depends}, ${misc:Depends} -Provides: ${python:Provides} +Depends: python-pymongo, ${python:Depends}, ${misc:Depends} Description: Python implementation of GridFS for MongoDB GridFS is a storage specification for large objects in MongoDB. The Python gridfs module is an implementation based on the pymongo module. It provides a file-like interface. -Package: python-bson -Architecture: any -Depends: ${python:Depends}, ${misc:Depends}, ${shlibs:Depends} -Provides: ${python:Provides} -Description: Python implementation of BSON for MongoDB - BSON Python module contains all of the Binary JSON encoding and decoding - logic for MongoDB. diff -Nru pymongo-1.11/debian/copyright pymongo-1.7/debian/copyright --- pymongo-1.11/debian/copyright 2011-06-14 15:38:12.000000000 +0000 +++ pymongo-1.7/debian/copyright 2010-06-24 18:22:06.000000000 +0000 @@ -1,25 +1,39 @@ -Format: http://dep.debian.net/deps/dep5 -Source: http://api.mongodb.org/python/ +This work was packaged for Debian by: -Files: * -Copyright: © 2011, Mike Dirolf -License: Apache - -Files: debian/* -Copyright: © 2011, Federico Ceratto - © 2011, Janos Guljas -License: Apache - -License: Apache - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. + Federico Ceratto on Mon, 05 Apr 2010 15:50:58 +0100 +It was downloaded from: + + http://api.mongodb.org/python/ + +Upstream Author: + + Mike Dirolf + +Copyright: + + Copyright (C) 2010 Mike Dirolf + +License: + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +On Debian systems, the complete text of the Apache version 2.0 license +can be found in "/usr/share/common-licenses/Apache-2.0". + +The Debian packaging is: + + Copyright (C) 2010 Federico Ceratto + +and is licensed under the Apache version 2.0, +see "/usr/share/common-licenses/Apache-2.0". diff -Nru pymongo-1.11/debian/pymongo.1 pymongo-1.7/debian/pymongo.1 --- pymongo-1.11/debian/pymongo.1 1970-01-01 00:00:00.000000000 +0000 +++ pymongo-1.7/debian/pymongo.1 2010-06-24 18:22:06.000000000 +0000 @@ -0,0 +1,58 @@ +.TH PYMONGO 8 "March 13, 2010" +.SH NAME +pymongo \- Python interface for MongoDB +.SH SYNOPSIS +.B pymongo +.br +.SH DESCRIPTION +.br +This manual page documents briefly the +.B pymongo +python library. 
+.PP +.SH USAGE +Assuming that a running instance of MongoDB is available, setup a connection to the daemon: + +>>> from pymongo import Connection +.br +>>> connection = Connection('localhost', 27017) + +If you are happy with the default values (localhost) you can simply use Connection() +.br +A single instance of MongoDB can support multiple independent databases. + +Select the database you want to use: + +>>> db = connection.test_database + +You can use dictionary-style access instead: + +>>> db = connection['test-database'] + +A collection is a group of documents stored in MongoDB, a bit like a table in a relational database. +.br +Get a collection: + +>>> collection = db.test_collection +.br +Or: +.br +>>> collection = db['test-collection'] + +Warning: Collections and databases are created when the first document is inserted into them. +.br +Example of data insertion: + +>>> item = {"author": "Me","title": "This is a test"} +>>> collection.insert(item) + +Comprehensive documentation can be found at: +.br +http://api.mongodb.org/python/ + +.br +.SH AUTHOR +pymongo was written by Mike Dirolf. +.PP +This manual page was written by Federico Ceratto , +for the Debian project (and may be used by others). diff -Nru pymongo-1.11/debian/pymongo.3 pymongo-1.7/debian/pymongo.3 --- pymongo-1.11/debian/pymongo.3 2011-06-14 15:38:12.000000000 +0000 +++ pymongo-1.7/debian/pymongo.3 1970-01-01 00:00:00.000000000 +0000 @@ -1,58 +0,0 @@ -.TH PYMONGO 3 "March 13, 2010" -.SH NAME -pymongo \- Python interface for MongoDB -.SH SYNOPSIS -.B pymongo -.br -.SH DESCRIPTION -.br -This manual page documents briefly the -.B pymongo -python library. 
-.PP -.SH USAGE -Assuming that a running instance of MongoDB is available, setup a connection to the daemon: - ->>> from pymongo import Connection -.br ->>> connection = Connection('localhost', 27017) - -If you are happy with the default values (localhost) you can simply use Connection() -.br -A single instance of MongoDB can support multiple independent databases. - -Select the database you want to use: - ->>> db = connection.test_database - -You can use dictionary-style access instead: - ->>> db = connection['test-database'] - -A collection is a group of documents stored in MongoDB, a bit like a table in a relational database. -.br -Get a collection: - ->>> collection = db.test_collection -.br -Or: -.br ->>> collection = db['test-collection'] - -Warning: Collections and databases are created when the first document is inserted into them. -.br -Example of data insertion: - ->>> item = {"author": "Me","title": "This is a test"} ->>> collection.insert(item) - -Comprehensive documentation can be found at: -.br -http://api.mongodb.org/python/ - -.br -.SH AUTHOR -pymongo was written by Mike Dirolf. -.PP -This manual page was written by Federico Ceratto , -for the Debian project (and may be used by others). 
diff -Nru pymongo-1.11/debian/python-bson.install pymongo-1.7/debian/python-bson.install --- pymongo-1.11/debian/python-bson.install 2011-06-14 15:38:12.000000000 +0000 +++ pymongo-1.7/debian/python-bson.install 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -usr/lib/python*/*-packages/bson* diff -Nru pymongo-1.11/debian/python-gridfs.install pymongo-1.7/debian/python-gridfs.install --- pymongo-1.11/debian/python-gridfs.install 2011-06-14 15:38:12.000000000 +0000 +++ pymongo-1.7/debian/python-gridfs.install 2010-06-24 18:22:06.000000000 +0000 @@ -1 +1 @@ -usr/lib/python*/*-packages/gridfs* +usr/lib/python*/*-packages/gridfs diff -Nru pymongo-1.11/debian/python-pymongo-doc.doc-base pymongo-1.7/debian/python-pymongo-doc.doc-base --- pymongo-1.11/debian/python-pymongo-doc.doc-base 2011-06-14 15:38:12.000000000 +0000 +++ pymongo-1.7/debian/python-pymongo-doc.doc-base 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -Document: python-pymongo-doc -Title: PyMongo Documentation -Author: Michael Dirolf -Abstract: PyMongo is a Python distribution containing tools for working with - MongoDB, and is the recommended way to work with MongoDB from Python. This - documentation attempts to explain everything you need to know to use PyMongo. 
-Section: Programming/Python - -Format: HTML -Index: /usr/share/doc/python-pymongo-doc/html/index.html -Files: /usr/share/doc/python-pymongo-doc/html/*.html - /usr/share/doc/python-pymongo-doc/html/api/*.html - /usr/share/doc/python-pymongo-doc/html/examples/*.html diff -Nru pymongo-1.11/debian/python-pymongo-doc.docs pymongo-1.7/debian/python-pymongo-doc.docs --- pymongo-1.11/debian/python-pymongo-doc.docs 2011-06-14 15:38:12.000000000 +0000 +++ pymongo-1.7/debian/python-pymongo-doc.docs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -README.rst -debian/html diff -Nru pymongo-1.11/debian/python-pymongo-doc.links pymongo-1.7/debian/python-pymongo-doc.links --- pymongo-1.11/debian/python-pymongo-doc.links 2011-06-14 15:38:12.000000000 +0000 +++ pymongo-1.7/debian/python-pymongo-doc.links 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -/usr/share/javascript/jquery/jquery.js usr/share/doc/python-pymongo-doc/html/_static/jquery.js - diff -Nru pymongo-1.11/debian/python-pymongo.install pymongo-1.7/debian/python-pymongo.install --- pymongo-1.11/debian/python-pymongo.install 2011-06-14 15:38:12.000000000 +0000 +++ pymongo-1.7/debian/python-pymongo.install 2010-06-24 18:22:06.000000000 +0000 @@ -1 +1 @@ -usr/lib/python*/*-packages/pymongo* +usr/lib/python*/*-packages/pymongo diff -Nru pymongo-1.11/debian/pyversions pymongo-1.7/debian/pyversions --- pymongo-1.11/debian/pyversions 2011-06-14 15:38:12.000000000 +0000 +++ pymongo-1.7/debian/pyversions 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -2.5- diff -Nru pymongo-1.11/debian/rules pymongo-1.7/debian/rules --- pymongo-1.11/debian/rules 2011-06-14 15:38:12.000000000 +0000 +++ pymongo-1.7/debian/rules 2010-06-24 18:22:06.000000000 +0000 @@ -1,23 +1,9 @@ #!/usr/bin/make -f -export DH_VERBOSE=1 - -%: - dh $@ --with python2 - -override_dh_auto_build: - dh_auto_build - sphinx-build -b html doc debian/html - rm -rf debian/html/_sources/ - rm -f 
debian/python-pymongo-doc/usr/share/doc/python-pymongo-doc/html/_static/jquery.js - rm -rf debian/python-pymongo-doc/usr/share/doc/python-pymongo-doc/html/.doctrees - -override_dh_installchangelogs: - dh_installchangelogs doc/changelog.rst override_dh_auto_clean: - rm -rf debian/html - find doc -name "*.pyc" -exec rm -rf {} + dh_auto_clean + rm -rf pymongo.egg-info/ + +%: + dh $@ -override_dh_compress: - dh_compress -X.html diff -Nru pymongo-1.11/doc/api/bson/binary.rst pymongo-1.7/doc/api/bson/binary.rst --- pymongo-1.11/doc/api/bson/binary.rst 2011-03-14 22:30:58.000000000 +0000 +++ pymongo-1.7/doc/api/bson/binary.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ -:mod:`binary` -- Tools for representing binary data to be stored in MongoDB -=========================================================================== - -.. automodule:: bson.binary - :synopsis: Tools for representing binary data to be stored in MongoDB - - .. autodata:: BINARY_SUBTYPE - .. autodata:: FUNCTION_SUBTYPE - .. autodata:: OLD_BINARY_SUBTYPE - .. autodata:: UUID_SUBTYPE - .. autodata:: MD5_SUBTYPE - .. autodata:: USER_DEFINED_SUBTYPE - - .. autoclass:: Binary(data[, subtype=OLD_BINARY_SUBTYPE]) - :members: - :show-inheritance: diff -Nru pymongo-1.11/doc/api/bson/code.rst pymongo-1.7/doc/api/bson/code.rst --- pymongo-1.11/doc/api/bson/code.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/bson/code.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -:mod:`code` -- Tools for representing JavaScript code -===================================================== - -.. automodule:: bson.code - :synopsis: Tools for representing JavaScript code - - .. 
autoclass:: Code(code[, scope=None[, **kwargs]]) - :members: - :show-inheritance: diff -Nru pymongo-1.11/doc/api/bson/dbref.rst pymongo-1.7/doc/api/bson/dbref.rst --- pymongo-1.11/doc/api/bson/dbref.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/bson/dbref.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -:mod:`dbref` -- Tools for manipulating DBRefs (references to documents stored in MongoDB) -========================================================================================= - -.. automodule:: bson.dbref - :synopsis: Tools for manipulating DBRefs (references to documents stored in MongoDB) - :members: diff -Nru pymongo-1.11/doc/api/bson/errors.rst pymongo-1.7/doc/api/bson/errors.rst --- pymongo-1.11/doc/api/bson/errors.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/bson/errors.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -:mod:`errors` -- Exceptions raised by the :mod:`bson` package -================================================================ - -.. automodule:: bson.errors - :synopsis: Exceptions raised by the bson package - :members: diff -Nru pymongo-1.11/doc/api/bson/index.rst pymongo-1.7/doc/api/bson/index.rst --- pymongo-1.11/doc/api/bson/index.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/bson/index.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -:mod:`bson` -- BSON (Binary JSON) Encoding and Decoding -======================================================= - -.. automodule:: bson - :synopsis: BSON (Binary JSON) Encoding and Decoding - :members: - -Sub-modules: - -.. 
toctree:: - :maxdepth: 2 - - binary - code - dbref - errors - json_util - max_key - min_key - objectid - son - timestamp - tz_util diff -Nru pymongo-1.11/doc/api/bson/json_util.rst pymongo-1.7/doc/api/bson/json_util.rst --- pymongo-1.11/doc/api/bson/json_util.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/bson/json_util.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -:mod:`json_util` -- Tools for using Python's :mod:`json` module with BSON documents -====================================================================================== -.. versionadded:: 1.1.1 - -.. automodule:: bson.json_util - :synopsis: Tools for using Python's json module with BSON documents - :members: - :undoc-members: diff -Nru pymongo-1.11/doc/api/bson/max_key.rst pymongo-1.7/doc/api/bson/max_key.rst --- pymongo-1.11/doc/api/bson/max_key.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/bson/max_key.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -:mod:`max_key` -- Representation for the MongoDB internal MaxKey type -===================================================================== -.. versionadded:: 1.7 - -.. automodule:: bson.max_key - :synopsis: Representation for the MongoDB internal MaxKey type - :members: diff -Nru pymongo-1.11/doc/api/bson/min_key.rst pymongo-1.7/doc/api/bson/min_key.rst --- pymongo-1.11/doc/api/bson/min_key.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/bson/min_key.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -:mod:`min_key` -- Representation for the MongoDB internal MinKey type -===================================================================== -.. versionadded:: 1.7 - -.. 
automodule:: bson.min_key - :synopsis: Representation for the MongoDB internal MinKey type - :members: diff -Nru pymongo-1.11/doc/api/bson/objectid.rst pymongo-1.7/doc/api/bson/objectid.rst --- pymongo-1.11/doc/api/bson/objectid.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/bson/objectid.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -:mod:`objectid` -- Tools for working with MongoDB ObjectIds -=========================================================== - -.. automodule:: bson.objectid - :synopsis: Tools for working with MongoDB ObjectIds - - .. autoclass:: pymongo.objectid.ObjectId([oid=None]) - :members: - - .. describe:: str(o) - - Get a hex encoded version of :class:`ObjectId` `o`. - - The following property always holds: - - .. testsetup:: - - from bson.objectid import ObjectId - - .. doctest:: - - >>> o = ObjectId() - >>> o == ObjectId(str(o)) - True - - This representation is useful for urls or other places where - ``o.binary`` is inappropriate. diff -Nru pymongo-1.11/doc/api/bson/son.rst pymongo-1.7/doc/api/bson/son.rst --- pymongo-1.11/doc/api/bson/son.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/bson/son.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -:mod:`son` -- Tools for working with SON, an ordered mapping -============================================================ - -.. automodule:: bson.son - :synopsis: Tools for working with SON, an ordered mapping - :members: diff -Nru pymongo-1.11/doc/api/bson/timestamp.rst pymongo-1.7/doc/api/bson/timestamp.rst --- pymongo-1.11/doc/api/bson/timestamp.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/bson/timestamp.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -:mod:`timestamp` -- Tools for representing MongoDB internal Timestamps -====================================================================== -.. versionadded:: 1.5 - -.. 
automodule:: bson.timestamp - :synopsis: Tools for representing MongoDB internal Timestamps - :members: diff -Nru pymongo-1.11/doc/api/bson/tz_util.rst pymongo-1.7/doc/api/bson/tz_util.rst --- pymongo-1.11/doc/api/bson/tz_util.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/bson/tz_util.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -:mod:`tz_util` -- Utilities for dealing with timezones in Python -================================================================ - -.. automodule:: bson.tz_util - :synopsis: Utilities for dealing with timezones in Python - :members: diff -Nru pymongo-1.11/doc/api/index.rst pymongo-1.7/doc/api/index.rst --- pymongo-1.11/doc/api/index.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/index.rst 2010-01-08 19:31:24.000000000 +0000 @@ -1,17 +1,14 @@ API Documentation ================= -The PyMongo distribution contains three top-level packages for -interacting with MongoDB. :mod:`bson` is an implementation of the -`BSON format `_, :mod:`pymongo` is a -full-featured driver for MongoDB, and :mod:`gridfs` is a set of tools -for working with the `GridFS +The PyMongo distribution contains two root packages for interacting with +MongoDB. :mod:`pymongo` is a full-featured driver for MongoDB and +:mod:`gridfs` is a set of tools for working with the `GridFS `_ storage specification. .. toctree:: :maxdepth: 2 - bson/index pymongo/index gridfs/index diff -Nru pymongo-1.11/doc/api/pymongo/binary.rst pymongo-1.7/doc/api/pymongo/binary.rst --- pymongo-1.11/doc/api/pymongo/binary.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/pymongo/binary.rst 2010-06-16 16:11:12.000000000 +0000 @@ -1,10 +1,16 @@ -:mod:`binary` -- MOVED -====================== +:mod:`binary` -- Tools for representing binary data to be stored in MongoDB +=========================================================================== -.. module:: pymongo.binary +.. 
automodule:: pymongo.binary + :synopsis: Tools for representing binary data to be stored in MongoDB -This module has been deprecated in favor of :mod:`bson.binary`. Please -use that module instead. + .. autodata:: BINARY_SUBTYPE + .. autodata:: FUNCTION_SUBTYPE + .. autodata:: OLD_BINARY_SUBTYPE + .. autodata:: UUID_SUBTYPE + .. autodata:: MD5_SUBTYPE + .. autodata:: USER_DEFINED_SUBTYPE -.. versionchanged:: 1.9 - Deprecated. + .. autoclass:: Binary(data[, subtype=BINARY_SUBTYPE]) + :members: + :show-inheritance: diff -Nru pymongo-1.11/doc/api/pymongo/bson.rst pymongo-1.7/doc/api/pymongo/bson.rst --- pymongo-1.11/doc/api/pymongo/bson.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/pymongo/bson.rst 2010-05-19 14:01:01.000000000 +0000 @@ -1,10 +1,7 @@ -:mod:`bson` -- MOVED -==================== +:mod:`bson` -- Tools for working with `BSON `_ in Python +================================================================================================== -.. module:: pymongo.bson - -This module has been deprecated in favor of :mod:`bson`. Please use -that module instead. - -.. versionchanged:: 1.9 - Deprecated. +.. automodule:: pymongo.bson + :synopsis: Tools for working with BSON in Python + :members: + :show-inheritance: diff -Nru pymongo-1.11/doc/api/pymongo/code.rst pymongo-1.7/doc/api/pymongo/code.rst --- pymongo-1.11/doc/api/pymongo/code.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/pymongo/code.rst 2010-01-08 19:31:24.000000000 +0000 @@ -1,10 +1,9 @@ -:mod:`code` -- MOVED -==================== +:mod:`code` -- Tools for representing JavaScript code to be evaluated by MongoDB +================================================================================ -.. module:: pymongo.code +.. automodule:: pymongo.code + :synopsis: Tools for representing JavaScript code to be evaluated by MongoDB -This module has been deprecated in favor of :mod:`bson.code`. Please -use that module instead. - -.. versionchanged:: 1.9 - Deprecated. + .. 
autoclass:: Code(code[, scope=None]) + :members: + :show-inheritance: diff -Nru pymongo-1.11/doc/api/pymongo/collection.rst pymongo-1.7/doc/api/pymongo/collection.rst --- pymongo-1.11/doc/api/pymongo/collection.rst 2011-05-05 21:41:53.000000000 +0000 +++ pymongo-1.7/doc/api/pymongo/collection.rst 2010-05-19 14:01:01.000000000 +0000 @@ -20,12 +20,11 @@ .. autoattribute:: full_name .. autoattribute:: name .. autoattribute:: database - .. automethod:: insert(doc_or_docs[, manipulate=True[, safe=False[, check_keys=True[, **kwargs]]]]) - .. automethod:: save(to_save[, manipulate=True[, safe=False[, **kwargs]]]) - .. automethod:: update(spec, document[, upsert=False[, manipulate=False[, safe=False[, multi=False[, **kwargs]]]]]) - .. automethod:: remove([spec_or_object_id=None[, safe=False[, **kwargs]]]) - .. automethod:: drop - .. automethod:: find([spec=None[, fields=None[, skip=0[, limit=0[, timeout=True[, snapshot=False[, tailable=False[, sort=None[, max_scan=None[, as_class=None[, slave_okay=False[, **kwargs]]]]]]]]]]]]) + .. automethod:: insert(doc_or_docs[, manipulate=True[, safe=False[, check_keys=True]]]) + .. automethod:: save(to_save[, manipulate=True[, safe=False]]) + .. automethod:: update(spec, document[, upsert=False[, manipulate=False[, safe=False[, multi=False]]]]) + .. automethod:: remove([spec_or_object_id=None[, safe=False]]) + .. automethod:: find([spec=None[, fields=None[, skip=0[, limit=0[, timeout=True[, snapshot=False[, tailable=False[, sort=None[, max_scan=None[, as_class=None]]]]]]]]]]) .. automethod:: find_one([spec_or_id=None[, *args[, **kwargs]]]) .. automethod:: count .. automethod:: create_index @@ -38,6 +37,4 @@ .. automethod:: rename .. automethod:: distinct .. automethod:: map_reduce - .. automethod:: inline_map_reduce - .. 
automethod:: find_and_modify diff -Nru pymongo-1.11/doc/api/pymongo/connection.rst pymongo-1.7/doc/api/pymongo/connection.rst --- pymongo-1.11/doc/api/pymongo/connection.rst 2011-05-05 21:37:51.000000000 +0000 +++ pymongo-1.7/doc/api/pymongo/connection.rst 2010-05-19 14:01:01.000000000 +0000 @@ -4,7 +4,7 @@ .. automodule:: pymongo.connection :synopsis: Tools for connecting to MongoDB - .. autoclass:: pymongo.connection.Connection([host='localhost'[, port=27017[, max_pool_size=10[, slave_okay=False[, network_timeout=None[, document_class=dict[, tz_aware=False]]]]]]]) + .. autoclass:: pymongo.connection.Connection([host='localhost'[, port=27017[, pool_size=None[, auto_start_request=None[, timeout=None[, slave_okay=False[, network_timeout=None[, document_class=dict]]]]]]]]) .. automethod:: from_uri([uri='mongodb://localhost']) .. automethod:: paired(left[, right=('localhost', 27017)]) @@ -18,10 +18,8 @@ .. autoattribute:: host .. autoattribute:: port - .. autoattribute:: nodes .. autoattribute:: slave_okay .. autoattribute:: document_class - .. autoattribute:: tz_aware .. automethod:: database_names .. automethod:: drop_database .. automethod:: copy_database(from_name, to_name[, from_host=None[, username=None[, password=None]]]) diff -Nru pymongo-1.11/doc/api/pymongo/database.rst pymongo-1.7/doc/api/pymongo/database.rst --- pymongo-1.11/doc/api/pymongo/database.rst 2011-03-17 21:33:56.000000000 +0000 +++ pymongo-1.7/doc/api/pymongo/database.rst 2010-02-04 16:37:46.000000000 +0000 @@ -3,20 +3,8 @@ .. automodule:: pymongo.database :synopsis: Database level operations + :members: .. autodata:: pymongo.OFF .. autodata:: pymongo.SLOW_ONLY .. autodata:: pymongo.ALL - - .. autoclass:: pymongo.database.Database - :members: - - .. describe:: db[collection_name] || db.collection_name - - Get the `collection_name` :class:`~pymongo.collection.Collection` of - :class:`Database` `db`. - - Raises :class:`~pymongo.errors.InvalidName` if an invalid collection name is used. - - .. 
autoclass:: pymongo.database.SystemJS - :members: diff -Nru pymongo-1.11/doc/api/pymongo/dbref.rst pymongo-1.7/doc/api/pymongo/dbref.rst --- pymongo-1.11/doc/api/pymongo/dbref.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/pymongo/dbref.rst 2010-01-08 19:31:24.000000000 +0000 @@ -1,10 +1,6 @@ -:mod:`dbref` -- MOVED -===================== +:mod:`dbref` -- Tools for manipulating DBRefs (references to documents stored in MongoDB) +========================================================================================= -.. module:: pymongo.dbref - -This module has been deprecated in favor of :mod:`bson.dbref`. Please -use that module instead. - -.. versionchanged:: 1.9 - Deprecated. +.. automodule:: pymongo.dbref + :synopsis: Tools for manipulating DBRefs (references to documents stored in MongoDB) + :members: diff -Nru pymongo-1.11/doc/api/pymongo/index.rst pymongo-1.7/doc/api/pymongo/index.rst --- pymongo-1.11/doc/api/pymongo/index.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/pymongo/index.rst 2010-06-17 15:37:47.000000000 +0000 @@ -22,23 +22,16 @@ cursor errors master_slave_connection - message - son_manipulator - cursor_manager - -Deprecated sub-modules (moved to the :mod:`bson` package): - -.. toctree:: - :maxdepth: 2 - - bson - binary code dbref - json_util - max_key - min_key + binary objectid + bson + message son + son_manipulator timestamp - tz_util + min_key + max_key + json_util + cursor_manager diff -Nru pymongo-1.11/doc/api/pymongo/json_util.rst pymongo-1.7/doc/api/pymongo/json_util.rst --- pymongo-1.11/doc/api/pymongo/json_util.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/pymongo/json_util.rst 2010-03-10 16:55:55.000000000 +0000 @@ -1,10 +1,8 @@ -:mod:`json_util` -- MOVED -========================= +:mod:`json_util` -- Tools for using Python's :mod:`json` module with MongoDB documents +====================================================================================== +.. versionadded:: 1.1.1 -.. 
module:: pymongo.json_util - -This module has been deprecated in favor of -:mod:`bson.json_util`. Please use that module instead. - -.. versionchanged:: 1.9 - Deprecated. +.. automodule:: pymongo.json_util + :synopsis: Tools for using Python's json module with MongoDB documents + :members: + :undoc-members: diff -Nru pymongo-1.11/doc/api/pymongo/max_key.rst pymongo-1.7/doc/api/pymongo/max_key.rst --- pymongo-1.11/doc/api/pymongo/max_key.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/pymongo/max_key.rst 2010-06-16 20:43:27.000000000 +0000 @@ -1,10 +1,7 @@ -:mod:`max_key` -- MOVED -======================= +:mod:`max_key` -- Representation for the MongoDB internal MaxKey type +===================================================================== +.. versionadded:: 1.7 -.. module:: pymongo.max_key - -This module has been deprecated in favor of -:mod:`bson.max_key`. Please use that module instead. - -.. versionchanged:: 1.9 - Deprecated. +.. automodule:: pymongo.max_key + :synopsis: Representation for the MongoDB internal MaxKey type + :members: diff -Nru pymongo-1.11/doc/api/pymongo/min_key.rst pymongo-1.7/doc/api/pymongo/min_key.rst --- pymongo-1.11/doc/api/pymongo/min_key.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/pymongo/min_key.rst 2010-06-16 20:42:12.000000000 +0000 @@ -1,10 +1,7 @@ -:mod:`min_key` -- MOVED -======================= +:mod:`min_key` -- Representation for the MongoDB internal MinKey type +===================================================================== +.. versionadded:: 1.7 -.. module:: pymongo.min_key - -This module has been deprecated in favor of -:mod:`bson.min_key`. Please use that module instead. - -.. versionchanged:: 1.9 - Deprecated. +.. 
automodule:: pymongo.min_key + :synopsis: Representation for the MongoDB internal MinKey type + :members: diff -Nru pymongo-1.11/doc/api/pymongo/objectid.rst pymongo-1.7/doc/api/pymongo/objectid.rst --- pymongo-1.11/doc/api/pymongo/objectid.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/pymongo/objectid.rst 2010-05-19 14:00:52.000000000 +0000 @@ -1,10 +1,27 @@ -:mod:`objectid` -- MOVED -======================== +:mod:`objectid` -- Tools for working with MongoDB ObjectIds +=========================================================== -.. module:: pymongo.objectid +.. automodule:: pymongo.objectid + :synopsis: Tools for working with MongoDB ObjectIds -This module has been deprecated in favor of -:mod:`bson.objectid`. Please use that module instead. + .. autoclass:: pymongo.objectid.ObjectId([oid=None]) + :members: -.. versionchanged:: 1.9 - Deprecated. + .. describe:: str(o) + + Get a hex encoded version of :class:`ObjectId` `o`. + + The following property always holds: + + .. testsetup:: + + from pymongo.objectid import ObjectId + + .. doctest:: + + >>> o = ObjectId() + >>> o == ObjectId(str(o)) + True + + This representation is useful for urls or other places where + ``o.binary`` is inappropriate. diff -Nru pymongo-1.11/doc/api/pymongo/son.rst pymongo-1.7/doc/api/pymongo/son.rst --- pymongo-1.11/doc/api/pymongo/son.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/pymongo/son.rst 2010-01-08 19:31:24.000000000 +0000 @@ -1,10 +1,6 @@ -:mod:`son` -- MOVED -=================== +:mod:`son` -- Tools for working with SON, an ordered mapping +============================================================ -.. module:: pymongo.son - -This module has been deprecated in favor of :mod:`bson.son`. Please -use that module instead. - -.. versionchanged:: 1.9 - Deprecated. +.. 
automodule:: pymongo.son + :synopsis: Tools for working with SON, an ordered mapping + :members: diff -Nru pymongo-1.11/doc/api/pymongo/timestamp.rst pymongo-1.7/doc/api/pymongo/timestamp.rst --- pymongo-1.11/doc/api/pymongo/timestamp.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/pymongo/timestamp.rst 2010-06-16 20:38:52.000000000 +0000 @@ -1,10 +1,7 @@ -:mod:`timestamp` -- MOVED -========================= +:mod:`timestamp` -- Tools for representing MongoDB internal Timestamps +====================================================================== +.. versionadded:: 1.5 -.. module:: pymongo.timestamp - -This module has been deprecated in favor of -:mod:`bson.timestamp`. Please use that module instead. - -.. versionchanged:: 1.9 - Deprecated. +.. automodule:: pymongo.timestamp + :synopsis: Tools for representing MongoDB internal Timestamps + :members: diff -Nru pymongo-1.11/doc/api/pymongo/tz_util.rst pymongo-1.7/doc/api/pymongo/tz_util.rst --- pymongo-1.11/doc/api/pymongo/tz_util.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/api/pymongo/tz_util.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -:mod:`tz_util` -- MOVED -======================= - -.. module:: pymongo.tz_util - -This module has been deprecated in favor of -:mod:`bson.tz_util`. Please use that module instead. - -.. versionchanged:: 1.9 - Deprecated. diff -Nru pymongo-1.11/doc/changelog.rst pymongo-1.7/doc/changelog.rst --- pymongo-1.11/doc/changelog.rst 2011-05-05 21:57:25.000000000 +0000 +++ pymongo-1.7/doc/changelog.rst 2010-06-17 15:39:09.000000000 +0000 @@ -1,291 +1,15 @@ Changelog ========= -Changes in Version 1.11 ------------------------ - -Version 1.11 adds a few new features and fixes a few more bugs. - -New Features: - -- Basic IPv6 support: pymongo prefers IPv4 but will try IPv6. You can - also specify an IPv6 address literal in the `host` parameter or a - MongoDB URI provided it is enclosed in '[' and ']'. 
-- max_pool_size option: previously pymongo had a hard coded pool size - of 10 connections. With this change you can specify a different pool - size as a parameter to :class:`~pymongo.connection.Connection` - (max_pool_size=) or in the MongoDB URI (maxPoolSize=). -- Find by metadata in GridFS: You can now specify query fields as - keyword parameters for :meth:`~gridfs.GridFS.get_version` and - :meth:`~gridfs.GridFS.get_last_version`. -- Per-query slave_okay option: slave_okay=True is now a valid keyword - argument for :meth:`~pymongo.collection.Collection.find` and - :meth:`~pymongo.collection.Collection.find_one`. - -API changes: - -- :meth:`~pymongo.database.Database.validate_collection` now returns a - dict instead of a string. This change was required to deal with an - API change on the server. This method also now takes the optional - `scandata` and `full` parameters. See the documentation for more - details. - -.. warning:: The `pool_size`, `auto_start_request`, and `timeout` parameters - for :class:`~pymongo.connection.Connection` have been completely - removed in this release. They were deprecated in pymongo-1.4 and - have had no effect since then. Please make sure that your code - doesn't currently pass these parameters when creating a Connection - instance. - -Issues resolved -............... - -- `PYTHON-241 `_: - Support setting slaveok at the cursor level. -- `PYTHON-240 `_: - Queries can sometimes permanently fail after a replica set fail over. -- `PYTHON-238 `_: - error after few million requests -- `PYTHON-237 `_: - Basic IPv6 support. -- `PYTHON-236 `_: - Restore option to specify pool size in Connection. -- `PYTHON-212 `_: - pymongo does not recover after stale config -- `PYTHON-138 `_: - Find method for GridFS - -Changes in Version 1.10.1 -------------------------- - -Version 1.10.1 is primarily a bugfix release. It fixes a regression in -version 1.10 that broke pickling of ObjectIds. A number of other bugs -have been fixed as well. 
- -There are two behavior changes to be aware of: - -- If a read slave raises :class:`~pymongo.errors.AutoReconnect` - :class:`~pymongo.master_slave_connection.MasterSlaveConnection` will now - retry the query on each slave until it is successful or all slaves have - raised :class:`~pymongo.errors.AutoReconnect`. Any other exception will - immediately be raised. The order that the slaves are tried is random. - Previously the read would be sent to one randomly chosen slave and - :class:`~pymongo.errors.AutoReconnect` was immediately raised in case - of a connection failure. -- A Python `long` is now always BSON encoded as an int64. Previously the - encoding was based only on the value of the field and a `long` with a - value less than `2147483648` or greater than `-2147483649` would always - be BSON encoded as an int32. - -Issues resolved -............... - -- `PYTHON-234 `_: - Fix setup.py to raise exception if any when building extensions -- `PYTHON-233 `_: - Add information to build and test with extensions on windows -- `PYTHON-232 `_: - Traceback when hashing a DBRef instance -- `PYTHON-231 `_: - Traceback when pickling a DBRef instance -- `PYTHON-230 `_: - Pickled ObjectIds are not compatible between pymongo 1.9 and 1.10 -- `PYTHON-228 `_: - Cannot pickle bson.ObjectId -- `PYTHON-227 `_: - Traceback when calling find() on system.js -- `PYTHON-216 `_: - MasterSlaveConnection is missing disconnect() method -- `PYTHON-186 `_: - When storing integers, type is selected according to value instead of type -- `PYTHON-173 `_: - as_class option is not propogated by Cursor.clone -- `PYTHON-113 `_: - Redunducy in MasterSlaveConnection - -Changes in Version 1.10 ------------------------ - -Version 1.10 includes changes to support new features in MongoDB 1.8.x. -Highlights include a modified map/reduce API including an inline map/reduce -helper method, a new find_and_modify helper, and the ability to query the -server for the maximum BSON document size it supports. 
- -- added :meth:`~pymongo.collection.Collection.find_and_modify`. -- added :meth:`~pymongo.collection.Collection.inline_map_reduce`. -- changed :meth:`~pymongo.collection.Collection.map_reduce`. - -.. warning:: MongoDB versions greater than 1.7.4 no longer generate temporary - collections for map/reduce results. An output collection name must be - provided and the output will replace any existing output collection with - the same name. :meth:`~pymongo.collection.Collection.map_reduce` now - requires the `out` parameter. - -Issues resolved -............... - -- PYTHON-225: :class:`~pymongo.objectid.ObjectId` class definition should use __slots__. -- PYTHON-223: Documentation fix. -- PYTHON-220: Documentation fix. -- PYTHON-219: KeyError in :meth:`~pymongo.collection.Collection.find_and_modify` -- PYTHON-213: Query server for maximum BSON document size. -- PYTHON-208: Fix :class:`~pymongo.connection.Connection` __repr__. -- PYTHON-207: Changes to Map/Reduce API. -- PYTHON-205: Accept slaveOk in the URI to match the URI docs. -- PYTHON-203: When slave_okay=True and we only specify one host don't autodetect other set members. -- PYTHON-194: Show size when whining about a document being too large. -- PYTHON-184: Raise :class:`~pymongo.errors.DuplicateKeyError` for duplicate keys in capped collections. -- PYTHON-178: Don't segfault when trying to encode a recursive data structure. -- PYTHON-177: Don't segfault when decoding dicts with broken iterators. -- PYTHON-172: Fix a typo. -- PYTHON-170: Add :meth:`~pymongo.collection.Collection.find_and_modify`. -- PYTHON-169: Support deepcopy of DBRef. -- PYTHON-167: Duplicate of PYTHON-166. -- PYTHON-166: Fixes a concurrency issue. -- PYTHON-158: Add code and err string to `db assertion` messages. - -Changes in Version 1.9 ----------------------- - -Version 1.9 adds a new package to the PyMongo distribution, -:mod:`bson`. 
:mod:`bson` contains all of the `BSON -`_ encoding and decoding logic, and the BSON -types that were formerly in the :mod:`pymongo` package. The following -modules have been renamed: - - - :mod:`pymongo.bson` -> :mod:`bson` - - :mod:`pymongo._cbson` -> :mod:`bson._cbson` and - :mod:`pymongo._cmessage` - - :mod:`pymongo.binary` -> :mod:`bson.binary` - - :mod:`pymongo.code` -> :mod:`bson.code` - - :mod:`pymongo.dbref` -> :mod:`bson.dbref` - - :mod:`pymongo.json_util` -> :mod:`bson.json_util` - - :mod:`pymongo.max_key` -> :mod:`bson.max_key` - - :mod:`pymongo.min_key` -> :mod:`bson.min_key` - - :mod:`pymongo.objectid` -> :mod:`bson.objectid` - - :mod:`pymongo.son` -> :mod:`bson.son` - - :mod:`pymongo.timestamp` -> :mod:`bson.timestamp` - - :mod:`pymongo.tz_util` -> :mod:`bson.tz_util` - -In addition, the following exception classes have been renamed: - - - :class:`pymongo.errors.InvalidBSON` -> - :class:`bson.errors.InvalidBSON` - - :class:`pymongo.errors.InvalidStringData` -> - :class:`bson.errors.InvalidStringData` - - :class:`pymongo.errors.InvalidDocument` -> - :class:`bson.errors.InvalidDocument` - - :class:`pymongo.errors.InvalidId` -> - :class:`bson.errors.InvalidId` - -The above exceptions now inherit from :class:`bson.errors.BSONError` -rather than :class:`pymongo.errors.PyMongoError`. - -.. note:: All of the renamed modules and exceptions above have aliases - created with the old names, so these changes should not break - existing code. The old names will eventually be deprecated and then - removed, so users should begin migrating towards the new names now. - -.. warning:: The change to the exception hierarchy mentioned above is - possibly breaking. If your code is catching - :class:`~pymongo.errors.PyMongoError`, then the exceptions raised - by :mod:`bson` will not be caught, even though they would have been - caught previously. Before upgrading, it is recommended that users - check for any cases like this. 
- -- the C extension now shares buffer.c/h with the Ruby driver -- :mod:`bson` no longer raises :class:`~pymongo.errors.InvalidName`, - all occurrences have been replaced with - :class:`~bson.errors.InvalidDocument`. -- renamed :meth:`bson._to_dicts` to :meth:`~bson.decode_all`. -- renamed :meth:`~bson.BSON.from_dict` to :meth:`~bson.BSON.encode` - and :meth:`~bson.BSON.to_dict` to :meth:`~bson.BSON.decode`. -- added :meth:`~pymongo.cursor.Cursor.batch_size`. -- allow updating (some) file metadata after a - :class:`~gridfs.grid_file.GridIn` instance has been closed. -- performance improvements for reading from GridFS. -- special cased slice with the same start and stop to return an empty - cursor. -- allow writing :class:`unicode` to GridFS if an :attr:`encoding` - attribute has been specified for the file. -- added :meth:`gridfs.GridFS.get_version`. -- scope variables for :class:`~bson.code.Code` can now be specified as - keyword arguments. -- added :meth:`~gridfs.grid_file.GridOut.readline` to - :class:`~gridfs.grid_file.GridOut`. -- make a best effort to transparently auto-reconnect if a - :class:`~pymongo.connection.Connection` has been idle for a while. -- added :meth:`~pymongo.database.SystemJS.list` to - :class:`~pymongo.database.SystemJS`. -- added `file_document` argument to :meth:`~gridfs.grid_file.GridOut` - to allow initializing from an existing file document. -- raise :class:`~pymongo.errors.TimeoutError` even if the - ``getLastError`` command was run manually and not through "safe" - mode. -- added :class:`uuid` support to :mod:`~bson.json_util`. - -Changes in Version 1.8.1 ------------------------- - -- fixed a typo in the C extension that could cause safe-mode - operations to report a failure (:class:`SystemError`) even when none - occurred. -- added a :meth:`__ne__` implementation to any class where we define - :meth:`__eq__`. 
- -Changes in Version 1.8 ----------------------- - -Version 1.8 adds support for connecting to replica sets, specifying -per-operation values for `w` and `wtimeout`, and decoding to -timezone-aware datetimes. - -- fixed a reference leak in the C extension when decoding a - :class:`~bson.dbref.DBRef`. -- added support for `w`, `wtimeout`, and `fsync` (and any other - options for `getLastError`) to "safe mode" operations. -- added :attr:`~pymongo.connection.Connection.nodes` property. -- added a maximum pool size of 10 sockets. -- added support for replica sets. -- DEPRECATED :meth:`~pymongo.connection.Connection.from_uri` and - :meth:`~pymongo.connection.Connection.paired`, both are supplanted - by extended functionality in :meth:`~pymongo.connection.Connection`. -- added tz aware support for datetimes in - :class:`~bson.objectid.ObjectId`, - :class:`~bson.timestamp.Timestamp` and :mod:`~bson.json_util` - methods. -- added :meth:`~pymongo.collection.Collection.drop` helper. -- reuse the socket used for finding the master when a - :class:`~pymongo.connection.Connection` is first created. -- added support for :class:`~bson.min_key.MinKey`, - :class:`~bson.max_key.MaxKey` and - :class:`~bson.timestamp.Timestamp` to :mod:`~bson.json_util`. -- added support for decoding datetimes as aware (UTC) - it is highly - recommended to enable this by setting the `tz_aware` parameter to - :meth:`~pymongo.connection.Connection` to ``True``. -- added `network_timeout` option for individual calls to - :meth:`~pymongo.collection.Collection.find` and - :meth:`~pymongo.collection.Collection.find_one`. -- added :meth:`~gridfs.GridFS.exists` to check if a file exists in - GridFS. -- added support for additional keys in :class:`~bson.dbref.DBRef` - instances. -- added :attr:`~pymongo.errors.OperationFailure.code` attribute to - :class:`~pymongo.errors.OperationFailure` exceptions. -- fixed serialization of int and float subclasses in the C extension. 
- Changes in Version 1.7 ---------------------- -Version 1.7 is a recommended upgrade for all PyMongo users. The full -release notes are below, and some more in depth discussion of the -highlights is `here -`_. +Version 1.7 is a recommended upgrade for all PyMongo users. The full release notes are below, and some more in depth discussion of the highlights is `here `_. - no longer attempt to build the C extension on big-endian systems. -- added :class:`~bson.min_key.MinKey` and - :class:`~bson.max_key.MaxKey`. -- use unsigned for :class:`~bson.timestamp.Timestamp` in BSON +- added :class:`~pymongo.min_key.MinKey` and + :class:`~pymongo.max_key.MaxKey`. +- use unsigned for :class:`~pymongo.timestamp.Timestamp` in BSON encoder/decoder. - support ``True`` as ``"ok"`` in command responses, in addition to ``1.0`` - necessary for server versions **>= 1.5.X** @@ -296,7 +20,7 @@ specify class for returned documents. - added `as_class` argument for :meth:`~pymongo.collection.Collection.find`, and in the BSON decoder. -- added support for creating :class:`~bson.timestamp.Timestamp` +- added support for creating :class:`~pymongo.timestamp.Timestamp` instances using a :class:`~datetime.datetime`. - allow `dropTarget` argument for :class:`~pymongo.collection.Collection.rename`. @@ -339,7 +63,7 @@ :class:`~pymongo.connection.Connection` instances. - more improvements to Python code caching in C extension - should improve behavior on mod_wsgi. -- added :meth:`~bson.objectid.ObjectId.from_datetime`. +- added :meth:`~pymongo.objectid.ObjectId.from_datetime`. - complete rewrite of :mod:`gridfs` support. - improvements to the :meth:`~pymongo.database.Database.command` API. - fixed :meth:`~pymongo.collection.Collection.drop_indexes` behavior @@ -367,7 +91,7 @@ Changes in Version 1.5 ---------------------- -- added subtype constants to :mod:`~bson.binary` module. +- added subtype constants to :mod:`~pymongo.binary` module. 
- DEPRECATED `options` argument to :meth:`~pymongo.collection.Collection` and :meth:`~pymongo.database.Database.create_collection` in favor of @@ -376,7 +100,7 @@ - added :meth:`~pymongo.connection.Connection.copy_database`. - added :data:`~pymongo.cursor.Cursor.alive` to tell when a cursor might have more data to return (useful for tailable cursors). -- added :class:`~bson.timestamp.Timestamp` to better support +- added :class:`~pymongo.timestamp.Timestamp` to better support dealing with internal MongoDB timestamps. - added `name` argument for :meth:`~pymongo.collection.Collection.create_index` and @@ -387,7 +111,7 @@ - :meth:`~pymongo.collection.Collection.insert` returns list for bulk inserts of size one. - fixed handling of :class:`datetime.datetime` instances in - :mod:`~bson.json_util`. + :mod:`~pymongo.json_util`. - added :meth:`~pymongo.connection.Connection.from_uri` to support MongoDB connection uri scheme. - fixed chunk number calculation when unaligned in :mod:`gridfs`. @@ -426,7 +150,7 @@ for all :mod:`~pymongo.errors`. this changes the exception hierarchy somewhat, and is a BREAKING change if you depend on :class:`~pymongo.errors.ConnectionFailure` being a :class:`IOError` - or :class:`~bson.errors.InvalidBSON` being a :class:`ValueError`, + or :class:`~pymongo.errors.InvalidBSON` being a :class:`ValueError`, for example. - added :class:`~pymongo.errors.DuplicateKeyError` for calls to :meth:`~pymongo.collection.Collection.insert` or @@ -447,7 +171,7 @@ parameters to :class:`~pymongo.connection.Connection`. DEPRECATED :meth:`~pymongo.connection.Connection.start_request`. - use :meth:`socket.sendall`. -- removed :meth:`~bson.son.SON.from_xml` as it was only being used +- removed :meth:`~pymongo.son.SON.from_xml` as it was only being used for some internal testing - also eliminates dependency on :mod:`elementtree`. - implementation of :meth:`~pymongo.message.update` in C. 
@@ -457,7 +181,7 @@ - support string as `key` argument to :meth:`~pymongo.collection.Collection.group` (keyf) and run all groups as commands. -- support for equality testing for :class:`~bson.code.Code` +- support for equality testing for :class:`~pymongo.code.Code` instances. - allow the NULL byte in strings and disallow it in key names or regex patterns @@ -494,11 +218,11 @@ - added ``setup.py doc --test`` to run doctests for tutorial, examples - moved most examples to Sphinx docs (and remove from *examples/* directory) -- raise :class:`~bson.errors.InvalidId` instead of +- raise :class:`~pymongo.errors.InvalidId` instead of :class:`TypeError` when passing a 24 character string to - :class:`~bson.objectid.ObjectId` that contains non-hexadecimal + :class:`~pymongo.objectid.ObjectId` that contains non-hexadecimal characters -- allow :class:`unicode` instances for :class:`~bson.objectid.ObjectId` init +- allow :class:`unicode` instances for :class:`~pymongo.objectid.ObjectId` init Changes in Version 1.2 ---------------------- @@ -515,17 +239,17 @@ queries - fix for :meth:`~pymongo.cursor.Cursor.__getitem__` after :meth:`~pymongo.cursor.Cursor.skip` -- allow any UTF-8 string in :class:`~bson.BSON` encoder, not +- allow any UTF-8 string in :class:`~pymongo.bson.BSON` encoder, not just ASCII subset -- added :attr:`~bson.objectid.ObjectId.generation_time` -- removed support for legacy :class:`~bson.objectid.ObjectId` +- added :attr:`~pymongo.objectid.ObjectId.generation_time` +- removed support for legacy :class:`~pymongo.objectid.ObjectId` format - pretty sure this was never used, and is just confusing -- DEPRECATED :meth:`~bson.objectid.ObjectId.url_encode` and - :meth:`~bson.objectid.ObjectId.url_decode` in favor of :meth:`str` - and :meth:`~bson.objectid.ObjectId`, respectively +- DEPRECATED :meth:`~pymongo.objectid.ObjectId.url_encode` and + :meth:`~pymongo.objectid.ObjectId.url_decode` in favor of :meth:`str` + and :meth:`~pymongo.objectid.ObjectId`, respectively 
- allow *oplog.$main* as a valid collection name - some minor fixes for installation process -- added support for datetime and regex in :mod:`~bson.json_util` +- added support for datetime and regex in :mod:`~pymongo.json_util` Changes in Version 1.1.2 ------------------------ @@ -540,8 +264,8 @@ :meth:`~pymongo.collection.Collection.update` - fix unicode regex patterns with C extension - added :meth:`~pymongo.collection.Collection.distinct` -- added `database` support for :class:`~bson.dbref.DBRef` -- added :mod:`~bson.json_util` with helpers for encoding / decoding +- added `database` support for :class:`~pymongo.dbref.DBRef` +- added :mod:`~pymongo.json_util` with helpers for encoding / decoding special types to JSON - DEPRECATED :meth:`pymongo.cursor.Cursor.__len__` in favor of :meth:`~pymongo.cursor.Cursor.count` with `with_limit_and_skip` set @@ -550,11 +274,11 @@ Changes in Version 1.1 ---------------------- -- added :meth:`__hash__` for :class:`~bson.dbref.DBRef` and - :class:`~bson.objectid.ObjectId` +- added :meth:`__hash__` for :class:`~pymongo.dbref.DBRef` and + :class:`~pymongo.objectid.ObjectId` - bulk :meth:`~pymongo.collection.Collection.insert` works with any iterable -- fix :class:`~bson.objectid.ObjectId` generation when using +- fix :class:`~pymongo.objectid.ObjectId` generation when using :mod:`multiprocessing` - added :attr:`~pymongo.cursor.Cursor.collection` - added `network_timeout` parameter for @@ -596,7 +320,7 @@ Changes in Version 0.15 ----------------------- -- fix string representation of :class:`~bson.objectid.ObjectId` +- fix string representation of :class:`~pymongo.objectid.ObjectId` instances - added `timeout` parameter for :meth:`~pymongo.collection.Collection.find` @@ -615,7 +339,7 @@ Changes in Version 0.14 ----------------------- -- support for long in :class:`~bson.BSON` +- support for long in :class:`~pymongo.bson.BSON` - added :meth:`~pymongo.collection.Collection.rename` - added `snapshot` parameter for 
:meth:`~pymongo.collection.Collection.find` @@ -633,11 +357,11 @@ Changes in Version 0.12 ----------------------- -- improved :class:`~bson.objectid.ObjectId` generation +- improved :class:`~pymongo.objectid.ObjectId` generation - added :class:`~pymongo.errors.AutoReconnect` exception for when reconnection is possible - make :mod:`gridfs` thread-safe -- fix for :mod:`gridfs` with non :class:`~bson.objectid.ObjectId` ``_id`` +- fix for :mod:`gridfs` with non :class:`~pymongo.objectid.ObjectId` ``_id`` Changes in Version 0.11.3 ------------------------- diff -Nru pymongo-1.11/doc/conf.py pymongo-1.7/doc/conf.py --- pymongo-1.11/doc/conf.py 2011-04-06 22:53:41.000000000 +0000 +++ pymongo-1.7/doc/conf.py 2010-05-19 14:01:01.000000000 +0000 @@ -5,7 +5,7 @@ # This file is execfile()d with the current directory set to its containing dir. import sys, os -sys.path[0:0] = [os.path.abspath('..')] +sys.path.append(os.path.abspath('..')) import pymongo @@ -27,7 +27,7 @@ # General information about the project. project = u'PyMongo' -copyright = u'2008 - 2011, 10gen, Inc.' +copyright = u'2009, Michael Dirolf' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -101,7 +101,7 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] +html_static_path = ['_static'] # Custom sidebar templates, maps document names to template names. 
#html_sidebars = {} diff -Nru pymongo-1.11/doc/contributors.rst pymongo-1.7/doc/contributors.rst --- pymongo-1.11/doc/contributors.rst 2011-04-28 19:05:59.000000000 +0000 +++ pymongo-1.7/doc/contributors.rst 2010-06-04 17:58:48.000000000 +0000 @@ -27,13 +27,3 @@ - Drew Perttula (drewp) - Carl Baatz (c-w-b) - Johan Bergstrom (jbergstroem) -- Jonas Haag (jonashaag) -- Kristina Chodorow (kchodorow) -- Andrew Sibley (sibsibsib) -- Flavio Percoco Premoli (FlaPer87) -- Ken Kurzweil (kurzweil) -- Christian Wyglendowski (dowski) -- Brendan W. McAdams (bwmcadams) -- Bernie Hackett (behackett) -- Aaron Westendorf (awestendorf) -- Dan Crosta (dcrosta) diff -Nru pymongo-1.11/doc/examples/custom_type.rst pymongo-1.7/doc/examples/custom_type.rst --- pymongo-1.11/doc/examples/custom_type.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/examples/custom_type.rst 2010-05-19 14:00:52.000000000 +0000 @@ -38,7 +38,7 @@ 10 When we try to save an instance of :class:`Custom` with PyMongo, we'll -get an :class:`~bson.errors.InvalidDocument` exception: +get an :class:`~pymongo.errors.InvalidDocument` exception: .. doctest:: @@ -126,9 +126,7 @@ >>> db.test.find_one()["custom"].x() 5 -If we get a new :class:`~pymongo.database.Database` instance we'll -clear out the :class:`~pymongo.son_manipulator.SONManipulator` -instance we added: +If we get a new :class:`~pymongo.database.Database` instance we'll clear out the :class:`~pymongo.son_manipulator.SONManipulator` instance we added: .. doctest:: @@ -153,14 +151,14 @@ We'll start by defining the methods :meth:`to_binary` and :meth:`from_binary`, which convert :class:`Custom` instances to and -from :class:`~bson.binary.Binary` instances: +from :class:`~pymongo.binary.Binary` instances: .. note:: You could just pickle the instance and save that. What we do here is a little more lightweight. .. doctest:: - >>> from bson.binary import Binary + >>> from pymongo.binary import Binary >>> def to_binary(custom): ... 
return Binary(str(custom.x()), 128) ... @@ -213,7 +211,7 @@ 5 We can see what's actually being saved to the database (and verify -that it is using a :class:`~bson.binary.Binary` instance) by +that it is using a :class:`~pymongo.binary.Binary` instance) by clearing out the manipulators and repeating our :meth:`~pymongo.collection.Collection.find_one`: diff -Nru pymongo-1.11/doc/examples/gridfs.rst pymongo-1.7/doc/examples/gridfs.rst --- pymongo-1.11/doc/examples/gridfs.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/examples/gridfs.rst 2010-05-19 14:01:01.000000000 +0000 @@ -1,6 +1,14 @@ GridFS Example ============== +.. warning:: + + This example is out of date, and documents the API for GridFS in + PyMongo versions < 1.6. If you are using a version of PyMongo that + is >= 1.6 please see `this blog post + `_ + for an overview of how the new API works. + .. testsetup:: from pymongo import Connection @@ -12,10 +20,6 @@ .. seealso:: The API docs for :mod:`gridfs`. -.. seealso:: `This blog post - `_ - for some motivation behind this API. - Setup ----- @@ -35,49 +39,33 @@ Saving and Retrieving Data -------------------------- -The simplest way to work with :mod:`gridfs` is to use its key/value -interface (the :meth:`~gridfs.GridFS.put` and -:meth:`~gridfs.GridFS.get` methods). To write data to GridFS, use -:meth:`~gridfs.GridFS.put`: +The :mod:`gridfs` module exposes a file-like interface that should be +familiar to most Python programmers. We can open a file for writing +and insert some data: .. doctest:: - >>> a = fs.put("hello world") + >>> f = fs.open("hello.txt", "w") + >>> f.write("hello ") + >>> f.write("world") + >>> f.close() -:meth:`~gridfs.GridFS.put` creates a new file in GridFS, and returns -the value of the file document's ``"_id"`` key. Given that ``"_id"`` -we can use :meth:`~gridfs.GridFS.get` to get back the contents of the -file: +We can then read back the data that was just inserted: .. 
doctest:: - >>> fs.get(a).read() + >>> g = fs.open("hello.txt") + >>> g.read() 'hello world' + >>> g.close() -:meth:`~gridfs.GridFS.get` returns a file-like object, so we get the -file's contents by calling :meth:`~gridfs.grid_file.GridOut.read`. - -In addition to putting a :class:`str` as a GridFS file, we can also -put any file-like object (an object with a :meth:`read` -method). GridFS will handle reading the file in chunk-sized segments -automatically. We can also add additional attributes to the file as -keyword arguments: +It's important that :meth:`~gridfs.grid_file.GridFile.close` gets +called for every file that gets opened. If you're using a Python +interpreter that supports the ``with`` statement doing so is easy: .. doctest:: - >>> b = fs.put(fs.get(a), filename="foo", bar="baz") - >>> out = fs.get(b) - >>> out.read() + >>> with fs.open("hello.txt") as g: + ... g.read() + ... 'hello world' - >>> out.filename - u'foo' - >>> out.bar - u'baz' - >>> out.upload_date - datetime.datetime(...) - -The attributes we set in :meth:`~gridfs.GridFS.put` are stored in the -file document, and retrievable after calling -:meth:`~gridfs.GridFS.get`. Some attributes (like ``"filename"``) are -special and are defined in the GridFS specification - see that -document for more details. diff -Nru pymongo-1.11/doc/examples/index.rst pymongo-1.7/doc/examples/index.rst --- pymongo-1.11/doc/examples/index.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/examples/index.rst 2010-05-19 14:01:01.000000000 +0000 @@ -19,5 +19,4 @@ gridfs map_reduce geo - replica_set custom_type diff -Nru pymongo-1.11/doc/examples/map_reduce.rst pymongo-1.7/doc/examples/map_reduce.rst --- pymongo-1.11/doc/examples/map_reduce.rst 2011-03-18 23:53:50.000000000 +0000 +++ pymongo-1.7/doc/examples/map_reduce.rst 2010-06-17 15:37:47.000000000 +0000 @@ -46,7 +46,7 @@ .. doctest:: - >>> from bson.code import Code + >>> from pymongo.code import Code >>> map = Code("function () {" ... 
" this.tags.forEach(function(z) {" ... " emit(z, 1);" @@ -73,7 +73,7 @@ .. doctest:: - >>> result = db.things.map_reduce(map, reduce, "myresults") + >>> result = db.things.map_reduce(map, reduce) >>> for doc in result.find(): ... print doc ... @@ -88,14 +88,14 @@ .. doctest:: - >>> db.things.map_reduce(map, reduce, "myresults", full_response=True) - {u'counts': {u'input': 4, u'emit': 6, u'output': 3}, u'timeMillis': ..., u'ok': ..., u'result': u'...'} + >>> db.things.map_reduce(map, reduce, full_response=True) + {u'counts': {u'input': 4, u'emit': 6, u'output': 3}, u'timeMillis': ..., u'ok': True, u'result': u'...'} All of the optional map/reduce parameters are also supported, simply pass them as keyword arguments. In this example we use the `query` parameter to limit the documents that will be mapped over: .. doctest:: - >>> result = db.things.map_reduce(map, reduce, "myresults", query={"x": {"$lt": 3}}) + >>> result = db.things.map_reduce(map, reduce, query={"x": {"$lt": 3}}) >>> for doc in result.find(): ... print doc ... diff -Nru pymongo-1.11/doc/examples/replica_set.rst pymongo-1.7/doc/examples/replica_set.rst --- pymongo-1.11/doc/examples/replica_set.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/examples/replica_set.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,153 +0,0 @@ -Connecting to a Replica Set -=========================== - -PyMongo makes working with `replica sets -`_ easy. Here we'll launch a new -replica set and show how to handle both initialization and normal -connections with PyMongo. - -.. note:: Replica sets require server version **>= 1.6.0**. Support - for connecting to replica sets also requires PyMongo version **>= - 1.8.0**. - -.. mongodoc:: rs - -Starting a Replica Set ----------------------- - -The main `replica set documentation -`_ contains extensive information -about setting up a new replica set or migrating an existing MongoDB -setup, be sure to check that out. 
Here, we'll just do the bare minimum -to get a three node replica set setup locally. - -.. warning:: Replica sets should always use multiple nodes in - production - putting all set members on the same physical node is - only recommended for testing and development. - -We start three ``mongod`` processes, each on a different port and with -a different dbpath, but all using the same replica set name "foo". In -the example we use the hostname "morton.local", so replace that with -your hostname when running: - -.. code-block:: bash - - $ hostname - morton.local - $ mongod --replSet foo/morton.local:27018,morton.local:27019 --rest - -.. code-block:: bash - - $ mongod --port 27018 --dbpath /data/db1 --replSet foo/morton.local:27017 --rest - -.. code-block:: bash - - $ mongod --port 27019 --dbpath /data/db2 --replSet foo/morton.local:27017 --rest - -Initializing the Set --------------------- - -At this point all of our nodes are up and running, but the set has yet -to be initialized. Until the set is initialized no node will become -the primary, and things are essentially "offline". - -To initialize the set we need to connect to a single node and run the -initiate command. Since we don't have a primary yet, we'll need to -tell PyMongo that it's okay to connect to a slave/secondary:: - - >>> from pymongo import Connection - >>> c = Connection("morton.local:27017", slave_okay=True) - -.. note:: We could have connected to any of the other nodes instead, - but only the node we initiate from is allowed to contain any - initial data. - -After connecting, we run the initiate command to get things started -(here we just use an implicit configuration, for more advanced -configuration options see the replica set documentation):: - - >>> c.admin.command("replSetInitiate") - {u'info': u'Config now saved locally. 
Should come online in about a minute.', - u'info2': u'no configuration explicitly specified -- making one', u'ok': 1.0} - -The three ``mongod`` servers we started earlier will now coordinate -and come online as a replica set. - -Connecting to a Replica Set ---------------------------- - -The initial connection as made above is a special case for an -uninitialized replica set. Normally we'll want to connect -differently. A connection to a replica set can be made using the -normal :meth:`~pymongo.connection.Connection` constructor, specifying -one or more members of the set. For example, any of the following -will create a connection to the set we just created:: - - >>> Connection("morton.local") - Connection([u'morton.local:27019', 'morton.local:27017', u'morton.local:27018']) - >>> Connection("morton.local:27018") - Connection([u'morton.local:27019', u'morton.local:27017', 'morton.local:27018']) - >>> Connection("morton.local", 27019) - Connection(['morton.local:27019', u'morton.local:27017', u'morton.local:27018']) - >>> Connection(["morton.local:27018", "morton.local:27019"]) - Connection(['morton.local:27019', u'morton.local:27017', 'morton.local:27018']) - >>> Connection("mongodb://morton.local:27017,morton.local:27018,morton.local:27019") - Connection(['morton.local:27019', 'morton.local:27017', 'morton.local:27018']) - -The nodes passed to :meth:`~pymongo.connection.Connection` are called -the *seeds*. As long as at least one of the seeds is online, the -driver will be able to "discover" all of the nodes in the set and make -a connection to the current primary. - -Handling Failover ------------------ - -When a failover occurs, PyMongo will automatically attempt to find the -new primary node and perform subsequent operations on that node. This -can't happen completely transparently, however. Here we'll perform an -example failover to illustrate how everything behaves. 
First, we'll -connect to the replica set and perform a couple of basic operations:: - - >>> db = Connection("morton.local").test - >>> db.test.save({"x": 1}) - ObjectId('...') - >>> db.test.find_one() - {u'x': 1, u'_id': ObjectId('...')} - -By checking the host and port, we can see that we're connected to -*morton.local:27017*, which is the current primary:: - - >>> db.connection.host - 'morton.local' - >>> db.connection.port - 27017 - -Now let's bring down that node and see what happens when we run our -query again:: - - >>> db.test.find_one() - Traceback (most recent call last): - pymongo.errors.AutoReconnect: ... - -We get an :class:`~pymongo.errors.AutoReconnect` exception. This means -that the driver was not able to connect to the old primary (which -makes sense, as we killed the server), but that it will attempt to -automatically reconnect on subsequent operations. When this exception -is raised our application code needs to decide whether to retry the -operation or to simply continue, accepting the fact that the operation -might have failed. - -On subsequent attempts to run the query we might continue to see this -exception. Eventually, however, the replica set will failover and -elect a new primary (this should take a couple of seconds in -general). At that point the driver will connect to the new primary and -the operation will succeed:: - - >>> db.test.find_one() - {u'x': 1, u'_id': ObjectId('...')} - >>> db.connection.host - u'morton.local' - >>> db.connection.port - 27018 - - diff -Nru pymongo-1.11/doc/faq.rst pymongo-1.7/doc/faq.rst --- pymongo-1.11/doc/faq.rst 2011-03-19 00:39:47.000000000 +0000 +++ pymongo-1.7/doc/faq.rst 2010-06-17 15:37:47.000000000 +0000 @@ -111,14 +111,9 @@ Prior to PyMongo version 1.7, the correct way is to only save naive :class:`~datetime.datetime` instances, and to save all dates as UTC. In versions >= 1.7, the driver will automatically convert aware -datetimes to UTC before saving them. 
By default, datetimes retrieved -from the server (no matter what version of the driver you're using) -will be naive and represent UTC. In newer versions of the driver you -can set the :class:`~pymongo.connection.Connection` `tz_aware` -parameter to ``True``, which will cause all -:class:`~datetime.datetime` instances returned from that Connection to -be aware (UTC). This setting is recommended, as it can force -application code to handle timezones properly. +datetimes to UTC before saving them. All datetimes retrieved from the +server (no matter what version of the driver you're using) will be +naive and represent UTC. .. warning:: Be careful not to save naive :class:`~datetime.datetime` instances that are not UTC (i.e. the result of calling @@ -139,20 +134,14 @@ ------------------------------------------------------- `Django `_ is a popular Python web framework. Django includes an ORM, :mod:`django.db`. Currently, -there's no official MongoDB backend for Django. +MongoDB is not supported as a back-end for :mod:`django.db`. -`django-mongodb-engine `_ -is an unofficial, actively developed MongoDB backend that supports Django -aggregations, (atomic) updates, embedded objects, Map/Reduce and GridFS. -It allows you to use most of Django's built-in features, including the -ORM, admin, authentication, site and session frameworks and caching through -`django-mongodb-cache `_. - -However, it's easy to use MongoDB (and PyMongo) from Django -without using a Django backend. Certain features of Django that require +That being said, it's easy to use MongoDB (and PyMongo) from Django +without using such a project. Certain features of Django that require :mod:`django.db` (admin, authentication and sessions) will not work using just MongoDB, but most of what Django provides can still be -used. +used. This is similar to using Django on top of the `App Engine +datastore `_. We have written a demo `Django + MongoDB project `_. 
The README for that @@ -188,10 +177,10 @@ ---------------------------------------------------------------------------------------- The :mod:`json` module won't work out of the box with all documents from PyMongo as PyMongo supports some special types (like -:class:`~bson.objectid.ObjectId` and :class:`~bson.dbref.DBRef`) +:class:`~pymongo.objectid.ObjectId` and :class:`~pymongo.dbref.DBRef`) that are not supported in JSON. We've added some utilities for working with :mod:`json` and :mod:`simplejson` in the -:mod:`~bson.json_util` module. +:mod:`~pymongo.json_util` module. .. _year-2038-problem: @@ -201,7 +190,7 @@ :mod:`time_t` type. On most 32-bit operating systems :mod:`time_t` is a signed 4 byte integer which means it can't handle dates after 19 January 2038; this is known as the `year 2038 problem `_. Neither MongoDB nor -Python uses :mod:`time_t` to represent dates internally so do not suffer from this problem, but +Python uses :mod:`time_t` to represent dates internally so do not suffer from this problem, but Python's :mod:`datetime.datetime.fromtimestamp()` used by PyMongo's Python implementation of :mod:`bson` does, which means it is susceptible. Therefore, on 32-bit systems you may get an error retrieving dates after 2038 from MongoDB using PyMongo with the Python version of diff -Nru pymongo-1.11/doc/index.rst pymongo-1.7/doc/index.rst --- pymongo-1.11/doc/index.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/index.rst 2010-05-19 14:01:01.000000000 +0000 @@ -29,10 +29,6 @@ A listing of Python tools and libraries that have been written for MongoDB. -Getting Help ------------- -If you're having trouble or have questions about PyMongo, the best place to ask is the `MongoDB user group `_. Once you get an answer, it'd be great if you could work it back into this documentation and contribute! 
- Issues ------ All issues should be reported (and can be tracked / voted for / diff -Nru pymongo-1.11/doc/installation.rst pymongo-1.7/doc/installation.rst --- pymongo-1.11/doc/installation.rst 2011-05-02 20:49:17.000000000 +0000 +++ pymongo-1.7/doc/installation.rst 2010-01-08 19:31:24.000000000 +0000 @@ -3,28 +3,7 @@ .. highlight:: bash **PyMongo** is in the `Python Package Index -`_. - -Installing with pip -------------------- - -We recommend using `pip `_ -to install pymongo:: - - $ pip install pymongo - -To get a specific version of pymongo:: - - $ pip install pymongo==1.10.1 - -To upgrade using pip:: - - $ pip install --upgrade pymongo - -Installing with easy_install ----------------------------- - -If you must install pymongo using +`_. To install PyMongo using `setuptools `_ do:: $ easy_install pymongo @@ -33,41 +12,6 @@ $ easy_install -U pymongo -Mac OS Issues -------------- - -By default OSX uses `/usr/bin/easy_install` for third party package installs. -This script is hardcoded to use a version of setuptools that is older than -the version required by pymongo for python2.7 support. You can work around -it like this:: - - $ easy_install -U setuptools - $ python -m easy_install pymongo - -To upgrade do:: - - $ python -m easy_install -U pymongo - -**Xcode 4 Users**: The Python versions shipped with OSX 10.6.x are universal -binaries. They support i386, PPC, and (in the case of python2.6) x86_64. -Xcode 4 removed support for PPC. Because of this the distutils version -shipped with Apple's builds of Python will fail to build the C extensions -if you have Xcode 4 installed. This issue may also affect some builds of -Python downloaded from python.org. 
There is a workaround:: - - # For Apple-supplied Python2.6 (installed at /usr/bin/python2.6) - $ env ARCHFLAGS='-arch i386 -arch x86_64' python -m easy_install pymongo - - # For 32-bit-only Python (/usr/bin/python2.5 and some builds - # from python.org) - $ env ARCHFLAGS='-arch i386' python -m easy_install pymongo - -See `http://bugs.python.org/issue11623 `_ -for a more detailed explanation. - -Install from source -------------------- - If you'd rather install directly from the source (i.e. to stay on the bleeding edge), check out the latest source from github and install the driver from the resulting tree:: @@ -76,30 +20,18 @@ $ cd pymongo/ $ python setup.py install -Dependencies for installing C Extensions on Unix ------------------------------------------------- - -10gen does not currently provide statically linked binary packages for -Unix. To build the optional C extensions you must have the GNU C compiler -(gcc) installed. Depending on your flavor of Unix (or Linux distribution) -you may also need a python-dev package that provides the necessary header -files for your version of Python. The package name may vary from distro -to distro. - -Pre-built eggs are available for OSX Snow Leopard on PyPI. - .. _install-no-c: -Installing Without C Extensions -------------------------------- -By default, the driver attempts to build and install optional C -extensions (used for increasing performance) when it is installed. If -any extension fails to build the driver will be installed anyway but a +Installing Without the C Extension +---------------------------------- +By default, the driver attempts to build and install an optional C +extension (used for increasing performance) when it is installed. If +the extension fails to build the driver will be installed anyway but a warning will be printed. -In :ref:`certain cases `, you might wish to -install the driver without the C extensions, even if the extensions -build properly. 
This can be done using a command line option to -*setup.py*:: +In :ref:`certain cases `, you +might wish to install the driver without the C extension, even if the +extension builds properly. This can be done using a command line +option to *setup.py*:: $ python setup.py --no_ext install diff -Nru pymongo-1.11/doc/mongo_extensions.py pymongo-1.7/doc/mongo_extensions.py --- pymongo-1.11/doc/mongo_extensions.py 2011-04-06 20:51:09.000000000 +0000 +++ pymongo-1.7/doc/mongo_extensions.py 2010-05-19 14:01:01.000000000 +0000 @@ -20,11 +20,9 @@ from sphinx.util.compat import (Directive, make_admonition) - class mongodoc(nodes.Admonition, nodes.Element): pass - class mongoref(nodes.reference): pass @@ -62,10 +60,10 @@ env = self.state.document.settings.env return make_admonition(mongodoc, self.name, - ['See general MongoDB documentation'], + [_('See general MongoDB documentation')], self.options, self.content, self.lineno, - self.content_offset, self.block_text, - self.state, self.state_machine) + self.content_offset, self.block_text, self.state, + self.state_machine) def process_mongodoc_nodes(app, doctree, fromdocname): @@ -85,7 +83,7 @@ link = mongoref("", "") link["refuri"] = "http://dochub.mongodb.org/core/%s" % tag link["name"] = anchor - link.append(nodes.emphasis(tag, tag)) + link.append(nodes.emphasis(_(tag), _(tag))) new_para = nodes.paragraph() new_para += link node.replace(para, new_para) diff -Nru pymongo-1.11/doc/tools.rst pymongo-1.7/doc/tools.rst --- pymongo-1.11/doc/tools.rst 2011-04-06 22:49:58.000000000 +0000 +++ pymongo-1.7/doc/tools.rst 2010-05-19 14:01:01.000000000 +0000 @@ -26,7 +26,7 @@ increased your understanding of how MongoDB actually works. MongoKit - The `MongoKit `_ framework + The `MongoKit `_ framework is an ORM-like layer on top of PyMongo. There is also a MongoKit `google group `_. @@ -39,12 +39,6 @@ `_ for more details. -MongoAlchemy - `MongoAlchemy `_ is another ORM-like layer on top of - PyMongo. Its API is inspired by `SQLAlchemy `_. 
The - code is available `on github `_; - for more information, see `the tutorial `_. - MongoEngine `MongoEngine `_ is another ORM-like layer on top of PyMongo. It allows you to define schemas for @@ -53,26 +47,27 @@ `_; for more information, see the `tutorial `_. -django-mongodb-engine - `Django MongoDB Engine - `_ is a MongoDB - database backend for Django. - -Minimongo - `minimongo `_ is a lightweight, - pythonic interface to MongoDB. It retains pymongo's query and update API, - and provides a number of additional features, including a simple - document-oriented interface, connection pooling, index management, and - collection & database naming helpers. The `source is on github - `_. +Django + The `Django `_ fork adds nosql + support to Django and implements a mongodb backend that handles + mongodb databases. There are 2 repos being used for this fork, the + one in the first link and this `Django + `_. Both repositories are + synchronized and have the same code. + +pymongo-bongo + `pymongo-bongo `_ is a + project to add some syntactic sugar on top of PyMongo. It is open + source and the code is available on `github + `_. Smaller or less well-maintained projects (in order from most to least recently committed to at the time of the last update to this list): - - `pymongo-bongo `_ (11/2009) - - `mongodb-object `_ (10/2009) - - `MongoMagic `_ (7/2009) - - `django-mongodb `_ (7/2009) + - `mongodb-object `_ + - `mongo-mapper `_ + - `MongoMagic `_ + - `django-mongodb `_ Framework Tools --------------- @@ -86,9 +81,9 @@ mongodb_beaker `mongodb_beaker `_ is a project to enable using MongoDB as a backend for `beaker's - `_ caching / session system. - `The source is on github - `_. + `_ caching / session system. The + `source is on bitbucket + `_. 
MongoLog `MongoLog `_ diff -Nru pymongo-1.11/doc/tutorial.rst pymongo-1.7/doc/tutorial.rst --- pymongo-1.11/doc/tutorial.rst 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/doc/tutorial.rst 2010-06-17 15:37:47.000000000 +0000 @@ -13,8 +13,8 @@ Prerequisites ------------- Before we start, make sure that you have the **PyMongo** distribution -:doc:`installed `. In the Python shell, the following -should run without raising an exception: +:doc:`installed `. In the Python shell, the following should run without +raising an exception: .. doctest:: @@ -204,7 +204,7 @@ There are a couple of interesting things to note about this example: - The call to :meth:`~pymongo.collection.Collection.insert` now - returns two :class:`~bson.objectid.ObjectId` instances, one for + returns two :class:`~pymongo.objectid.ObjectId` instances, one for each inserted document. - ``new_posts[1]`` has a different "shape" than the other posts - there is no ``"tags"`` field and we've added a new field, diff -Nru pymongo-1.11/ez_setup.py pymongo-1.7/ez_setup.py --- pymongo-1.11/ez_setup.py 2011-01-21 03:03:14.000000000 +0000 +++ pymongo-1.7/ez_setup.py 2009-10-12 13:37:58.000000000 +0000 @@ -14,7 +14,7 @@ This file can also be run as a script to install or upgrade setuptools. 
""" import sys -DEFAULT_VERSION = "0.6c11" +DEFAULT_VERSION = "0.6c9" DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3] md5_data = { @@ -28,14 +28,6 @@ 'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4', 'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c', 'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b', - 'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090', - 'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4', - 'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7', - 'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5', - 'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de', - 'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b', - 'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2', - 'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086', 'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27', 'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277', 'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa', @@ -112,11 +104,11 @@ "\n\n(Currently using %r)" ) % (version, e.args[0]) sys.exit(2) + else: + del pkg_resources, sys.modules['pkg_resources'] # reload ok + return do_download() except pkg_resources.DistributionNotFound: - pass - - del pkg_resources, sys.modules['pkg_resources'] # reload ok - return do_download() + return do_download() def download_setuptools( version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, diff -Nru pymongo-1.11/gridfs/errors.py pymongo-1.7/gridfs/errors.py --- pymongo-1.11/gridfs/errors.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/gridfs/errors.py 2010-06-17 15:37:47.000000000 +0000 @@ -35,14 +35,12 @@ .. versionadded:: 1.6 """ - class FileExists(GridFSError): """Raised when trying to create a file that already exists. .. 
versionadded:: 1.7 """ - class UnsupportedAPI(GridFSError): """Raised when trying to use the old GridFS API. diff -Nru pymongo-1.11/gridfs/grid_file.py pymongo-1.7/gridfs/grid_file.py --- pymongo-1.11/gridfs/grid_file.py 2011-04-26 21:31:14.000000000 +0000 +++ pymongo-1.7/gridfs/grid_file.py 2010-06-17 15:33:47.000000000 +0000 @@ -22,22 +22,21 @@ except ImportError: from StringIO import StringIO -from bson.binary import Binary -from bson.objectid import ObjectId from gridfs.errors import (CorruptGridFile, FileExists, NoFile, UnsupportedAPI) from pymongo import ASCENDING +from pymongo.binary import Binary from pymongo.collection import Collection from pymongo.errors import DuplicateKeyError +from pymongo.objectid import ObjectId try: _SEEK_SET = os.SEEK_SET _SEEK_CUR = os.SEEK_CUR _SEEK_END = os.SEEK_END -# before 2.5 -except AttributeError: +except AttributeError: # before 2.5 _SEEK_SET = 0 _SEEK_CUR = 1 _SEEK_END = 2 @@ -56,16 +55,18 @@ raise AttributeError("can only get %r on a closed file" % field_name) return self._file.get(field_name, None) - def setter(self, value): if self._closed: - self._coll.files.update({"_id": self._file["_id"]}, - {"$set": {field_name: value}}, safe=True) + raise AttributeError("cannot set %r on a closed file" % + field_name) self._file[field_name] = value if read_only: docstring = docstring + "\n\nThis attribute is read-only." 
- elif closed_only: + elif not closed_only: + docstring = "%s\n\n%s" % (docstring, "This attribute can only be " + "set before :meth:`close` has been called.") + else: docstring = "%s\n\n%s" % (docstring, "This attribute is read-only and " "can only be read after :meth:`close` " "has been called.") @@ -95,7 +96,7 @@ arguments include: - ``"_id"``: unique ID for this file (default: - :class:`~bson.objectid.ObjectId`) - this ``"_id"`` must + :class:`~pymongo.objectid.ObjectId`) - this ``"_id"`` must not have already been used for another file - ``"filename"``: human name for the file @@ -106,17 +107,12 @@ - ``"chunkSize"`` or ``"chunk_size"``: size of each of the chunks, in bytes (default: 256 kb) - - ``"encoding"``: encoding used for this file - any - :class:`unicode` that is written to the file will be - converted to a :class:`str` with this encoding - :Parameters: - `root_collection`: root collection to write to - `**kwargs` (optional): file level options (see above) """ if not isinstance(root_collection, Collection): - raise TypeError("root_collection must be an " - "instance of Collection") + raise TypeError("root_collection must be an instance of Collection") # Handle alternative naming if "content_type" in kwargs: @@ -128,8 +124,7 @@ kwargs["_id"] = kwargs.get("_id", ObjectId()) kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE) - root_collection.chunks.ensure_index([("files_id", ASCENDING), - ("n", ASCENDING)], + root_collection.chunks.ensure_index([("files_id", ASCENDING), ("n", ASCENDING)], unique=True) object.__setattr__(self, "_coll", root_collection) object.__setattr__(self, "_chunks", root_collection.chunks) @@ -166,10 +161,9 @@ raise AttributeError("GridIn object has no attribute '%s'" % name) def __setattr__(self, name, value): - object.__setattr__(self, name, value) if self._closed: - self._coll.files.update({"_id": self._file["_id"]}, - {"$set": {name: value}}, safe=True) + raise AttributeError("cannot set %r on a closed file" % name) 
+ object.__setattr__(self, name, value) def __flush_data(self, data): """Flush `data` to a chunk. @@ -210,6 +204,7 @@ except DuplicateKeyError: raise FileExists("file with _id %r already exists" % self._id) + def close(self): """Flush the file and close it. @@ -220,36 +215,28 @@ self.__flush() self._closed = True + # TODO should support writing unicode to a file. this means that files will + # need to have an encoding attribute. def write(self, data): """Write data to the file. There is no return value. `data` can be either a string of bytes or a file-like object - (implementing :meth:`read`). If the file has an - :attr:`encoding` attribute, `data` can also be a - :class:`unicode` instance, which will be encoded as - :attr:`encoding` before being written. + (implementing :meth:`read`). Due to buffering, the data may not actually be written to the database until the :meth:`close` method is called. Raises :class:`ValueError` if this file is already closed. Raises :class:`TypeError` if `data` is not an instance of - :class:`str`, a file-like object, or an instance of - :class:`unicode` (only allowed if the file has an - :attr:`encoding` attribute). + :class:`str` or a file-like object. :Parameters: - `data`: string of bytes or file-like object to be written to the file - - .. versionadded:: 1.9 - The ability to write :class:`unicode`, if the file has an - :attr:`encoding` attribute. 
""" if self._closed: raise ValueError("cannot write to a closed file") - # file-like - try: + try: # file-like if self._buffer.tell() > 0: space = self.chunk_size - self._buffer.tell() self._buffer.write(data.read(space)) @@ -259,18 +246,10 @@ self.__flush_data(to_write) to_write = data.read(self.chunk_size) self._buffer.write(to_write) - # string - except AttributeError: - if not isinstance(data, basestring): + except AttributeError: # string + if not isinstance(data, str): raise TypeError("can only write strings or file-like objects") - if isinstance(data, unicode): - try: - data = data.encode(self.encoding) - except AttributeError: - raise TypeError("must specify an encoding for file in " - "order to write unicode") - while data: space = self.chunk_size - self._buffer.tell() @@ -301,46 +280,35 @@ Close the file and allow exceptions to propogate. """ self.close() - - # propogate exceptions - return False + return False # propogate exceptions class GridOut(object): """Class to read data out of GridFS. """ - def __init__(self, root_collection, file_id=None, file_document=None): + def __init__(self, root_collection, file_id): """Read a file from GridFS Application developers should generally not need to instantiate this class directly - instead see the methods provided by :class:`~gridfs.GridFS`. - Either `file_id` or `file_document` must be specified, - `file_document` will be given priority if present. Raises - :class:`TypeError` if `root_collection` is not an instance of + Raises :class:`TypeError` if `root_collection` is not an instance of :class:`~pymongo.collection.Collection`. :Parameters: - `root_collection`: root collection to read from - `file_id`: value of ``"_id"`` for the file to read - - `file_document`: file document from `root_collection.files` - - .. versionadded:: 1.9 - The `file_document` parameter. 
""" if not isinstance(root_collection, Collection): - raise TypeError("root_collection must be an " - "instance of Collection") + raise TypeError("root_collection must be an instance of Collection") self.__chunks = root_collection.chunks - - files = root_collection.files - self._file = file_document or files.find_one({"_id": file_id}) + self._file = root_collection.files.find_one({"_id": file_id}) if not self._file: raise NoFile("no file in gridfs collection %r with _id %r" % - (files, file_id)) + (root_collection, file_id)) self.__buffer = "" self.__position = 0 @@ -366,7 +334,7 @@ def __getattr__(self, name): if name in self._file: return self._file[name] - raise AttributeError("GridOut object has no attribute '%s'" % name) + raise AttributeError("GridIn object has no attribute '%s'" % name) def read(self, size=-1): """Read at most `size` bytes from the file (less if there @@ -385,47 +353,27 @@ if size < 0 or size > remainder: size = remainder - received = len(self.__buffer) - chunk_number = (received + self.__position) / self.chunk_size - chunks = [] + data = self.__buffer + chunk_number = (len(data) + self.__position) / self.chunk_size - while received < size: + while len(data) < size: chunk = self.__chunks.find_one({"files_id": self._id, "n": chunk_number}) if not chunk: raise CorruptGridFile("no chunk #%d" % chunk_number) - if received: - chunk_data = chunk["data"] + if not data: + data += chunk["data"][self.__position % self.chunk_size:] else: - chunk_data = chunk["data"][self.__position % self.chunk_size:] + data += chunk["data"] - received += len(chunk_data) - chunks.append(chunk_data) chunk_number += 1 - data = "".join([self.__buffer] + chunks) self.__position += size to_return = data[:size] self.__buffer = data[size:] return to_return - def readline(self, size=-1): - """Read one line or up to `size` bytes from the file. - - :Parameters: - - `size` (optional): the maximum number of bytes to read - - .. 
versionadded:: 1.9 - """ - bytes = "" - while len(bytes) != size: - byte = self.read(1) - bytes += byte - if byte == "" or byte == "\n": - break - return bytes - def tell(self): """Return the current position of this file. """ diff -Nru pymongo-1.11/gridfs/__init__.py pymongo-1.7/gridfs/__init__.py --- pymongo-1.11/gridfs/__init__.py 2011-04-28 19:03:54.000000000 +0000 +++ pymongo-1.7/gridfs/__init__.py 2010-06-01 19:53:40.000000000 +0000 @@ -28,7 +28,6 @@ DESCENDING) from pymongo.database import Database - class GridFS(object): """An instance of GridFS on top of a single Database. """ @@ -87,11 +86,8 @@ >>> f.close() `data` can be either an instance of :class:`str` or a - file-like object providing a :meth:`read` method. If an - `encoding` keyword argument is passed, `data` can also be a - :class:`unicode` instance, which will be encoded as `encoding` - before being written. Any keyword arguments will be passed - through to the created file - see + file-like object providing a :meth:`read` method. Any keyword + arguments will be passed through to the created file - see :meth:`~gridfs.grid_file.GridIn` for possible arguments. Returns the ``"_id"`` of the created file. @@ -103,10 +99,6 @@ - `data`: data to be written as a file. - `**kwargs` (optional): keyword arguments for file creation - .. versionadded:: 1.9 - The ability to write :class:`unicode`, if an `encoding` has - been specified as a keyword argument. - .. versionadded:: 1.6 """ grid_file = GridIn(self.__collection, **kwargs) @@ -129,81 +121,33 @@ """ return GridOut(self.__collection, file_id) - def get_version(self, filename=None, version=-1, **kwargs): - """Get a file from GridFS by ``"filename"`` or metadata fields. + def get_last_version(self, filename): + """Get a file from GridFS by ``"filename"``. - Returns a version of the file in GridFS whose filename matches - `filename` and whose metadata fields match the supplied keyword - arguments, as an instance of :class:`~gridfs.grid_file.GridOut`. 
- - Version numbering is a convenience atop the GridFS API provided - by MongoDB. If more than one file matches the query (either by - `filename` alone, by metadata fields, or by a combination of - both), then version ``-1`` will be the most recently uploaded - matching file, ``-2`` the second most recently - uploaded, etc. Version ``0`` will be the first version - uploaded, ``1`` the second version, etc. So if three versions - have been uploaded, then version ``0`` is the same as version - ``-3``, version ``1`` is the same as version ``-2``, and - version ``2`` is the same as version ``-1``. - - Raises :class:`~gridfs.errors.NoFile` if no such version of - that file exists. + Returns the most recently uploaded file in GridFS with the + name `filename` as an instance of + :class:`~gridfs.grid_file.GridOut`. Raises + :class:`~gridfs.errors.NoFile` if no such file exists. An index on ``{filename: 1, uploadDate: -1}`` will automatically be created when this method is called the first time. :Parameters: - - `filename`: ``"filename"`` of the file to get, or `None` - - `version` (optional): version of the file to get (defualts - to -1, the most recent version uploaded) - - `**kwargs` (optional): find files by custom metadata. - - .. versionchanged:: 1.10.1+ - `filename` defaults to None; - .. versionadded:: 1.10.1+ - accept keyword arguments to find files by custom metadata. - .. versionadded:: 1.9 + - `filename`: ``"filename"`` of the file to get + + .. 
versionadded:: 1.6 """ self.__files.ensure_index([("filename", ASCENDING), ("uploadDate", DESCENDING)]) - query = kwargs - if filename is not None: - query["filename"] = filename - - cursor = self.__files.find(query, ["_id"]) - if version < 0: - skip = abs(version) - 1 - cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING) - else: - cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING) + cursor = self.__files.find({"filename": filename}) + cursor.limit(-1).sort("uploadDate", DESCENDING) try: grid_file = cursor.next() return GridOut(self.__collection, grid_file["_id"]) except StopIteration: - raise NoFile("no version %d for filename %r" % (version, filename)) - - def get_last_version(self, filename=None, **kwargs): - """Get the most recent version of a file in GridFS by ``"filename"`` - or metadata fields. - - Equivalent to calling :meth:`get_version` with the default - `version` (``-1``). - - :Parameters: - - `filename`: ``"filename"`` of the file to get, or `None` - - `**kwargs` (optional): find files by custom metadata. - - .. versionchanged:: 1.10.1+ - `filename` defaults to None; - .. versionadded:: 1.10.1+ - accept keyword arguments to find files by custom metadata. See - :meth:`get_version`. - .. versionadded:: 1.6 - """ - return self.get_version(filename=filename, **kwargs) + raise NoFile("no file in gridfs with filename %r" % filename) # TODO add optional safe mode for chunk removal? def delete(self, file_id): @@ -217,9 +161,6 @@ file. Care should be taken to avoid concurrent reads to a file while it is being deleted. - .. note:: Deletes of non-existent files are considered successful - since the end result is the same: no file with that _id remains. - :Parameters: - `file_id`: ``"_id"`` of the file to delete @@ -237,45 +178,6 @@ """ return self.__files.distinct("filename") - def exists(self, document_or_id=None, **kwargs): - """Check if a file exists in this instance of :class:`GridFS`. 
- - The file to check for can be specified by the value of it's - ``_id`` key, or by passing in a query document. A query - document can be passed in as dictionary, or by using keyword - arguments. Thus, the following three calls are equivalent: - - >>> fs.exists(file_id) - >>> fs.exists({"_id": file_id}) - >>> fs.exists(_id=file_id) - - As are the following two calls: - - >>> fs.exists({"filename": "mike.txt"}) - >>> fs.exists(filename="mike.txt") - - And the following two: - - >>> fs.exists({"foo": {"$gt": 12}}) - >>> fs.exists(foo={"$gt": 12}) - - Returns ``True`` if a matching file exists, ``False`` - otherwise. Calls to :meth:`exists` will not automatically - create appropriate indexes; application developers should be - sure to create indexes if needed and as appropriate. - - :Parameters: - - `document_or_id` (optional): query document, or _id of the - document to check for - - `**kwargs` (optional): keyword arguments are used as a - query document, if they're present. - - .. versionadded:: 1.8 - """ - if kwargs: - return self.__files.find_one(kwargs, ["_id"]) is not None - return self.__files.find_one(document_or_id, ["_id"]) is not None - def open(self, *args, **kwargs): """No longer supported. 
diff -Nru pymongo-1.11/MANIFEST.in pymongo-1.7/MANIFEST.in --- pymongo-1.11/MANIFEST.in 2011-03-21 21:41:46.000000000 +0000 +++ pymongo-1.7/MANIFEST.in 2010-05-19 14:01:01.000000000 +0000 @@ -6,4 +6,4 @@ recursive-include tools *.py include tools/README.rst recursive-include test *.py -recursive-include bson *.h +recursive-include pymongo *.h diff -Nru pymongo-1.11/PKG-INFO pymongo-1.7/PKG-INFO --- pymongo-1.11/PKG-INFO 2011-05-05 22:31:25.000000000 +0000 +++ pymongo-1.7/PKG-INFO 2010-06-17 16:50:49.000000000 +0000 @@ -1,39 +1,26 @@ Metadata-Version: 1.0 Name: pymongo -Version: 1.11 +Version: 1.7 Summary: Python driver for MongoDB Home-page: http://github.com/mongodb/mongo-python-driver -Author: Bernie Hackett -Author-email: bernie@10gen.com +Author: Mike Dirolf +Author-email: mongodb-user@googlegroups.com License: Apache License, Version 2.0 Description: ======= PyMongo ======= :Info: See `the mongo site `_ for more information. See `github `_ for the latest source. - :Author: Mike Dirolf - :Maintainer: Bernie Hackett + :Author: Mike Dirolf About ===== The PyMongo distribution contains tools for interacting with MongoDB - database from Python. The ``bson`` package is an implementation of - the `BSON format `_ for Python. The ``pymongo`` - package is a native Python driver for MongoDB. The ``gridfs`` package - is a `gridfs + database from Python. The ``pymongo`` package is a native Python + driver for MongoDB. The ``gridfs`` package is a `gridfs `_ implementation on top of ``pymongo``. - Issues / Questions / Feedback - ============================= - - Any issues with, questions about, or feedback for PyMongo should be - sent to the mongodb-user list on Google Groups. For confirmed issues - or feature requests, open a case on `jira - `_. Please do not e-mail any of the PyMongo - developers directly with issues or questions - you're more likely to - get an answer on the list. - Installation ============ @@ -109,7 +96,7 @@ .. 
_sphinx: http://sphinx.pocoo.org/ -Keywords: mongo,mongodb,pymongo,gridfs,bson +Keywords: mongo,mongodb,pymongo,gridfs Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers diff -Nru pymongo-1.11/pymongo/binary.py pymongo-1.7/pymongo/binary.py --- pymongo-1.11/pymongo/binary.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/pymongo/binary.py 2010-06-17 15:37:47.000000000 +0000 @@ -12,4 +12,96 @@ # See the License for the specific language governing permissions and # limitations under the License. -from bson.binary import * +"""Tools for representing binary data to be stored in MongoDB. +""" + +BINARY_SUBTYPE = 0 +"""BSON binary subtype for binary data. + +This is becomming the default subtype and should be the most commonly used. + +.. versionadded:: 1.5 +""" + +FUNCTION_SUBTYPE = 1 +"""BSON binary subtype for functions. + +.. versionadded:: 1.5 +""" + +OLD_BINARY_SUBTYPE = 2 +"""Old BSON binary subtype for binary data. + +This is still the default subtype, but that is changing to :data:`BINARY_SUBTYPE`. + +.. versionadded:: 1.7 +""" + +UUID_SUBTYPE = 3 +"""BSON binary subtype for a UUID. + +:class:`uuid.UUID` instances will automatically be encoded +by :mod:`~pymongo.bson` using this subtype. + +.. versionadded:: 1.5 +""" + +MD5_SUBTYPE = 5 +"""BSON binary subtype for an MD5 hash. + +.. versionadded:: 1.5 +""" + +USER_DEFINED_SUBTYPE = 128 +"""BSON binary subtype for any user defined structure. + +.. versionadded:: 1.5 +""" + + +class Binary(str): + """Representation of binary data to be stored in or retrieved from MongoDB. + + This is necessary because we want to store Python strings as the + BSON string type. We need to wrap binary data so we can tell the + difference between what should be considered binary data and what + should be considered a string when we encode to BSON. + + Raises TypeError if `data` is not an instance of str or `subtype` + is not an instance of int. 
Raises ValueError if `subtype` is not + in [0, 256). + + :Parameters: + - `data`: the binary data to represent + - `subtype` (optional): the `binary subtype + `_ + to use + """ + + def __new__(cls, data, subtype=OLD_BINARY_SUBTYPE): + if not isinstance(data, str): + raise TypeError("data must be an instance of str") + if not isinstance(subtype, int): + raise TypeError("subtype must be an instance of int") + if subtype >= 256 or subtype < 0: + raise ValueError("subtype must be contained in [0, 256)") + self = str.__new__(cls, data) + self.__subtype = subtype + return self + + @property + def subtype(self): + """Subtype of this binary data. + """ + return self.__subtype + + def __eq__(self, other): + if isinstance(other, Binary): + return (self.__subtype, str(self)) == (other.subtype, str(other)) + # We don't return NotImplemented here because if we did then + # Binary("foo") == "foo" would return True, since Binary is a + # subclass of str... + return False + + def __repr__(self): + return "Binary(%s, %s)" % (str.__repr__(self), self.__subtype) diff -Nru pymongo-1.11/pymongo/bson.py pymongo-1.7/pymongo/bson.py --- pymongo-1.11/pymongo/bson.py 1970-01-01 00:00:00.000000000 +0000 +++ pymongo-1.7/pymongo/bson.py 2010-06-17 15:37:47.000000000 +0000 @@ -0,0 +1,454 @@ +# Copyright 2009-2010 10gen, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools for dealing with Mongo's BSON data representation. 
+ +Generally not needed to be used by application developers.""" + +import struct +import re +import datetime +import calendar + +from pymongo.binary import Binary +from pymongo.code import Code +from pymongo.dbref import DBRef +from pymongo.errors import (InvalidBSON, + InvalidDocument, + InvalidName, + InvalidStringData) +from pymongo.max_key import MaxKey +from pymongo.min_key import MinKey +from pymongo.objectid import ObjectId +from pymongo.son import SON +from timestamp import Timestamp + + +try: + import _cbson + _use_c = True +except ImportError: + _use_c = False + +try: + import uuid + _use_uuid = True +except ImportError: + _use_uuid = False + + +# This sort of sucks, but seems to be as good as it gets... +RE_TYPE = type(re.compile("")) + + +def _get_int(data, as_class=None, unsigned=False): + format = unsigned and "I" or "i" + try: + value = struct.unpack("<%s" % format, data[:4])[0] + except struct.error: + raise InvalidBSON() + + return (value, data[4:]) + + +def _get_c_string(data, length=None): + if length is None: + try: + length = data.index("\x00") + except ValueError: + raise InvalidBSON() + + return (unicode(data[:length], "utf-8"), data[length + 1:]) + + +def _make_c_string(string, check_null=False): + if check_null and "\x00" in string: + raise InvalidDocument("BSON keys / regex patterns must not " + "contain a NULL character") + if isinstance(string, unicode): + return string.encode("utf-8") + "\x00" + else: + try: + string.decode("utf-8") + return string + "\x00" + except: + raise InvalidStringData("strings in documents must be valid " + "UTF-8: %r" % string) + + +def _get_number(data, as_class): + return (struct.unpack(" 2 ** 64 / 2 - 1 or value < -2 ** 64 / 2: + raise OverflowError("MongoDB can only handle up to 8-byte ints") + if value > 2 ** 32 / 2 - 1 or value < -2 ** 32 / 2: + return "\x12" + name + struct.pack(" 4 * 1024 * 1024: + raise InvalidDocument("document too large - BSON documents are" + "limited to 4 MB") + return 
struct.pack(" 4 * 1024 * 1024: + raise InvalidBSON("BSON documents are limited to 4MB") + + try: + (_, remainder) = _bson_to_dict(bson, dict) + return remainder == "" + except: + return False + + +class BSON(str): + """BSON data. + + Represents binary data storable in and retrievable from Mongo. + """ + + @classmethod + def from_dict(cls, dct, check_keys=False): + """Create a new :class:`BSON` instance from a mapping type + (like :class:`dict`). + + Raises :class:`TypeError` if `dct` is not a mapping type, or + contains keys that are not instances of :class:`basestring`. + Raises :class:`~pymongo.errors.InvalidDocument` if `dct` + cannot be converted to :class:`BSON`. + + :Parameters: + - `dct`: mapping type representing a document + - `check_keys` (optional): check if keys start with '$' or + contain '.', raising :class:`~pymongo.errors.InvalidName` + in either case + """ + return cls(_dict_to_bson(dct, check_keys)) + + def to_dict(self, as_class=dict): + """Convert this BSON data to a mapping type. + + The default type to use is :class:`dict`. This can be replaced + using the `as_class` parameter. + + :Parameters: + - `as_class` (optional): the class to use for the resulting + document + + .. versionadded:: 1.7 + the `as_class` parameter + """ + (document, _) = _bson_to_dict(self, as_class) + return document diff -Nru pymongo-1.11/pymongo/_cbsonmodule.c pymongo-1.7/pymongo/_cbsonmodule.c --- pymongo-1.11/pymongo/_cbsonmodule.c 1970-01-01 00:00:00.000000000 +0000 +++ pymongo-1.7/pymongo/_cbsonmodule.c 2010-06-04 18:01:26.000000000 +0000 @@ -0,0 +1,1737 @@ +/* + * Copyright 2009-2010 10gen, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * This file contains C implementations of some of the functions needed by the + * bson module. If possible, these implementations should be used to speed up + * BSON encoding and decoding. + * + * TODO The filename is a bit of a misnomer now - probably should be something + * like _cspeedupsmodule - we do more than just BSON stuff in this C module. + */ + + +#include + +#include +#include + +#include "time64.h" +#include "encoding_helpers.h" + +static PyObject* SON = NULL; +static PyObject* Binary = NULL; +static PyObject* Code = NULL; +static PyObject* ObjectId = NULL; +static PyObject* DBRef = NULL; +static PyObject* RECompile = NULL; +static PyObject* UUID = NULL; +static PyObject* Timestamp = NULL; +static PyObject* MinKey = NULL; +static PyObject* MaxKey = NULL; +static PyTypeObject* REType = NULL; + +#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) +typedef int Py_ssize_t; +#define PY_SSIZE_T_MAX INT_MAX +#define PY_SSIZE_T_MIN INT_MIN +#endif + +#if PY_VERSION_HEX < 0x02050000 +#define WARN(category, message) \ + PyErr_Warn((category), (message)) +#else +#define WARN(category, message) \ + PyErr_WarnEx((category), (message), 1) +#endif + +#define INITIAL_BUFFER_SIZE 256 + +/* Maximum number of regex flags */ +#define FLAGS_SIZE 7 + +#if defined(WIN32) || defined(_MSC_VER) +/* This macro is basically an implementation of asprintf for win32 + * We get the length of the int as string and malloc a buffer for it, + * returning -1 if that malloc fails. 
We then actually print to the + * buffer to get the string value as an int. Like asprintf, the result + * must be explicitly free'd when done being used. + */ +#if defined(_MSC_VER) && (_MSC_VER >= 1400) +#define INT2STRING(buffer, i) \ + *(buffer) = malloc(_scprintf("%d", (i)) + 1), \ + (!(buffer) ? \ + -1 : \ + _snprintf_s(*(buffer), \ + _scprintf("%d", (i)) + 1, \ + _scprintf("%d", (i)) + 1, \ + "%d", \ + (i))) +#define STRCAT(dest, n, src) strcat_s((dest), (n), (src)) +#else +#define INT2STRING(buffer, i) \ + *(buffer) = malloc(_scprintf("%d", (i)) + 1), \ + (!(buffer) ? \ + -1 : \ + _snprintf(*(buffer), \ + _scprintf("%d", (i)) + 1, \ + "%d", \ + (i))) +#define STRCAT(dest, n, src) strcat((dest), (src)) +#endif +#else +#define INT2STRING(buffer, i) asprintf((buffer), "%d", (i)) +#define STRCAT(dest, n, src) strcat((dest), (src)) +#endif + + +/* Date stuff */ +static PyObject* datetime_from_millis(long long millis) { + int microseconds = (millis % 1000) * 1000; + Time64_T seconds = millis / 1000; + struct TM timeinfo; + gmtime64_r(&seconds, &timeinfo); + + return PyDateTime_FromDateAndTime(timeinfo.tm_year + 1900, + timeinfo.tm_mon + 1, + timeinfo.tm_mday, + timeinfo.tm_hour, + timeinfo.tm_min, + timeinfo.tm_sec, + microseconds); +} + +static long long millis_from_datetime(PyObject* datetime) { + struct TM timeinfo; + long long millis; + + timeinfo.tm_year = PyDateTime_GET_YEAR(datetime) - 1900; + timeinfo.tm_mon = PyDateTime_GET_MONTH(datetime) - 1; + timeinfo.tm_mday = PyDateTime_GET_DAY(datetime); + timeinfo.tm_hour = PyDateTime_DATE_GET_HOUR(datetime); + timeinfo.tm_min = PyDateTime_DATE_GET_MINUTE(datetime); + timeinfo.tm_sec = PyDateTime_DATE_GET_SECOND(datetime); + + millis = timegm64(&timeinfo) * 1000; + millis += PyDateTime_DATE_GET_MICROSECOND(datetime) / 1000; + return millis; +} + + +/* A buffer representing some data being encoded to BSON. 
*/ +typedef struct { + char* buffer; + int size; + int position; +} bson_buffer; + +static int write_dict(bson_buffer* buffer, PyObject* dict, + unsigned char check_keys, unsigned char top_level); +static PyObject* elements_to_dict(const char* string, int max, PyObject* as_class); + +static bson_buffer* buffer_new(void) { + bson_buffer* buffer; + buffer = (bson_buffer*)malloc(sizeof(bson_buffer)); + if (!buffer) { + PyErr_NoMemory(); + return NULL; + } + buffer->size = INITIAL_BUFFER_SIZE; + buffer->position = 0; + buffer->buffer = (char*)malloc(INITIAL_BUFFER_SIZE); + if (!buffer->buffer) { + PyErr_NoMemory(); + return NULL; + } + return buffer; +} + +static void buffer_free(bson_buffer* buffer) { + if (buffer == NULL) { + return; + } + free(buffer->buffer); + free(buffer); +} + +/* returns zero on failure */ +static int buffer_resize(bson_buffer* buffer, int min_length) { + int size = buffer->size; + if (size >= min_length) { + return 1; + } + while (size < min_length) { + size *= 2; + } + buffer->buffer = (char*)realloc(buffer->buffer, size); + if (!buffer->buffer) { + PyErr_NoMemory(); + return 0; + } + buffer->size = size; + return 1; +} + +/* returns zero on failure */ +static int buffer_assure_space(bson_buffer* buffer, int size) { + if (buffer->position + size <= buffer->size) { + return 1; + } + return buffer_resize(buffer, buffer->position + size); +} + +/* returns offset for writing, or -1 on failure */ +static int buffer_save_bytes(bson_buffer* buffer, int size) { + int position; + + if (!buffer_assure_space(buffer, size)) { + return -1; + } + position = buffer->position; + buffer->position += size; + return position; +} + +/* returns zero on failure */ +static int buffer_write_bytes(bson_buffer* buffer, const char* bytes, int size) { + if (!buffer_assure_space(buffer, size)) { + return 0; + } + memcpy(buffer->buffer + buffer->position, bytes, size); + buffer->position += size; + return 1; +} + +/* returns 0 on failure */ +static int 
write_string(bson_buffer* buffer, PyObject* py_string) { + Py_ssize_t string_length; + const char* string = PyString_AsString(py_string); + if (!string) { + return 1; + } + string_length = PyString_Size(py_string) + 1; + + if (!buffer_write_bytes(buffer, (const char*)&string_length, 4)) { + return 0; + } + if (!buffer_write_bytes(buffer, string, string_length)) { + return 0; + } + return 1; +} + +/* Get an error class from the pymongo.errors module. + * + * Returns a new ref */ +static PyObject* _error(char* name) { + PyObject* error; + PyObject* errors = PyImport_ImportModule("pymongo.errors"); + if (!errors) { + return NULL; + } + error = PyObject_GetAttrString(errors, name); + Py_DECREF(errors); + return error; +} + +/* Reload a cached Python object. + * + * Returns non-zero on failure. */ +static int _reload_object(PyObject** object, char* module_name, char* object_name) { + PyObject* module; + + module = PyImport_ImportModule(module_name); + if (!module) { + return 1; + } + + *object = PyObject_GetAttrString(module, object_name); + Py_DECREF(module); + + return (*object) ? 0 : 2; +} + +/* Reload all cached Python objects. + * + * Returns non-zero on failure. */ +static int _reload_python_objects(void) { + if (_reload_object(&SON, "pymongo.son", "SON") || + _reload_object(&Binary, "pymongo.binary", "Binary") || + _reload_object(&Code, "pymongo.code", "Code") || + _reload_object(&ObjectId, "pymongo.objectid", "ObjectId") || + _reload_object(&DBRef, "pymongo.dbref", "DBRef") || + _reload_object(&Timestamp, "pymongo.timestamp", "Timestamp") || + _reload_object(&MinKey, "pymongo.min_key", "MinKey") || + _reload_object(&MaxKey, "pymongo.max_key", "MaxKey") || + _reload_object(&RECompile, "re", "compile")) { + return 1; + } + /* If we couldn't import uuid then we must be on 2.4. Just ignore. */ + if (_reload_object(&UUID, "uuid", "UUID") == 1) { + UUID = NULL; + PyErr_Clear(); + } + /* Reload our REType hack too. 
*/ + REType = PyObject_CallFunction(RECompile, "O", + PyString_FromString(""))->ob_type; + return 0; +} + +/* TODO our platform better be little-endian w/ 4-byte ints! */ +/* Write a single value to the buffer (also write it's type_byte, for which + * space has already been reserved. + * + * returns 0 on failure */ +static int write_element_to_buffer(bson_buffer* buffer, int type_byte, PyObject* value, unsigned char check_keys, unsigned char first_attempt) { + /* TODO this isn't quite the same as the Python version: + * here we check for type equivalence, not isinstance in some + * places. */ + if (PyInt_CheckExact(value) || PyLong_CheckExact(value)) { + const long long_value = PyInt_AsLong(value); + const int int_value = (int)long_value; + if (PyErr_Occurred() || long_value != int_value) { /* Overflow */ + long long long_long_value; + PyErr_Clear(); + long_long_value = PyLong_AsLongLong(value); + if (PyErr_Occurred()) { /* Overflow AGAIN */ + PyErr_SetString(PyExc_OverflowError, + "MongoDB can only handle up to 8-byte ints"); + return 0; + } + *(buffer->buffer + type_byte) = 0x12; + return buffer_write_bytes(buffer, (const char*)&long_long_value, 8); + } + *(buffer->buffer + type_byte) = 0x10; + return buffer_write_bytes(buffer, (const char*)&int_value, 4); + } else if (PyBool_Check(value)) { + const long bool = PyInt_AsLong(value); + const char c = bool ? 
0x01 : 0x00; + *(buffer->buffer + type_byte) = 0x08; + return buffer_write_bytes(buffer, &c, 1); + } else if (PyFloat_CheckExact(value)) { + const double d = PyFloat_AsDouble(value); + *(buffer->buffer + type_byte) = 0x01; + return buffer_write_bytes(buffer, (const char*)&d, 8); + } else if (value == Py_None) { + *(buffer->buffer + type_byte) = 0x0A; + return 1; + } else if (PyDict_Check(value)) { + *(buffer->buffer + type_byte) = 0x03; + return write_dict(buffer, value, check_keys, 0); + } else if (PyList_Check(value) || PyTuple_Check(value)) { + int start_position, + length_location, + items, + length, + i; + char zero = 0; + + *(buffer->buffer + type_byte) = 0x04; + start_position = buffer->position; + + /* save space for length */ + length_location = buffer_save_bytes(buffer, 4); + if (length_location == -1) { + return 0; + } + + items = PySequence_Size(value); + for(i = 0; i < items; i++) { + int list_type_byte = buffer_save_bytes(buffer, 1); + char* name; + PyObject* item_value; + + if (type_byte == -1) { + return 0; + } + if (INT2STRING(&name, i) < 0 || !name) { + PyErr_NoMemory(); + return 0; + } + if (!buffer_write_bytes(buffer, name, strlen(name) + 1)) { + free(name); + return 0; + } + free(name); + + item_value = PySequence_GetItem(value, i); + if (!write_element_to_buffer(buffer, list_type_byte, item_value, check_keys, 1)) { + Py_DECREF(item_value); + return 0; + } + Py_DECREF(item_value); + } + + /* write null byte and fill in length */ + if (!buffer_write_bytes(buffer, &zero, 1)) { + return 0; + } + length = buffer->position - start_position; + memcpy(buffer->buffer + length_location, &length, 4); + return 1; + } else if (PyObject_IsInstance(value, Binary)) { + PyObject* subtype_object; + + *(buffer->buffer + type_byte) = 0x05; + subtype_object = PyObject_GetAttrString(value, "subtype"); + if (!subtype_object) { + return 0; + } + { + const long long_subtype = PyInt_AsLong(subtype_object); + const char subtype = (const char)long_subtype; + const int 
length = PyString_Size(value); + + Py_DECREF(subtype_object); + if (subtype == 2) { + const int other_length = length + 4; + if (!buffer_write_bytes(buffer, (const char*)&other_length, 4)) { + return 0; + } + if (!buffer_write_bytes(buffer, &subtype, 1)) { + return 0; + } + } + if (!buffer_write_bytes(buffer, (const char*)&length, 4)) { + return 0; + } + if (subtype != 2) { + if (!buffer_write_bytes(buffer, &subtype, 1)) { + return 0; + } + } + { + const char* string = PyString_AsString(value); + if (!string) { + return 0; + } + if (!buffer_write_bytes(buffer, string, length)) { + return 0; + } + } + } + return 1; + } else if (UUID && PyObject_IsInstance(value, UUID)) { + // Just a special case of Binary above, but simpler to do as a separate case + + // UUID is always 16 bytes, subtype 3 + int length = 16; + const char subtype = 3; + + PyObject* bytes; + + *(buffer->buffer + type_byte) = 0x05; + if (!buffer_write_bytes(buffer, (const char*)&length, 4)) { + return 0; + } + if (!buffer_write_bytes(buffer, &subtype, 1)) { + return 0; + } + + bytes = PyObject_GetAttrString(value, "bytes"); + if (!bytes) { + return 0; + } + if (!buffer_write_bytes(buffer, PyString_AsString(bytes), length)) { + Py_DECREF(bytes); + return 0; + } + Py_DECREF(bytes); + return 1; + } else if (PyObject_IsInstance(value, Code)) { + int start_position, + length_location, + length; + PyObject* scope; + + *(buffer->buffer + type_byte) = 0x0F; + + start_position = buffer->position; + /* save space for length */ + length_location = buffer_save_bytes(buffer, 4); + if (length_location == -1) { + return 0; + } + + if (!write_string(buffer, value)) { + return 0; + } + + scope = PyObject_GetAttrString(value, "scope"); + if (!scope) { + return 0; + } + if (!write_dict(buffer, scope, 0, 0)) { + Py_DECREF(scope); + return 0; + } + Py_DECREF(scope); + + length = buffer->position - start_position; + memcpy(buffer->buffer + length_location, &length, 4); + return 1; + } else if (PyString_Check(value)) { + int 
result; + result_t status; + + *(buffer->buffer + type_byte) = 0x02; + status = check_string((const unsigned char*)PyString_AsString(value), + PyString_Size(value), 1, 0); + if (status == NOT_UTF_8) { + PyObject* InvalidStringData = _error("InvalidStringData"); + PyErr_SetString(InvalidStringData, + "strings in documents must be valid UTF-8"); + Py_DECREF(InvalidStringData); + return 0; + } + result = write_string(buffer, value); + return result; + } else if (PyUnicode_Check(value)) { + PyObject* encoded; + int result; + + *(buffer->buffer + type_byte) = 0x02; + encoded = PyUnicode_AsUTF8String(value); + if (!encoded) { + return 0; + } + result = write_string(buffer, encoded); + Py_DECREF(encoded); + return result; + } else if (PyDateTime_CheckExact(value)) { + long long millis; + PyObject* utcoffset = PyObject_CallMethod(value, "utcoffset", NULL); + if (utcoffset != Py_None) { + PyObject* result = PyNumber_Subtract(value, utcoffset); + Py_DECREF(utcoffset); + if (!result) { + return 0; + } + millis = millis_from_datetime(result); + Py_DECREF(result); + } else { + millis = millis_from_datetime(value); + } + *(buffer->buffer + type_byte) = 0x09; + return buffer_write_bytes(buffer, (const char*)&millis, 8); + } else if (PyObject_IsInstance(value, ObjectId)) { + PyObject* pystring = PyObject_GetAttrString(value, "_ObjectId__id"); + if (!pystring) { + return 0; + } + { + const char* as_string = PyString_AsString(pystring); + if (!as_string) { + Py_DECREF(pystring); + return 0; + } + if (!buffer_write_bytes(buffer, as_string, 12)) { + Py_DECREF(pystring); + return 0; + } + Py_DECREF(pystring); + *(buffer->buffer + type_byte) = 0x07; + } + return 1; + } else if (PyObject_IsInstance(value, DBRef)) { + PyObject* as_doc = PyObject_CallMethod(value, "as_doc", NULL); + if (!as_doc) { + return 0; + } + if (!write_dict(buffer, as_doc, 0, 0)) { + Py_DECREF(as_doc); + return 0; + } + Py_DECREF(as_doc); + *(buffer->buffer + type_byte) = 0x03; + return 1; + } else if 
(PyObject_IsInstance(value, Timestamp)) { + PyObject* obj; + long i; + + obj = PyObject_GetAttrString(value, "inc"); + if (!obj) { + return 0; + } + i = PyInt_AsLong(obj); + Py_DECREF(obj); + if (!buffer_write_bytes(buffer, (const char*)&i, 4)) { + return 0; + } + + obj = PyObject_GetAttrString(value, "time"); + if (!obj) { + return 0; + } + i = PyInt_AsLong(obj); + Py_DECREF(obj); + if (!buffer_write_bytes(buffer, (const char*)&i, 4)) { + return 0; + } + + *(buffer->buffer + type_byte) = 0x11; + return 1; + } + else if (PyObject_TypeCheck(value, REType)) { + PyObject* py_flags = PyObject_GetAttrString(value, "flags"); + PyObject* py_pattern; + PyObject* encoded_pattern; + long int_flags; + char flags[FLAGS_SIZE]; + char check_utf8 = 0; + int pattern_length, + flags_length; + result_t status; + + if (!py_flags) { + return 0; + } + int_flags = PyInt_AsLong(py_flags); + Py_DECREF(py_flags); + py_pattern = PyObject_GetAttrString(value, "pattern"); + if (!py_pattern) { + return 0; + } + + if (PyUnicode_Check(py_pattern)) { + encoded_pattern = PyUnicode_AsUTF8String(py_pattern); + Py_DECREF(py_pattern); + if (!encoded_pattern) { + return 0; + } + } else { + encoded_pattern = py_pattern; + check_utf8 = 1; + } + + status = check_string((const unsigned char*)PyString_AsString(encoded_pattern), + PyString_Size(encoded_pattern), check_utf8, 1); + if (status == NOT_UTF_8) { + PyObject* InvalidStringData = _error("InvalidStringData"); + PyErr_SetString(InvalidStringData, + "regex patterns must be valid UTF-8"); + Py_DECREF(InvalidStringData); + return 0; + } else if (status == HAS_NULL) { + PyObject* InvalidDocument = _error("InvalidDocument"); + PyErr_SetString(InvalidDocument, + "regex patterns must not contain the NULL byte"); + Py_DECREF(InvalidDocument); + return 0; + } + + { + const char* pattern = PyString_AsString(encoded_pattern); + pattern_length = strlen(pattern) + 1; + + if (!buffer_write_bytes(buffer, pattern, pattern_length)) { + Py_DECREF(encoded_pattern); + 
return 0; + } + } + Py_DECREF(encoded_pattern); + + flags[0] = 0; + /* TODO don't hardcode these */ + if (int_flags & 2) { + STRCAT(flags, FLAGS_SIZE, "i"); + } + if (int_flags & 4) { + STRCAT(flags, FLAGS_SIZE, "l"); + } + if (int_flags & 8) { + STRCAT(flags, FLAGS_SIZE, "m"); + } + if (int_flags & 16) { + STRCAT(flags, FLAGS_SIZE, "s"); + } + if (int_flags & 32) { + STRCAT(flags, FLAGS_SIZE, "u"); + } + if (int_flags & 64) { + STRCAT(flags, FLAGS_SIZE, "x"); + } + flags_length = strlen(flags) + 1; + if (!buffer_write_bytes(buffer, flags, flags_length)) { + return 0; + } + *(buffer->buffer + type_byte) = 0x0B; + return 1; + } else if (PyObject_IsInstance(value, MinKey)) { + *(buffer->buffer + type_byte) = 0xFF; + return 1; + } else if (PyObject_IsInstance(value, MaxKey)) { + *(buffer->buffer + type_byte) = 0x7F; + return 1; + } else if (first_attempt) { + /* Try reloading the modules and having one more go at it. */ + if (WARN(PyExc_RuntimeWarning, "couldn't encode - reloading python " + "modules and trying again. 
if you see this without getting " + "an InvalidDocument exception please see http://api.mongodb" + ".org/python/current/faq.html#does-pymongo-work-with-mod-" + "wsgi") == -1) { + return 0; + } + if (_reload_python_objects()) { + return 0; + } + return write_element_to_buffer(buffer, type_byte, value, check_keys, 0); + } + { + PyObject* errmsg = PyString_FromString("Cannot encode object: "); + PyObject* repr = PyObject_Repr(value); + PyObject* InvalidDocument = _error("InvalidDocument"); + PyString_ConcatAndDel(&errmsg, repr); + PyErr_SetString(InvalidDocument, PyString_AsString(errmsg)); + Py_DECREF(errmsg); + Py_DECREF(InvalidDocument); + return 0; + } +} + +static int check_key_name(const char* name, + const Py_ssize_t name_length) { + int i; + if (name_length > 0 && name[0] == '$') { + PyObject* InvalidName = _error("InvalidName"); + PyObject* errmsg = PyString_FromFormat("key '%s' must not start with '$'", name); + PyErr_SetString(InvalidName, PyString_AsString(errmsg)); + Py_DECREF(errmsg); + Py_DECREF(InvalidName); + return 0; + } + for (i = 0; i < name_length; i++) { + if (name[i] == '.') { + PyObject* InvalidName = _error("InvalidName"); + PyObject* errmsg = PyString_FromFormat("key '%s' must not contain '.'", name); + PyErr_SetString(InvalidName, PyString_AsString(errmsg)); + Py_DECREF(errmsg); + Py_DECREF(InvalidName); + return 0; + } + } + return 1; +} + +/* Write a (key, value) pair to the buffer. + * + * Returns 0 on failure */ +static int write_pair(bson_buffer* buffer, const char* name, Py_ssize_t name_length, PyObject* value, unsigned char check_keys, unsigned char allow_id) { + int type_byte; + + /* Don't write any _id elements unless we're explicitly told to - + * _id has to be written first so we do so, but don't bother + * deleting it from the dictionary being written. 
*/ + if (!allow_id && strcmp(name, "_id") == 0) { + return 1; + } + + type_byte = buffer_save_bytes(buffer, 1); + if (type_byte == -1) { + return 0; + } + if (check_keys && !check_key_name(name, name_length)) { + return 0; + } + if (!buffer_write_bytes(buffer, name, name_length + 1)) { + return 0; + } + if (!write_element_to_buffer(buffer, type_byte, value, check_keys, 1)) { + return 0; + } + return 1; +} + +static int decode_and_write_pair(bson_buffer* buffer, + PyObject* key, PyObject* value, + unsigned char check_keys, unsigned char top_level) { + PyObject* encoded; + if (PyUnicode_Check(key)) { + result_t status; + encoded = PyUnicode_AsUTF8String(key); + if (!encoded) { + return 0; + } + status = check_string((const unsigned char*)PyString_AsString(encoded), + PyString_Size(encoded), 0, 1); + + if (status == HAS_NULL) { + PyObject* InvalidDocument = _error("InvalidDocument"); + PyErr_SetString(InvalidDocument, + "Key names must not contain the NULL byte"); + Py_DECREF(InvalidDocument); + return 0; + } + } else if (PyString_Check(key)) { + result_t status; + encoded = key; + Py_INCREF(encoded); + + status = check_string((const unsigned char*)PyString_AsString(encoded), + PyString_Size(encoded), 1, 1); + + if (status == NOT_UTF_8) { + PyObject* InvalidStringData = _error("InvalidStringData"); + PyErr_SetString(InvalidStringData, + "strings in documents must be valid UTF-8"); + Py_DECREF(InvalidStringData); + return 0; + } else if (status == HAS_NULL) { + PyObject* InvalidDocument = _error("InvalidDocument"); + PyErr_SetString(InvalidDocument, + "Key names must not contain the NULL byte"); + Py_DECREF(InvalidDocument); + return 0; + } + } else { + PyObject* InvalidDocument = _error("InvalidDocument"); + PyObject* errmsg = PyString_FromString("documents must have only string keys, key was "); + PyObject* repr = PyObject_Repr(key); + PyString_ConcatAndDel(&errmsg, repr); + PyErr_SetString(InvalidDocument, PyString_AsString(errmsg)); + Py_DECREF(InvalidDocument); + 
Py_DECREF(errmsg); + return 0; + } + + /* If top_level is True, don't allow writing _id here - it was already written. */ + if (!write_pair(buffer, PyString_AsString(encoded), + PyString_Size(encoded), value, check_keys, !top_level)) { + Py_DECREF(encoded); + return 0; + } + + Py_DECREF(encoded); + return 1; +} + +static int write_son(bson_buffer* buffer, PyObject* dict, int start_position, + int length_location, unsigned char check_keys, + unsigned char top_level) { + PyObject* keys = PyObject_CallMethod(dict, "keys", NULL); + int items, + i; + if (!keys) { + return 0; + } + items = PyList_Size(keys); + for(i = 0; i < items; i++) { + PyObject* key; + PyObject* value; + + key = PyList_GetItem(keys, i); + if (!key) { + Py_DECREF(keys); + return 0; + } + value = PyDict_GetItem(dict, key); + if (!value || + !decode_and_write_pair(buffer, key, value, check_keys, top_level)) { + Py_DECREF(keys); + return 0; + } + } + Py_DECREF(keys); + return 1; +} + +/* returns 0 on failure */ +static int write_dict(bson_buffer* buffer, PyObject* dict, unsigned char check_keys, unsigned char top_level) { + int start_position = buffer->position; + char zero = 0; + int length; + + int is_dict = PyDict_Check(dict); + + /* save space for length */ + int length_location = buffer_save_bytes(buffer, 4); + if (length_location == -1) { + return 0; + } + + /* Write _id first if this is a top level doc. 
*/ + if (is_dict && top_level) { + PyObject* _id = PyDict_GetItemString(dict, "_id"); + if (_id) { + /* Don't bother checking keys, but do make sure we're allowed to + * write _id */ + if (!write_pair(buffer, "_id", 3, _id, 0, 1)) { + return 0; + } + } + } + + if (PyObject_IsInstance(dict, SON)) { + if (!write_son(buffer, dict, start_position, length_location, check_keys, top_level)) { + return 0; + } + } else if (is_dict) { + PyObject* key; + PyObject* value; + Py_ssize_t pos = 0; + + while (PyDict_Next(dict, &pos, &key, &value)) { + if (!decode_and_write_pair(buffer, key, value, check_keys, top_level)) { + return 0; + } + } + } else { + /* Try a reload! */ + _reload_python_objects(); + if (PyObject_IsInstance(dict, SON)) { + if (!write_son(buffer, dict, start_position, length_location, check_keys, top_level)) { + return 0; + } + } + else { + PyObject* errmsg = PyString_FromString("encoder expected a mapping type but got: "); + PyObject* repr = PyObject_Repr(dict); + PyString_ConcatAndDel(&errmsg, repr); + PyErr_SetString(PyExc_TypeError, PyString_AsString(errmsg)); + Py_DECREF(errmsg); + return 0; + } + } + + /* write null byte and fill in length */ + if (!buffer_write_bytes(buffer, &zero, 1)) { + return 0; + } + length = buffer->position - start_position; + if (length > 4 * 1024 * 1024) { + PyObject* InvalidDocument = _error("InvalidDocument"); + PyErr_SetString(InvalidDocument, "document too large - " + "BSON documents are limited to 4 MB"); + Py_DECREF(InvalidDocument); + return 0; + } + memcpy(buffer->buffer + length_location, &length, 4); + return 1; +} + +static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { + PyObject* dict; + PyObject* result; + unsigned char check_keys; + bson_buffer* buffer; + + if (!PyArg_ParseTuple(args, "Ob", &dict, &check_keys)) { + return NULL; + } + + buffer = buffer_new(); + if (!buffer) { + return NULL; + } + + if (!write_dict(buffer, dict, check_keys, 1)) { + buffer_free(buffer); + return NULL; + } + + /* 
objectify buffer */ + result = Py_BuildValue("s#", buffer->buffer, buffer->position); + buffer_free(buffer); + return result; +} + +/* add a lastError message on the end of the buffer. + * returns 0 on failure */ +static int add_last_error(bson_buffer* buffer, int request_id) { + /* message length: 62 */ + if (!buffer_write_bytes(buffer, "\x3E\x00\x00\x00", 4) || + !buffer_write_bytes(buffer, (const char*)&request_id, 4) || + !buffer_write_bytes(buffer, + "\x00\x00\x00\x00" /* responseTo */ + "\xd4\x07\x00\x00" /* opcode */ + "\x00\x00\x00\x00" /* options */ + "admin.$cmd\x00" /* collection name */ + "\x00\x00\x00\x00" /* skip */ + "\xFF\xFF\xFF\xFF" /* limit (-1) */ + "\x17\x00\x00\x00" /* {getlasterror: 1} */ + "\x10getlasterror" /* ... */ + "\x00\x01\x00\x00" /* ... */ + "\x00\x00", /* ... */ + 54)) { + return 0; + } + return 1; +} + +static PyObject* _cbson_insert_message(PyObject* self, PyObject* args) { + /* NOTE just using a random number as the request_id */ + int request_id = rand(); + char* collection_name = NULL; + int collection_name_length; + PyObject* docs; + int list_length; + int i; + unsigned char check_keys; + unsigned char safe; + bson_buffer* buffer; + int length_location; + PyObject* result; + + if (!PyArg_ParseTuple(args, "et#Obb", + "utf-8", + &collection_name, + &collection_name_length, + &docs, &check_keys, &safe)) { + return NULL; + } + + buffer = buffer_new(); + if (!buffer) { + PyMem_Free(collection_name); + return NULL; + } + + // save space for message length + length_location = buffer_save_bytes(buffer, 4); + if (length_location == -1 || + !buffer_write_bytes(buffer, (const char*)&request_id, 4) || + !buffer_write_bytes(buffer, + "\x00\x00\x00\x00" + "\xd2\x07\x00\x00" + "\x00\x00\x00\x00", + 12) || + !buffer_write_bytes(buffer, + collection_name, + collection_name_length + 1)) { + PyMem_Free(collection_name); + buffer_free(buffer); + return NULL; + } + + PyMem_Free(collection_name); + + list_length = PyList_Size(docs); + if 
(list_length <= 0) { + PyObject* InvalidOperation = _error("InvalidOperation"); + PyErr_SetString(InvalidOperation, "cannot do an empty bulk insert"); + Py_DECREF(InvalidOperation); + buffer_free(buffer); + return NULL; + } + for (i = 0; i < list_length; i++) { + PyObject* doc = PyList_GetItem(docs, i); + if (!write_dict(buffer, doc, check_keys, 1)) { + buffer_free(buffer); + return NULL; + } + } + + memcpy(buffer->buffer + length_location, &buffer->position, 4); + + if (safe) { + if (!add_last_error(buffer, request_id)) { + buffer_free(buffer); + return NULL; + } + } + + /* objectify buffer */ + result = Py_BuildValue("is#", request_id, + buffer->buffer, buffer->position); + buffer_free(buffer); + return result; +} + +static PyObject* _cbson_update_message(PyObject* self, PyObject* args) { + /* NOTE just using a random number as the request_id */ + int request_id = rand(); + char* collection_name = NULL; + int collection_name_length; + PyObject* doc; + PyObject* spec; + unsigned char multi; + unsigned char upsert; + unsigned char safe; + int options; + bson_buffer* buffer; + int length_location; + PyObject* result; + + if (!PyArg_ParseTuple(args, "et#bbOOb", + "utf-8", + &collection_name, + &collection_name_length, + &upsert, &multi, &spec, &doc, &safe)) { + return NULL; + } + + options = 0; + if (upsert) { + options += 1; + } + if (multi) { + options += 2; + } + buffer = buffer_new(); + if (!buffer) { + PyMem_Free(collection_name); + return NULL; + } + + // save space for message length + length_location = buffer_save_bytes(buffer, 4); + if (length_location == -1 || + !buffer_write_bytes(buffer, (const char*)&request_id, 4) || + !buffer_write_bytes(buffer, + "\x00\x00\x00\x00" + "\xd1\x07\x00\x00" + "\x00\x00\x00\x00", + 12) || + !buffer_write_bytes(buffer, + collection_name, + collection_name_length + 1) || + !buffer_write_bytes(buffer, (const char*)&options, 4) || + !write_dict(buffer, spec, 0, 1) || + !write_dict(buffer, doc, 0, 1)) { + buffer_free(buffer); + 
PyMem_Free(collection_name); + return NULL; + } + + PyMem_Free(collection_name); + + memcpy(buffer->buffer + length_location, &buffer->position, 4); + + if (safe) { + if (!add_last_error(buffer, request_id)) { + buffer_free(buffer); + return NULL; + } + } + + /* objectify buffer */ + result = Py_BuildValue("is#", request_id, + buffer->buffer, buffer->position); + buffer_free(buffer); + return result; +} + +static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { + /* NOTE just using a random number as the request_id */ + int request_id = rand(); + unsigned int options; + char* collection_name = NULL; + int collection_name_length; + int num_to_skip; + int num_to_return; + PyObject* query; + PyObject* field_selector = Py_None; + bson_buffer* buffer; + int length_location; + PyObject* result; + + if (!PyArg_ParseTuple(args, "Iet#iiO|O", + &options, + "utf-8", + &collection_name, + &collection_name_length, + &num_to_skip, &num_to_return, + &query, &field_selector)) { + return NULL; + } + buffer = buffer_new(); + if (!buffer) { + PyMem_Free(collection_name); + return NULL; + } + + // save space for message length + length_location = buffer_save_bytes(buffer, 4); + if (length_location == -1 || + !buffer_write_bytes(buffer, (const char*)&request_id, 4) || + !buffer_write_bytes(buffer, + "\x00\x00\x00\x00" + "\xd4\x07\x00\x00", 8) || + !buffer_write_bytes(buffer, (const char*)&options, 4) || + !buffer_write_bytes(buffer, + collection_name, + collection_name_length + 1) || + !buffer_write_bytes(buffer, (const char*)&num_to_skip, 4) || + !buffer_write_bytes(buffer, (const char*)&num_to_return, 4) || + !write_dict(buffer, query, 0, 1) || + ((field_selector != Py_None) && + !write_dict(buffer, field_selector, 0, 1))) { + buffer_free(buffer); + PyMem_Free(collection_name); + return NULL; + } + + PyMem_Free(collection_name); + + memcpy(buffer->buffer + length_location, &buffer->position, 4); + + /* objectify buffer */ + result = Py_BuildValue("is#", request_id, + 
buffer->buffer, buffer->position); + buffer_free(buffer); + return result; +} + +static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { + /* NOTE just using a random number as the request_id */ + int request_id = rand(); + char* collection_name = NULL; + int collection_name_length; + int num_to_return; + long long cursor_id; + bson_buffer* buffer; + int length_location; + PyObject* result; + + if (!PyArg_ParseTuple(args, "et#iL", + "utf-8", + &collection_name, + &collection_name_length, + &num_to_return, + &cursor_id)) { + return NULL; + } + buffer = buffer_new(); + if (!buffer) { + PyMem_Free(collection_name); + return NULL; + } + + // save space for message length + length_location = buffer_save_bytes(buffer, 4); + if (length_location == -1 || + !buffer_write_bytes(buffer, (const char*)&request_id, 4) || + !buffer_write_bytes(buffer, + "\x00\x00\x00\x00" + "\xd5\x07\x00\x00" + "\x00\x00\x00\x00", 12) || + !buffer_write_bytes(buffer, + collection_name, + collection_name_length + 1) || + !buffer_write_bytes(buffer, (const char*)&num_to_return, 4) || + !buffer_write_bytes(buffer, (const char*)&cursor_id, 8)) { + buffer_free(buffer); + PyMem_Free(collection_name); + return NULL; + } + + PyMem_Free(collection_name); + + memcpy(buffer->buffer + length_location, &buffer->position, 4); + + /* objectify buffer */ + result = Py_BuildValue("is#", request_id, + buffer->buffer, buffer->position); + buffer_free(buffer); + return result; +} + +static PyObject* get_value(const char* buffer, int* position, int type, + PyObject* as_class) { + PyObject* value; + switch (type) { + case 1: + { + double d; + memcpy(&d, buffer + *position, 8); + value = PyFloat_FromDouble(d); + if (!value) { + return NULL; + } + *position += 8; + break; + } + case 2: + case 13: + case 14: + { + int value_length = ((int*)(buffer + *position))[0] - 1; + *position += 4; + value = PyUnicode_DecodeUTF8(buffer + *position, value_length, "strict"); + if (!value) { + return NULL; + } + 
*position += value_length + 1; + break; + } + case 3: + { + int size; + memcpy(&size, buffer + *position, 4); + value = elements_to_dict(buffer + *position + 4, size - 5, as_class); + if (!value) { + return NULL; + } + + /* Decoding for DBRefs */ + if (strcmp(buffer + *position + 5, "$ref") == 0) { /* DBRef */ + PyObject* id = PyDict_GetItemString(value, "$id"); + PyObject* collection = PyDict_GetItemString(value, "$ref"); + PyObject* database = PyDict_GetItemString(value, "$db"); + + /* This works even if there is no $db since database will be NULL and + the call will be as if there were only two arguments specified. */ + value = PyObject_CallFunctionObjArgs(DBRef, collection, id, database, NULL); + } + + *position += size; + break; + } + case 4: + { + int size, + end; + + memcpy(&size, buffer + *position, 4); + end = *position + size - 1; + *position += 4; + + value = PyList_New(0); + if (!value) { + return NULL; + } + while (*position < end) { + PyObject* to_append; + + int type = (int)buffer[(*position)++]; + int key_size = strlen(buffer + *position); + *position += key_size + 1; /* just skip the key, they're in order. 
*/ + to_append = get_value(buffer, position, type, as_class); + if (!to_append) { + return NULL; + } + PyList_Append(value, to_append); + Py_DECREF(to_append); + } + (*position)++; + break; + } + case 5: + { + PyObject* data; + PyObject* st; + int length, + subtype; + + memcpy(&length, buffer + *position, 4); + subtype = (unsigned char)buffer[*position + 4]; + + if (subtype == 2) { + data = PyString_FromStringAndSize(buffer + *position + 9, length - 4); + } else { + data = PyString_FromStringAndSize(buffer + *position + 5, length); + } + if (!data) { + return NULL; + } + + if (subtype == 3 && UUID) { // Encode as UUID, not Binary + PyObject* kwargs; + PyObject* args = PyTuple_New(0); + if (!args) { + return NULL; + } + kwargs = PyDict_New(); + if (!kwargs) { + Py_DECREF(args); + return NULL; + } + + assert(length == 16); // UUID should always be 16 bytes + + PyDict_SetItemString(kwargs, "bytes", data); + value = PyObject_Call(UUID, args, kwargs); + + Py_DECREF(args); + Py_DECREF(kwargs); + Py_DECREF(data); + if (!value) { + return NULL; + } + + *position += length + 5; + break; + } + + st = PyInt_FromLong(subtype); + if (!st) { + Py_DECREF(data); + return NULL; + } + value = PyObject_CallFunctionObjArgs(Binary, data, st, NULL); + Py_DECREF(st); + Py_DECREF(data); + if (!value) { + return NULL; + } + *position += length + 5; + break; + } + case 6: + case 10: + { + value = Py_None; + Py_INCREF(value); + break; + } + case 7: + { + value = PyObject_CallFunction(ObjectId, "s#", buffer + *position, 12); + if (!value) { + return NULL; + } + *position += 12; + break; + } + case 8: + { + value = buffer[(*position)++] ? 
Py_True : Py_False; + Py_INCREF(value); + break; + } + case 9: + { + value = datetime_from_millis(*(long long*)(buffer + *position)); + *position += 8; + break; + } + case 11: + { + int flags_length, + flags, + i; + + int pattern_length = strlen(buffer + *position); + PyObject* pattern = PyUnicode_DecodeUTF8(buffer + *position, pattern_length, "strict"); + if (!pattern) { + return NULL; + } + *position += pattern_length + 1; + flags_length = strlen(buffer + *position); + flags = 0; + for (i = 0; i < flags_length; i++) { + if (buffer[*position + i] == 'i') { + flags |= 2; + } else if (buffer[*position + i] == 'l') { + flags |= 4; + } else if (buffer[*position + i] == 'm') { + flags |= 8; + } else if (buffer[*position + i] == 's') { + flags |= 16; + } else if (buffer[*position + i] == 'u') { + flags |= 32; + } else if (buffer[*position + i] == 'x') { + flags |= 64; + } + } + *position += flags_length + 1; + value = PyObject_CallFunction(RECompile, "Oi", pattern, flags); + Py_DECREF(pattern); + break; + } + case 12: + { + int collection_length; + PyObject* collection; + PyObject* id; + + *position += 4; + collection_length = strlen(buffer + *position); + collection = PyUnicode_DecodeUTF8(buffer + *position, collection_length, "strict"); + if (!collection) { + return NULL; + } + *position += collection_length + 1; + id = PyObject_CallFunction(ObjectId, "s#", buffer + *position, 12); + if (!id) { + Py_DECREF(collection); + return NULL; + } + *position += 12; + value = PyObject_CallFunctionObjArgs(DBRef, collection, id, NULL); + Py_DECREF(collection); + Py_DECREF(id); + break; + } + case 15: + { + int code_length, + scope_size; + PyObject* code; + PyObject* scope; + + *position += 8; + code_length = strlen(buffer + *position); + code = PyUnicode_DecodeUTF8(buffer + *position, code_length, "strict"); + if (!code) { + return NULL; + } + *position += code_length + 1; + + memcpy(&scope_size, buffer + *position, 4); + scope = elements_to_dict(buffer + *position + 4, 
scope_size - 5, (PyObject*)&PyDict_Type); + if (!scope) { + Py_DECREF(code); + return NULL; + } + *position += scope_size; + + value = PyObject_CallFunctionObjArgs(Code, code, scope, NULL); + Py_DECREF(code); + Py_DECREF(scope); + break; + } + case 16: + { + int i; + memcpy(&i, buffer + *position, 4); + value = PyInt_FromLong(i); + if (!value) { + return NULL; + } + *position += 4; + break; + } + case 17: + { + unsigned int time, inc; + memcpy(&inc, buffer + *position, 4); + memcpy(&time, buffer + *position + 4, 4); + value = PyObject_CallFunction(Timestamp, "II", time, inc); + if (!value) { + return NULL; + } + *position += 8; + break; + } + case 18: + { + long long ll; + memcpy(&ll, buffer + *position, 8); + value = PyLong_FromLongLong(ll); + if (!value) { + return NULL; + } + *position += 8; + break; + } + case -1: + { + value = PyObject_CallFunctionObjArgs(MinKey, NULL); + break; + } + case 127: + { + value = PyObject_CallFunctionObjArgs(MaxKey, NULL); + break; + } + default: + { + PyObject* InvalidDocument = _error("InvalidDocument"); + PyErr_SetString(InvalidDocument, "no c decoder for this type yet"); + Py_DECREF(InvalidDocument); + return NULL; + } + } + return value; +} + +static PyObject* elements_to_dict(const char* string, int max, PyObject* as_class) { + int position = 0; + PyObject* dict = PyObject_CallObject(as_class, NULL); + if (!dict) { + return NULL; + } + while (position < max) { + int type = (int)string[position++]; + int name_length = strlen(string + position); + PyObject* name = PyUnicode_DecodeUTF8(string + position, name_length, "strict"); + PyObject* value; + if (!name) { + return NULL; + } + position += name_length + 1; + value = get_value(string, &position, type, as_class); + if (!value) { + return NULL; + } + + PyObject_SetItem(dict, name, value); + Py_DECREF(name); + Py_DECREF(value); + } + return dict; +} + +static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { + unsigned int size; + Py_ssize_t total_size; + const 
char* string; + PyObject* bson; + PyObject* as_class; + PyObject* dict; + PyObject* remainder; + PyObject* result; + + if (!PyArg_ParseTuple(args, "OO", &bson, &as_class)) { + return NULL; + } + + if (!PyString_Check(bson)) { + PyErr_SetString(PyExc_TypeError, "argument to _bson_to_dict must be a string"); + return NULL; + } + total_size = PyString_Size(bson); + if (total_size < 5) { + PyObject* InvalidBSON = _error("InvalidBSON"); + PyErr_SetString(InvalidBSON, + "not enough data for a BSON document"); + Py_DECREF(InvalidBSON); + return NULL; + } + + string = PyString_AsString(bson); + if (!string) { + return NULL; + } + memcpy(&size, string, 4); + + if (total_size < size) { + PyObject* InvalidBSON = _error("InvalidBSON"); + PyErr_SetString(InvalidBSON, + "objsize too large"); + Py_DECREF(InvalidBSON); + return NULL; + } + + if (string[size - 1]) { + PyObject* InvalidBSON = _error("InvalidBSON"); + PyErr_SetString(InvalidBSON, + "bad eoo"); + Py_DECREF(InvalidBSON); + return NULL; + } + + dict = elements_to_dict(string + 4, size - 5, as_class); + if (!dict) { + return NULL; + } + remainder = PyString_FromStringAndSize(string + size, total_size - size); + if (!remainder) { + Py_DECREF(dict); + return NULL; + } + result = Py_BuildValue("OO", dict, remainder); + Py_DECREF(dict); + Py_DECREF(remainder); + return result; +} + +static PyObject* _cbson_to_dicts(PyObject* self, PyObject* args) { + unsigned int size; + Py_ssize_t total_size; + const char* string; + PyObject* bson; + PyObject* dict; + PyObject* result; + PyObject* as_class = (PyObject*)&PyDict_Type; + + if (!PyArg_ParseTuple(args, "O|O", &bson, &as_class)) { + return NULL; + } + + if (!PyString_Check(bson)) { + PyErr_SetString(PyExc_TypeError, "argument to _to_dicts must be a string"); + return NULL; + } + total_size = PyString_Size(bson); + string = PyString_AsString(bson); + if (!string) { + return NULL; + } + + result = PyList_New(0); + + while (total_size > 0) { + if (total_size < 5) { + PyObject* 
InvalidBSON = _error("InvalidBSON"); + PyErr_SetString(InvalidBSON, + "not enough data for a BSON document"); + Py_DECREF(InvalidBSON); + return NULL; + } + + memcpy(&size, string, 4); + + if (total_size < size) { + PyObject* InvalidBSON = _error("InvalidBSON"); + PyErr_SetString(InvalidBSON, + "objsize too large"); + Py_DECREF(InvalidBSON); + return NULL; + } + + if (string[size - 1]) { + PyObject* InvalidBSON = _error("InvalidBSON"); + PyErr_SetString(InvalidBSON, + "bad eoo"); + Py_DECREF(InvalidBSON); + return NULL; + } + + dict = elements_to_dict(string + 4, size - 5, as_class); + if (!dict) { + return NULL; + } + PyList_Append(result, dict); + Py_DECREF(dict); + string += size; + total_size -= size; + } + + return result; +} + +static PyMethodDef _CBSONMethods[] = { + {"_dict_to_bson", _cbson_dict_to_bson, METH_VARARGS, + "convert a dictionary to a string containing it's BSON representation."}, + {"_bson_to_dict", _cbson_bson_to_dict, METH_VARARGS, + "convert a BSON string to a SON object."}, + {"_to_dicts", _cbson_to_dicts, METH_VARARGS, + "convert binary data to a sequence of SON objects."}, + {"_insert_message", _cbson_insert_message, METH_VARARGS, + "create an insert message to be sent to MongoDB"}, + {"_update_message", _cbson_update_message, METH_VARARGS, + "create an update message to be sent to MongoDB"}, + {"_query_message", _cbson_query_message, METH_VARARGS, + "create a query message to be sent to MongoDB"}, + {"_get_more_message", _cbson_get_more_message, METH_VARARGS, + "create a get more message to be sent to MongoDB"}, + {NULL, NULL, 0, NULL} +}; + +PyMODINIT_FUNC init_cbson(void) { + PyObject *m; + + PyDateTime_IMPORT; + m = Py_InitModule("_cbson", _CBSONMethods); + if (m == NULL) { + return; + } + + // TODO we don't do any error checking here, should we be? 
+ _reload_python_objects(); +} diff -Nru pymongo-1.11/pymongo/_cmessagemodule.c pymongo-1.7/pymongo/_cmessagemodule.c --- pymongo-1.11/pymongo/_cmessagemodule.c 2011-03-14 22:30:07.000000000 +0000 +++ pymongo-1.7/pymongo/_cmessagemodule.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,468 +0,0 @@ -/* - * Copyright 2009-2010 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * This file contains C implementations of some of the functions - * needed by the message module. If possible, these implementations - * should be used to speed up message creation. - */ - -#include - -#include "_cbson.h" -#include "buffer.h" - -/* Get an error class from the pymongo.errors module. - * - * Returns a new ref */ -static PyObject* _error(char* name) { - PyObject* error; - PyObject* errors = PyImport_ImportModule("pymongo.errors"); - if (!errors) { - return NULL; - } - error = PyObject_GetAttrString(errors, name); - Py_DECREF(errors); - return error; -} - -/* add a lastError message on the end of the buffer. 
- * returns 0 on failure */ -static int add_last_error(buffer_t buffer, int request_id, PyObject* args) { - int message_start; - int document_start; - int message_length; - int document_length; - PyObject* key; - PyObject* value; - Py_ssize_t pos = 0; - PyObject* one; - - message_start = buffer_save_space(buffer, 4); - if (message_start == -1) { - PyErr_NoMemory(); - return 0; - } - if (!buffer_write_bytes(buffer, (const char*)&request_id, 4) || - !buffer_write_bytes(buffer, - "\x00\x00\x00\x00" /* responseTo */ - "\xd4\x07\x00\x00" /* opcode */ - "\x00\x00\x00\x00" /* options */ - "admin.$cmd\x00" /* collection name */ - "\x00\x00\x00\x00" /* skip */ - "\xFF\xFF\xFF\xFF", /* limit (-1) */ - 31)) { - return 0; - } - - /* save space for length */ - document_start = buffer_save_space(buffer, 4); - if (document_start == -1) { - PyErr_NoMemory(); - return 0; - } - - /* getlasterror: 1 */ - one = PyLong_FromLong(1); - if (!write_pair(buffer, "getlasterror", 12, one, 0, 1)) { - Py_DECREF(one); - return 0; - } - Py_DECREF(one); - - /* getlasterror options */ - while (PyDict_Next(args, &pos, &key, &value)) { - if (!decode_and_write_pair(buffer, key, value, 0, 0)) { - return 0; - } - } - - /* EOD */ - if (!buffer_write_bytes(buffer, "\x00", 1)) { - return 0; - } - - message_length = buffer_get_position(buffer) - message_start; - document_length = buffer_get_position(buffer) - document_start; - memcpy(buffer_get_buffer(buffer) + message_start, &message_length, 4); - memcpy(buffer_get_buffer(buffer) + document_start, &document_length, 4); - return 1; -} - -static PyObject* _cbson_insert_message(PyObject* self, PyObject* args) { - /* NOTE just using a random number as the request_id */ - int request_id = rand(); - char* collection_name = NULL; - int collection_name_length; - PyObject* docs; - int before, cur_size, max_size = 0; - int list_length; - int i; - unsigned char check_keys; - unsigned char safe; - PyObject* last_error_args; - buffer_t buffer; - int length_location; - 
PyObject* result; - - if (!PyArg_ParseTuple(args, "et#ObbO", - "utf-8", - &collection_name, - &collection_name_length, - &docs, &check_keys, &safe, &last_error_args)) { - return NULL; - } - - buffer = buffer_new(); - if (!buffer) { - PyErr_NoMemory(); - PyMem_Free(collection_name); - return NULL; - } - - // save space for message length - length_location = buffer_save_space(buffer, 4); - if (length_location == -1) { - PyMem_Free(collection_name); - PyErr_NoMemory(); - return NULL; - } - if (!buffer_write_bytes(buffer, (const char*)&request_id, 4) || - !buffer_write_bytes(buffer, - "\x00\x00\x00\x00" - "\xd2\x07\x00\x00" - "\x00\x00\x00\x00", - 12) || - !buffer_write_bytes(buffer, - collection_name, - collection_name_length + 1)) { - PyMem_Free(collection_name); - buffer_free(buffer); - return NULL; - } - - PyMem_Free(collection_name); - - list_length = PyList_Size(docs); - if (list_length <= 0) { - PyObject* InvalidOperation = _error("InvalidOperation"); - PyErr_SetString(InvalidOperation, "cannot do an empty bulk insert"); - Py_DECREF(InvalidOperation); - buffer_free(buffer); - return NULL; - } - for (i = 0; i < list_length; i++) { - PyObject* doc = PyList_GetItem(docs, i); - before = buffer_get_position(buffer); - if (!write_dict(buffer, doc, check_keys, 1)) { - buffer_free(buffer); - return NULL; - } - cur_size = buffer_get_position(buffer) - before; - max_size = (cur_size > max_size) ? 
cur_size : max_size; - } - - memcpy(buffer_get_buffer(buffer) + length_location, - buffer_get_buffer(buffer) + buffer_get_position(buffer), 4); - - if (safe) { - if (!add_last_error(buffer, request_id, last_error_args)) { - buffer_free(buffer); - return NULL; - } - } - - /* objectify buffer */ - result = Py_BuildValue("is#i", request_id, - buffer_get_buffer(buffer), - buffer_get_position(buffer), - max_size); - buffer_free(buffer); - return result; -} - -static PyObject* _cbson_update_message(PyObject* self, PyObject* args) { - /* NOTE just using a random number as the request_id */ - int request_id = rand(); - char* collection_name = NULL; - int collection_name_length; - int before, cur_size, max_size = 0; - PyObject* doc; - PyObject* spec; - unsigned char multi; - unsigned char upsert; - unsigned char safe; - PyObject* last_error_args; - int options; - buffer_t buffer; - int length_location; - PyObject* result; - - if (!PyArg_ParseTuple(args, "et#bbOObO", - "utf-8", - &collection_name, - &collection_name_length, - &upsert, &multi, &spec, &doc, &safe, - &last_error_args)) { - return NULL; - } - - options = 0; - if (upsert) { - options += 1; - } - if (multi) { - options += 2; - } - buffer = buffer_new(); - if (!buffer) { - PyErr_NoMemory(); - PyMem_Free(collection_name); - return NULL; - } - - // save space for message length - length_location = buffer_save_space(buffer, 4); - if (length_location == -1) { - PyMem_Free(collection_name); - PyErr_NoMemory(); - return NULL; - } - if (!buffer_write_bytes(buffer, (const char*)&request_id, 4) || - !buffer_write_bytes(buffer, - "\x00\x00\x00\x00" - "\xd1\x07\x00\x00" - "\x00\x00\x00\x00", - 12) || - !buffer_write_bytes(buffer, - collection_name, - collection_name_length + 1) || - !buffer_write_bytes(buffer, (const char*)&options, 4)) { - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - - before = buffer_get_position(buffer); - if (!write_dict(buffer, spec, 0, 1)) { - buffer_free(buffer); - 
PyMem_Free(collection_name); - return NULL; - } - max_size = buffer_get_position(buffer) - before; - - before = buffer_get_position(buffer); - if (!write_dict(buffer, doc, 0, 1)) { - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - cur_size = buffer_get_position(buffer) - before; - max_size = (cur_size > max_size) ? cur_size : max_size; - - PyMem_Free(collection_name); - - memcpy(buffer_get_buffer(buffer) + length_location, - buffer_get_buffer(buffer) + buffer_get_position(buffer), 4); - - if (safe) { - if (!add_last_error(buffer, request_id, last_error_args)) { - buffer_free(buffer); - return NULL; - } - } - - /* objectify buffer */ - result = Py_BuildValue("is#i", request_id, - buffer_get_buffer(buffer), - buffer_get_position(buffer), - max_size); - buffer_free(buffer); - return result; -} - -static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { - /* NOTE just using a random number as the request_id */ - int request_id = rand(); - unsigned int options; - char* collection_name = NULL; - int collection_name_length; - int begin, cur_size, max_size = 0; - int num_to_skip; - int num_to_return; - PyObject* query; - PyObject* field_selector = Py_None; - buffer_t buffer; - int length_location; - PyObject* result; - - if (!PyArg_ParseTuple(args, "Iet#iiO|O", - &options, - "utf-8", - &collection_name, - &collection_name_length, - &num_to_skip, &num_to_return, - &query, &field_selector)) { - return NULL; - } - buffer = buffer_new(); - if (!buffer) { - PyErr_NoMemory(); - PyMem_Free(collection_name); - return NULL; - } - - // save space for message length - length_location = buffer_save_space(buffer, 4); - if (length_location == -1) { - PyMem_Free(collection_name); - PyErr_NoMemory(); - return NULL; - } - if (!buffer_write_bytes(buffer, (const char*)&request_id, 4) || - !buffer_write_bytes(buffer, "\x00\x00\x00\x00\xd4\x07\x00\x00", 8) || - !buffer_write_bytes(buffer, (const char*)&options, 4) || - !buffer_write_bytes(buffer, 
collection_name, - collection_name_length + 1) || - !buffer_write_bytes(buffer, (const char*)&num_to_skip, 4) || - !buffer_write_bytes(buffer, (const char*)&num_to_return, 4)) { - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - - begin = buffer_get_position(buffer); - if (!write_dict(buffer, query, 0, 1)) { - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - max_size = buffer_get_position(buffer) - begin; - - if (field_selector != Py_None) { - begin = buffer_get_position(buffer); - if (!write_dict(buffer, field_selector, 0, 1)) { - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - cur_size = buffer_get_position(buffer) - begin; - max_size = (cur_size > max_size) ? cur_size : max_size; - } - - PyMem_Free(collection_name); - - memcpy(buffer_get_buffer(buffer) + length_location, - buffer_get_buffer(buffer) + buffer_get_position(buffer), 4); - - /* objectify buffer */ - result = Py_BuildValue("is#i", request_id, - buffer_get_buffer(buffer), - buffer_get_position(buffer), - max_size); - buffer_free(buffer); - return result; -} - -static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { - /* NOTE just using a random number as the request_id */ - int request_id = rand(); - char* collection_name = NULL; - int collection_name_length; - int num_to_return; - long long cursor_id; - buffer_t buffer; - int length_location; - PyObject* result; - - if (!PyArg_ParseTuple(args, "et#iL", - "utf-8", - &collection_name, - &collection_name_length, - &num_to_return, - &cursor_id)) { - return NULL; - } - buffer = buffer_new(); - if (!buffer) { - PyErr_NoMemory(); - PyMem_Free(collection_name); - return NULL; - } - - // save space for message length - length_location = buffer_save_space(buffer, 4); - if (length_location == -1) { - PyMem_Free(collection_name); - PyErr_NoMemory(); - return NULL; - } - if (!buffer_write_bytes(buffer, (const char*)&request_id, 4) || - !buffer_write_bytes(buffer, - 
"\x00\x00\x00\x00" - "\xd5\x07\x00\x00" - "\x00\x00\x00\x00", 12) || - !buffer_write_bytes(buffer, - collection_name, - collection_name_length + 1) || - !buffer_write_bytes(buffer, (const char*)&num_to_return, 4) || - !buffer_write_bytes(buffer, (const char*)&cursor_id, 8)) { - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - - PyMem_Free(collection_name); - - memcpy(buffer_get_buffer(buffer) + length_location, - buffer_get_buffer(buffer) + buffer_get_position(buffer), 4); - - /* objectify buffer */ - result = Py_BuildValue("is#", request_id, - buffer_get_buffer(buffer), - buffer_get_position(buffer)); - buffer_free(buffer); - return result; -} - -static PyMethodDef _CMessageMethods[] = { - {"_insert_message", _cbson_insert_message, METH_VARARGS, - "create an insert message to be sent to MongoDB"}, - {"_update_message", _cbson_update_message, METH_VARARGS, - "create an update message to be sent to MongoDB"}, - {"_query_message", _cbson_query_message, METH_VARARGS, - "create a query message to be sent to MongoDB"}, - {"_get_more_message", _cbson_get_more_message, METH_VARARGS, - "create a get more message to be sent to MongoDB"}, - {NULL, NULL, 0, NULL} -}; - -PyMODINIT_FUNC init_cmessage(void) { - PyObject *m; - - /* TODO is this necessary? - * - * We import _cbson here to make sure that it's init function has - * been run. - */ - m = PyImport_ImportModule("bson._cbson"); - Py_DECREF(m); - - m = Py_InitModule("_cmessage", _CMessageMethods); - if (m == NULL) { - return; - } -} diff -Nru pymongo-1.11/pymongo/code.py pymongo-1.7/pymongo/code.py --- pymongo-1.11/pymongo/code.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/pymongo/code.py 2010-06-04 18:05:46.000000000 +0000 @@ -12,4 +12,50 @@ # See the License for the specific language governing permissions and # limitations under the License. -from bson.code import * +"""Tools for representing JavaScript code to be evaluated by MongoDB. 
+""" + + +class Code(str): + """JavaScript code to be evaluated by MongoDB. + + Raises :class:`TypeError` if `code` is not an instance of + :class:`basestring` or `scope` is not an instance of + :class:`dict`. + + :Parameters: + - `code`: string containing JavaScript code to be evaluated + - `scope` (optional): dictionary representing the scope in which + `code` should be evaluated - a mapping from identifiers (as + strings) to values + """ + + def __new__(cls, code, scope=None): + if not isinstance(code, basestring): + raise TypeError("code must be an instance of basestring") + + if scope is None: + try: + scope = code.scope + except AttributeError: + scope = {} + if not isinstance(scope, dict): + raise TypeError("scope must be an instance of dict") + + self = str.__new__(cls, code) + self.__scope = scope + return self + + @property + def scope(self): + """Scope dictionary for this instance. + """ + return self.__scope + + def __repr__(self): + return "Code(%s, %r)" % (str.__repr__(self), self.__scope) + + def __eq__(self, other): + if isinstance(other, Code): + return (self.__scope, str(self)) == (other.__scope, str(other)) + return False diff -Nru pymongo-1.11/pymongo/collection.py pymongo-1.7/pymongo/collection.py --- pymongo-1.11/pymongo/collection.py 2011-04-26 18:47:02.000000000 +0000 +++ pymongo-1.7/pymongo/collection.py 2010-06-17 15:37:47.000000000 +0000 @@ -16,12 +16,12 @@ import warnings -from bson.code import Code -from bson.son import SON from pymongo import (helpers, message) +from pymongo.code import Code from pymongo.cursor import Cursor -from pymongo.errors import InvalidName, InvalidOperation +from pymongo.errors import InvalidName +from pymongo.son import SON _ZERO = "\x00\x00\x00\x00" @@ -88,7 +88,7 @@ raise InvalidName("collection names must not " "contain '$': %r" % name) if name[0] == "." 
or name[-1] == ".": - raise InvalidName("collection names must not start " + raise InvalidName("collecion names must not start " "or end with '.': %r" % name) if "\x00" in name: raise InvalidName("collection names must not contain the " @@ -165,42 +165,24 @@ """ return self.__database - def save(self, to_save, manipulate=True, safe=False, **kwargs): + def save(self, to_save, manipulate=True, safe=False): """Save a document in this collection. - If `to_save` already has an ``"_id"`` then an :meth:`update` - (upsert) operation is performed and any existing document with - that ``"_id"`` is overwritten. Otherwise an :meth:`insert` - operation is performed. In this case if `manipulate` is ``True`` - an ``"_id"`` will be added to `to_save` and this method returns - the ``"_id"`` of the saved document. If `manipulate` is ``False`` - the ``"_id"`` will be added by the server but this method will - return ``None``. - - Raises :class:`TypeError` if `to_save` is not an instance of - :class:`dict`. If `safe` is ``True`` then the save will be - checked for errors, raising - :class:`~pymongo.errors.OperationFailure` if one - occurred. Safe inserts wait for a response from the database, - while normal inserts do not. - - Any additional keyword arguments imply ``safe=True``, and will - be used as options for the resultant `getLastError` - command. For example, to wait for replication to 3 nodes, pass - ``w=3``. + If `to_save` already has an '_id' then an update (upsert) operation + is performed and any existing document with that _id is overwritten. + Otherwise an '_id' will be added to `to_save` and an insert operation + is performed. Returns the _id of the saved document. + + Raises TypeError if to_save is not an instance of dict. If `safe` + is True then the save will be checked for errors, raising + OperationFailure if one occurred. Safe inserts wait for a + response from the database, while normal inserts do not. Returns the + _id of the saved document. 
:Parameters: - - `to_save`: the document to be saved - - `manipulate` (optional): manipulate the document before - saving it? + - `to_save`: the SON object to be saved + - `manipulate` (optional): manipulate the SON object before saving it - `safe` (optional): check that the save succeeded? - - `**kwargs` (optional): any additional arguments imply - ``safe=True``, and will be used as options for the - `getLastError` command - - .. versionadded:: 1.8 - Support for passing `getLastError` options as keyword - arguments. .. mongodoc:: insert """ @@ -208,52 +190,31 @@ raise TypeError("cannot save object of type %s" % type(to_save)) if "_id" not in to_save: - return self.insert(to_save, manipulate, safe, **kwargs) + return self.insert(to_save, manipulate, safe) else: self.update({"_id": to_save["_id"]}, to_save, True, - manipulate, safe, **kwargs) + manipulate, safe) return to_save.get("_id", None) def insert(self, doc_or_docs, - manipulate=True, safe=False, check_keys=True, **kwargs): + manipulate=True, safe=False, check_keys=True): """Insert a document(s) into this collection. - If `manipulate` is ``True``, the document(s) are manipulated using - any :class:`~pymongo.son_manipulator.SONManipulator` instances - that have been added to this :class:`~pymongo.database.Database`. - In this case an ``"_id"`` will be added if the document(s) does - not already contain one and the ``"id"`` (or list of ``"_id"`` - values for more than one document) will be returned. - If `manipulate` is ``False`` and the document(s) does not include - an ``"_id"`` one will be added by the server. The server - does not return the ``"_id"`` it created so ``None`` is returned. - - If `safe` is ``True`` then the insert will be checked for - errors, raising :class:`~pymongo.errors.OperationFailure` if - one occurred. Safe inserts wait for a response from the - database, while normal inserts do not. 
- - Any additional keyword arguments imply ``safe=True``, and - will be used as options for the resultant `getLastError` - command. For example, to wait for replication to 3 nodes, pass - ``w=3``. + If manipulate is set the document(s) are manipulated using any + SONManipulators that have been added to this database. Returns the _id + of the inserted document or a list of _ids of the inserted documents. + If the document(s) does not already contain an '_id' one will be added. + If `safe` is True then the insert will be checked for errors, raising + OperationFailure if one occurred. Safe inserts wait for a response from + the database, while normal inserts do not. :Parameters: - - `doc_or_docs`: a document or list of documents to be - inserted - - `manipulate` (optional): manipulate the documents before - inserting? + - `doc_or_docs`: a SON object or list of SON objects to be inserted + - `manipulate` (optional): manipulate the documents before inserting? - `safe` (optional): check that the insert succeeded? - `check_keys` (optional): check if keys start with '$' or - contain '.', raising :class:`~pymongo.errors.InvalidName` - in either case - - `**kwargs` (optional): any additional arguments imply - ``safe=True``, and will be used as options for the - `getLastError` command - - .. versionadded:: 1.8 - Support for passing `getLastError` options as keyword - arguments. + contain '.', raising `pymongo.errors.InvalidName` in either case + .. 
versionchanged:: 1.1 Bulk insert works with any iterable @@ -268,17 +229,14 @@ if manipulate: docs = [self.__database._fix_incoming(doc, self) for doc in docs] - if kwargs: - safe = True self.__database.connection._send_message( - message.insert(self.__full_name, docs, - check_keys, safe, kwargs), safe) + message.insert(self.__full_name, docs, check_keys, safe), safe) ids = [doc.get("_id", None) for doc in docs] return return_one and ids[0] or ids - def update(self, spec, document, upsert=False, manipulate=False, - safe=False, multi=False, **kwargs): + def update(self, spec, document, + upsert=False, manipulate=False, safe=False, multi=False): """Update a document(s) in this collection. Raises :class:`TypeError` if either `spec` or `document` is @@ -308,20 +266,15 @@ If `safe` is ``True`` returns the response to the *lastError* command. Otherwise, returns ``None``. - Any additional keyword arguments imply ``safe=True``, and will - be used as options for the resultant `getLastError` - command. For example, to wait for replication to 3 nodes, pass - ``w=3``. - :Parameters: - - `spec`: a ``dict`` or :class:`~bson.son.SON` instance + - `spec`: a ``dict`` or :class:`~pymongo.son.SON` instance specifying elements which must be present for a document to be updated - - `document`: a ``dict`` or :class:`~bson.son.SON` + - `document`: a ``dict`` or :class:`~pymongo.son.SON` instance specifying the document to be used for the update or (in the case of an upsert) insert - see docs on MongoDB `update modifiers`_ - - `upsert` (optional): perform an upsert if ``True`` + - `upsert` (optional): perform an `upsert`_ if ``True`` - `manipulate` (optional): manipulate the document before updating? If ``True`` all instances of :mod:`~pymongo.son_manipulator.SONManipulator` added to @@ -334,19 +287,14 @@ might eventually change to ``True``. It is recommended that you specify this argument explicitly for all update operations in order to prepare your code for that change. 
- - `**kwargs` (optional): any additional arguments imply - ``safe=True``, and will be used as options for the - `getLastError` command - - .. versionadded:: 1.8 - Support for passing `getLastError` options as keyword - arguments. + .. versionchanged:: 1.4 Return the response to *lastError* if `safe` is ``True``. .. versionadded:: 1.1.1 The `multi` parameter. .. _update modifiers: http://www.mongodb.org/display/DOCS/Updating + .. _upsert: http://www.mongodb.org/display/DOCS/Updating#Updating-Upserts .. mongodoc:: update """ @@ -360,26 +308,11 @@ if upsert and manipulate: document = self.__database._fix_incoming(document, self) - if kwargs: - safe = True - return self.__database.connection._send_message( message.update(self.__full_name, upsert, multi, - spec, document, safe, kwargs), safe) + spec, document, safe), safe) - def drop(self): - """Alias for :meth:`~pymongo.database.Database.drop_collection`. - - The following two calls are equivalent: - - >>> db.foo.drop() - >>> db.drop_collection("foo") - - .. versionadded:: 1.8 - """ - self.__database.drop_collection(self.__name) - - def remove(self, spec_or_id=None, safe=False, **kwargs): + def remove(self, spec_or_id=None, safe=False): """Remove a document(s) from this collection. .. warning:: Calls to :meth:`remove` should be performed with @@ -399,25 +332,16 @@ If `safe` is ``True`` returns the response to the *lastError* command. Otherwise, returns ``None``. - Any additional keyword arguments imply ``safe=True``, and will - be used as options for the resultant `getLastError` - command. For example, to wait for replication to 3 nodes, pass - ``w=3``. - :Parameters: - `spec_or_id` (optional): a dictionary specifying the documents to be removed OR any other type specifying the value of ``"_id"`` for the document to be removed - `safe` (optional): check that the remove succeeded? 
- - `**kwargs` (optional): any additional arguments imply - ``safe=True``, and will be used as options for the - `getLastError` command - - .. versionadded:: 1.8 - Support for passing `getLastError` options as keyword arguments. - .. versionchanged:: 1.7 Accept any type other than a ``dict`` - instance for removal by ``"_id"``, not just - :class:`~bson.objectid.ObjectId` instances. + + .. versionchanged:: 1.7 + Accept any type other than a ``dict`` instance for removal + by ``"_id"``, not just :class:`~pymongo.objectid.ObjectId` + instances. .. versionchanged:: 1.4 Return the response to *lastError* if `safe` is ``True``. .. versionchanged:: 1.2 @@ -434,11 +358,8 @@ if not isinstance(spec_or_id, dict): spec_or_id = {"_id": spec_or_id} - if kwargs: - safe = True - return self.__database.connection._send_message( - message.delete(self.__full_name, spec_or_id, safe, kwargs), safe) + message.delete(self.__full_name, spec_or_id, safe), safe) def find_one(self, spec_or_id=None, *args, **kwargs): """Get a single document from the database. @@ -464,9 +385,10 @@ Allow passing any of the arguments that are valid for :meth:`find`. - .. versionchanged:: 1.7 Accept any type other than a ``dict`` - instance as an ``"_id"`` query, not just - :class:`~bson.objectid.ObjectId` instances. + .. versionchanged:: 1.7 + Accept any type other than a ``dict`` instance as an + ``"_id"`` query, not just + :class:`~pymongo.objectid.ObjectId` instances. 
""" if spec_or_id is not None and not isinstance(spec_or_id, dict): spec_or_id = {"_id": spec_or_id} @@ -500,7 +422,8 @@ result set - `fields` (optional): a list of field names that should be returned in the result set ("_id" will always be - included), or a dict specifying the fields to return + included), or a dict specifying the `fields to return + `_ - `skip` (optional): the number of documents to omit (from the start of the result set) when returning the results - `limit` (optional): the maximum number of results to @@ -515,7 +438,7 @@ returned, or objects missed, which were present at both the start and end of the query's execution. For details, see the `snapshot documentation - `_. + `_. - `tailable` (optional): the result of this find call will be a tailable cursor - tailable cursors aren't closed when the last data is retrieved but are kept open and the @@ -532,18 +455,10 @@ - `as_class` (optional): class to use for documents in the query result (default is :attr:`~pymongo.connection.Connection.document_class`) - - `slave_okay` (optional): if True, allows this query to - be run against a replica secondary. - - `network_timeout` (optional): specify a timeout to use for - this query, which will override the - :class:`~pymongo.connection.Connection`-level default .. note:: The `max_scan` parameter requires server version **>= 1.5.1** - .. versionadded:: 1.8 - The `network_timeout` parameter. - .. versionadded:: 1.7 The `sort`, `max_scan` and `as_class` parameters. @@ -581,8 +496,8 @@ >>> my_collection.create_index("mike") - For a compound index on ``'mike'`` descending and ``'eliot'`` - ascending we need to use a list of tuples: + For a `compound index`_ on ``'mike'`` descending and + ``'eliot'`` ascending we need to use a list of tuples: >>> my_collection.create_index([("mike", pymongo.DESCENDING), ... ("eliot", pymongo.ASCENDING)]) @@ -620,6 +535,8 @@ .. seealso:: :meth:`ensure_index` + .. 
_compound index: http://www.mongodb.org/display/DOCS/Indexes#Indexes-CompoundKeysIndexes + .. mongodoc:: indexes """ keys = helpers._index_list(key_or_list) @@ -641,13 +558,11 @@ index.update(kwargs) - self.__database.system.indexes.insert(index, manipulate=False, - check_keys=False, - safe=True) - self.__database.connection._cache_index(self.__database.name, self.__name, name, ttl) + self.__database.system.indexes.insert(index, manipulate=False, + check_keys=False) return name def ensure_index(self, key_or_list, deprecated_unique=None, @@ -686,8 +601,6 @@ - `unique`: should this index guarantee uniqueness? - `dropDups` or `drop_dups`: should we drop duplicates during index creation when creating a unique index? - - `background`: if this index should be created in the - background - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` index - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` @@ -829,9 +742,9 @@ - ``None`` to use the entire document as a key. - A :class:`list` of keys (each a :class:`basestring`) to group by. - - A :class:`basestring` or :class:`~bson.code.Code` instance - containing a JavaScript function to be applied to each - document, returning the key to group by. + - A :class:`basestring` or :class:`~pymongo.code.Code` instance + containing a JavaScript function to be applied to each document, + returning the key to group by. :Parameters: - `key`: fields to group by (see above description) @@ -921,8 +834,7 @@ """ return self.find().distinct(key) - def map_reduce(self, map, reduce, out, merge_output=False, - reduce_output=False, full_response=False, **kwargs): + def map_reduce(self, map, reduce, full_response=False, **kwargs): """Perform a map/reduce operation on this collection. 
If `full_response` is ``False`` (default) returns a @@ -933,22 +845,13 @@ :Parameters: - `map`: map function (as a JavaScript string) - `reduce`: reduce function (as a JavaScript string) - - `out` (required): output collection name - - `merge_output` (optional): Merge output into `out`. If the same - key exists in both the result set and the existing output - collection, the new key will overwrite the existing key - - `reduce_output` (optional): If documents exist for a given key - in the result set and in the existing output collection, then a - reduce operation (using the specified reduce function) will be - performed on the two values and the result will be written to - the output collection - `full_response` (optional): if ``True``, return full response to this command - otherwise just return the result collection - `**kwargs` (optional): additional arguments to the `map reduce command`_ may be passed as keyword arguments to this helper method, e.g.:: - >>> db.test.map_reduce(map, reduce, "myresults", limit=2) + >>> db.test.map_reduce(map, reduce, limit=2) .. note:: Requires server version **>= 1.1.1** @@ -960,125 +863,11 @@ .. mongodoc:: mapreduce """ - if not isinstance(out, basestring): - raise TypeError("'out' must be an instance of basestring") - - if merge_output and reduce_output: - raise InvalidOperation("Can't do both merge" - " and re-reduce of output.") - - if merge_output: - out_conf = {"merge": out} - elif reduce_output: - out_conf = {"reduce": out} - else: - out_conf = out - response = self.__database.command("mapreduce", self.__name, - map=map, reduce=reduce, - out=out_conf, **kwargs) + map=map, reduce=reduce, **kwargs) if full_response: return response - else: - return self.__database[response["result"]] - - def inline_map_reduce(self, map, reduce, full_response=False, **kwargs): - """Perform an inline map/reduce operation on this collection. - - Perform the map/reduce operation on the server in RAM. A result - collection is not created. 
The result set is returned as a list - of documents. - - If `full_response` is ``False`` (default) returns the - result documents in a list. Otherwise, returns the full - response from the server to the `map reduce command`_. - - :Parameters: - - `map`: map function (as a JavaScript string) - - `reduce`: reduce function (as a JavaScript string) - - `full_response` (optional): if ``True``, return full response to - this command - otherwise just return the result collection - - `**kwargs` (optional): additional arguments to the - `map reduce command`_ may be passed as keyword arguments to this - helper method, e.g.:: - - >>> db.test.inline_map_reduce(map, reduce, limit=2) - - .. note:: Requires server version **>= 1.7.4** - - .. versionadded:: 1.10 - """ - - response = self.__database.command("mapreduce", self.__name, - map=map, reduce=reduce, - out={"inline": 1}, **kwargs) - - if full_response: - return response - else: - return response.get("results") - - def find_and_modify(self, query={}, update=None, upsert=False, **kwargs): - """Update and return an object. - - This is a thin wrapper around the findAndModify_ command. The - positional arguments are designed to match the first three arguments - to :meth:`update` however most options should be passed as named - parameters. Either `update` or `remove` arguments are required, all - others are optional. - - Returns either the object before or after modification based on `new` - parameter. If no objects match the `query` and `upsert` is false, - returns ``None``. If upserting and `new` is false, returns ``{}``. 
- - :Parameters: - - `query`: filter for the update (default ``{}``) - - `sort`: priority if multiple objects match (default ``{}``) - - `update`: see second argument to :meth:`update` (no default) - - `remove`: remove rather than updating (default ``False``) - - `new`: return updated rather than original object - (default ``False``) - - `fields`: see second argument to :meth:`find` (default all) - - `upsert`: insert if object doesn't exist (default ``False``) - - `**kwargs`: any other options the findAndModify_ command - supports can be passed here. - - - .. mongodoc:: findAndModify - - .. _findAndModify: http://dochub.mongodb.org/core/findAndModify - - .. note:: Requires server version **>= 1.3.0** - - .. versionadded:: 1.10 - """ - if (not update and not kwargs.get('remove', None)): - raise ValueError("Must either update or remove") - - if (update and kwargs.get('remove', None)): - raise ValueError("Can't do both update and remove") - - # No need to include empty args - if query: - kwargs['query'] = query - if update: - kwargs['update'] = update - if upsert: - kwargs['upsert'] = upsert - - no_obj_error = "No matching object found" - - out = self.__database.command("findAndModify", self.__name, - allowable_errors=[no_obj_error], **kwargs) - - if not out['ok']: - if out["errmsg"] == no_obj_error: - return None - else: - # Should never get here b/c of allowable_errors - raise ValueError("Unexpected Error: %s" % (out,)) - - return out.get('value') + return self.__database[response["result"]] def __iter__(self): return self diff -Nru pymongo-1.11/pymongo/connection.py pymongo-1.7/pymongo/connection.py --- pymongo-1.11/pymongo/connection.py 2011-04-25 21:01:27.000000000 +0000 +++ pymongo-1.7/pymongo/connection.py 2010-06-17 15:37:47.000000000 +0000 @@ -14,10 +14,11 @@ """Tools for connecting to MongoDB. +To connect to a single instance of MongoDB use :class:`Connection`. To +connect to a replica pair use :meth:`~Connection.paired`. + .. 
seealso:: Module :mod:`~pymongo.master_slave_connection` for - connecting to master-slave clusters, and - :doc:`/examples/replica_set` for an example of how to connect to a - replica set. + connecting to master-slave clusters. To get a :class:`~pymongo.database.Database` instance from a :class:`Connection` use either dictionary-style or attribute-style @@ -35,11 +36,9 @@ import datetime import os -import select import socket import struct import threading -import time import warnings from pymongo import (database, @@ -50,152 +49,35 @@ ConfigurationError, ConnectionFailure, DuplicateKeyError, - InvalidDocument, InvalidURI, OperationFailure) - _CONNECT_TIMEOUT = 20.0 -def _partition_ipv6(source): - if source.find(']') == -1: - raise InvalidURI("an IPv6 address literal must be " - "enclosed in '[' and ']' characters.") - i = source.find(']:') - if i == -1: - return (source[1:-1], None) - return (source[1: i], source[i + 2:]) - - -def _partition(source, sub): - """Our own string partitioning method. - - Splits `source` on `sub`. - """ - i = source.find(sub) - if i == -1: - return (source, None) - return (source[:i], source[i + len(sub):]) - - -def _str_to_node(string, default_port=27017): - """Convert a string to a node tuple. - - "localhost:27017" -> ("localhost", 27017) - """ - # IPv6 literal - if string[0] == '[': - host, port = _partition_ipv6(string) - elif string.count(':') > 1 or string.find(']') != -1: - raise InvalidURI("an IPv6 address literal must be " - "enclosed in '[' and ']' characters.") - else: - host, port = _partition(string, ":") - if port: - port = int(port) - else: - port = default_port - return (host, port) - - -def _parse_uri(uri, default_port=27017): - """MongoDB URI parser. 
- """ - - if uri.startswith("mongodb://"): - uri = uri[len("mongodb://"):] - elif "://" in uri: - raise InvalidURI("Invalid uri scheme: %s" % _partition(uri, "://")[0]) - - (hosts, namespace) = _partition(uri, "/") - - raw_options = None - if namespace: - (namespace, raw_options) = _partition(namespace, "?") - if namespace.find(".") < 0: - db = namespace - collection = None - else: - (db, collection) = namespace.split(".", 1) - else: - db = None - collection = None - - username = None - password = None - if "@" in hosts: - (auth, hosts) = _partition(hosts, "@") - - if ":" not in auth: - raise InvalidURI("auth must be specified as " - "'username:password@'") - (username, password) = _partition(auth, ":") - - host_list = [] - for host in hosts.split(","): - if not host: - raise InvalidURI("empty host (or extra comma in host list)") - host_list.append(_str_to_node(host, default_port)) - - options = {} - if raw_options: - raw_options = raw_options.lower() - and_idx = raw_options.find("&") - semi_idx = raw_options.find(";") - if and_idx >= 0 and semi_idx >= 0: - raise InvalidURI("Cannot mix & and ; for option separators.") - elif and_idx >= 0: - options = dict([kv.split("=") for kv in raw_options.split("&")]) - elif semi_idx >= 0: - options = dict([kv.split("=") for kv in raw_options.split(";")]) - elif raw_options.find("="): - options = dict([raw_options.split("=")]) - - return (host_list, db, username, password, collection, options) - - -def _closed(sock): - """Return True if we know socket has been closed, False otherwise. - """ - rd, _, _ = select.select([sock], [], [], 0) - try: - return len(rd) and sock.recv() == "" - except: - return True - - -class _Pool(threading.local): +class Pool(threading.local): """A simple connection pool. - Uses thread-local socket per thread. By calling return_socket() a - thread can return a socket to the pool. + Uses thread-local socket per thread. By calling return_socket() a thread + can return a socket to the pool. 
""" # Non thread-locals - __slots__ = ["sockets", "socket_factory", "pool_size", "pid"] - - # thread-local default + __slots__ = ["sockets", "socket_factory"] sock = None - def __init__(self, socket_factory, pool_size): - self.pid = os.getpid() - self.pool_size = pool_size + def __init__(self, socket_factory): self.socket_factory = socket_factory if not hasattr(self, "sockets"): self.sockets = [] def socket(self): - # We use the pid here to avoid issues with fork / multiprocessing. - # See test.test_connection:TestConnection.test_fork for an example of - # what could go wrong otherwise + # we store the pid here to avoid issues with fork / + # multiprocessing - see + # test.test_connection:TestConnection.test_fork for an example + # of what could go wrong otherwise pid = os.getpid() - if pid != self.pid: - self.sock = None - self.sockets = [] - self.pid = pid - if self.sock is not None and self.sock[0] == pid: return self.sock[1] @@ -208,13 +90,7 @@ def return_socket(self): if self.sock is not None and self.sock[0] == os.getpid(): - # There's a race condition here, but we deliberately - # ignore it. It means that if the pool_size is 10 we - # might actually keep slightly more than that. - if len(self.sockets) < self.pool_size: - self.sockets.append(self.sock[1]) - else: - self.sock[1].close() + self.sockets.append(self.sock[1]) self.sock = None @@ -225,66 +101,40 @@ HOST = "localhost" PORT = 27017 - __max_bson_size = 4 * 1024 * 1024 - - def __init__(self, host=None, port=None, max_pool_size=10, - slave_okay=False, network_timeout=None, - document_class=dict, tz_aware=False, _connect=True): + def __init__(self, host=None, port=None, pool_size=None, + auto_start_request=None, timeout=None, slave_okay=False, + network_timeout=None, document_class=dict, _connect=True): """Create a new connection to a single MongoDB instance at *host:port*. - The resultant connection object has connection-pooling built - in. It also performs auto-reconnection when necessary. 
If an - operation fails because of a connection error, + The resultant connection object has connection-pooling built in. It + also performs auto-reconnection when necessary. If an operation fails + because of a connection error, :class:`~pymongo.errors.ConnectionFailure` is raised. If auto-reconnection will be performed, - :class:`~pymongo.errors.AutoReconnect` will be - raised. Application code should handle this exception - (recognizing that the operation failed) and then continue to - execute. - - Raises :class:`TypeError` if port is not an instance of - ``int``. Raises :class:`~pymongo.errors.ConnectionFailure` if - the connection cannot be made. - - The `host` parameter can be a full `mongodb URI - `_, in addition to - a simple hostname. It can also be a list of hostnames or - URIs. Any port specified in the host string(s) will override - the `port` parameter. If multiple mongodb URIs containing - database or auth information are passed, the last database, - username, and password present will be used. - - :Parameters: - - `host` (optional): hostname or IP address of the - instance to connect to, or a mongodb URI, or a list of - hostnames / mongodb URIs. If `host` is an IPv6 literal - it must be enclosed in '[' and ']' characters following - the RFC2732 URL syntax (e.g. '[::1]' for localhost) + :class:`~pymongo.errors.AutoReconnect` will be raised. Application code + should handle this exception (recognizing that the operation failed) + and then continue to execute. + + Raises :class:`TypeError` if host is not an instance of string or port + is not an instance of ``int``. Raises + :class:`~pymongo.errors.ConnectionFailure` if the connection cannot be + made. + + :Parameters: + - `host` (optional): hostname or IPv4 address of the instance to + connect to - `port` (optional): port number on which to connect - - `max_pool_size` (optional): The maximum size limit for - the connection pool. 
+ - `pool_size` (optional): DEPRECATED + - `auto_start_request` (optional): DEPRECATED - `slave_okay` (optional): is it okay to connect directly to and perform queries on a slave instance + - `timeout` (optional): DEPRECATED - `network_timeout` (optional): timeout (in seconds) to use for socket operations - default is no timeout - `document_class` (optional): default class to use for documents returned from queries on this connection - - `tz_aware` (optional): if ``True``, - :class:`~datetime.datetime` instances returned as values - in a document by this :class:`Connection` will be timezone - aware (otherwise they will be naive) .. seealso:: :meth:`end_request` - .. versionchanged:: 1.10.1+ - Added `max_pool_size`. Completely removed previously deprecated - `pool_size`, `auto_start_request` and `timeout` parameters. - .. versionchanged:: 1.8 - The `host` parameter can now be a full `mongodb URI - `_, in addition - to a simple hostname. It can also be a list of hostnames or - URIs. - .. versionadded:: 1.8 - The `tz_aware` parameter. .. versionadded:: 1.7 The `document_class` parameter. .. 
versionchanged:: 1.4 @@ -297,71 +147,36 @@ """ if host is None: host = self.HOST - if isinstance(host, basestring): - host = [host] if port is None: port = self.PORT + + if pool_size is not None: + warnings.warn("The pool_size parameter to Connection is " + "deprecated", DeprecationWarning) + if auto_start_request is not None: + warnings.warn("The auto_start_request parameter to Connection " + "is deprecated", DeprecationWarning) + if timeout is not None: + warnings.warn("The timeout parameter to Connection is deprecated", + DeprecationWarning) + + if not isinstance(host, basestring): + raise TypeError("host must be an instance of basestring") if not isinstance(port, int): raise TypeError("port must be an instance of int") - nodes = set() - database = None - username = None - password = None - collection = None - options = {} - for uri in host: - (n, db, u, p, coll, opts) = _parse_uri(uri, port) - nodes.update(n) - database = db or database - username = u or username - password = p or password - collection = coll or collection - options = opts or options - if not nodes: - raise ConfigurationError("need to specify at least one host") - self.__nodes = nodes - if database and username is None: - raise InvalidURI("cannot specify database without " - "a username and password") - self.__host = None self.__port = None - if "maxpoolsize" in options: - self.__max_pool_size = options['maxpoolsize'] - if not self.__max_pool_size.isdigit(): - raise TypeError("maxPoolSize must be an integer >= 0.") - self.__max_pool_size = int(self.__max_pool_size) - else: - self.__max_pool_size = max_pool_size - if not isinstance(self.__max_pool_size, int): - raise TypeError("max_pool_size must be an instance of int.") - if self.__max_pool_size < 0: - raise ValueError("the maximum pool size must be >= 0") - - if "slaveok" in options: - self.__slave_okay = (options['slaveok'][0].upper() == 'T') - else: - self.__slave_okay = slave_okay - - if slave_okay and len(self.__nodes) > 1: - raise 
ConfigurationError("cannot specify slave_okay for a paired " - "or replica set connection") - - # TODO - Support using other options like w and fsync from URI - self.__options = options - # TODO - Support setting the collection from URI like the Java driver - self.__collection = collection + self.__nodes = [(host, port)] + self.__slave_okay = slave_okay self.__cursor_manager = CursorManager(self) - self.__pool = _Pool(self.__connect, self.__max_pool_size) - self.__last_checkout = time.time() + self.__pool = Pool(self.__connect) self.__network_timeout = network_timeout self.__document_class = document_class - self.__tz_aware = tz_aware # cache of existing indexes used by ensure_index ops self.__index_cache = {} @@ -369,38 +184,150 @@ if _connect: self.__find_master() - if username: - database = database or "admin" - if not self[database].authenticate(username, password): - raise ConfigurationError("authentication failed") + @staticmethod + def __partition(source, sub): + i = source.find(sub) + if i == -1: + return (source, None) + + return (source[:i], source[i + len(sub):]) + + @staticmethod + def _parse_uri(uri): + info = {} + + if uri.startswith("mongodb://"): + uri = uri[len("mongodb://"):] + elif "://" in uri: + raise InvalidURI("Invalid uri scheme: %s" + % Connection.__partition(uri, "://")[0]) + + (hosts, database) = Connection.__partition(uri, "/") + + if not database: + database = None + + username = None + password = None + if "@" in hosts: + (auth, hosts) = Connection.__partition(hosts, "@") + + if ":" not in auth: + raise InvalidURI("auth must be specified as " + "'username:password@'") + (username, password) = Connection.__partition(auth, ":") + + host_list = [] + for host in hosts.split(","): + if not host: + raise InvalidURI("empty host (or extra comma in host list)") + (hostname, port) = Connection.__partition(host, ":") + if port: + port = int(port) + else: + port = 27017 + host_list.append((hostname, port)) + + return (host_list, database, 
username, password) @classmethod def from_uri(cls, uri="mongodb://localhost", **connection_args): - """DEPRECATED Can pass a mongodb URI directly to Connection() instead. + """Connect to a MongoDB instance(s) using the mongodb URI + scheme. + + The format for a MongoDB URI is documented `here + `_. Raises + :class:`~pymongo.errors.InvalidURI` when given an invalid URI. + + :Parameters: + + - `uri`: URI identifying the MongoDB instance(s) to connect + to + + The remaining keyword arguments are the same as those accepted + by :meth:`~Connection`. - .. versionchanged:: 1.8 - DEPRECATED .. versionadded:: 1.5 """ - warnings.warn("Connection.from_uri is deprecated - can pass " - "URIs to Connection() now", DeprecationWarning) - return cls(uri, **connection_args) + (nodes, database, username, password) = Connection._parse_uri(uri) + if database and username is None: + raise InvalidURI("cannot specify database without " + "a username and password") + + if len(nodes) == 1: + connection = cls(*nodes[0], **connection_args) + + elif len(nodes) == 2: + connection = cls.paired(*nodes, **connection_args) + + else: + raise InvalidURI("Connecting to more than 2 nodes " + "is not currently supported") + + if username: + database = database or "admin" + if not connection[database].authenticate(username, password): + raise InvalidURI("authentication failed") + + return connection + + def __pair_with(self, host, port): + """Pair this connection with a Mongo instance running on host:port. + + Raises TypeError if host is not an instance of string or port is not an + instance of int. Raises ConnectionFailure if the connection cannot be + made. 
+ + :Parameters: + - `host`: the hostname or IPv4 address of the instance to + pair with + - `port`: the port number on which to connect + """ + if not isinstance(host, str): + raise TypeError("host must be an instance of str") + if not isinstance(port, int): + raise TypeError("port must be an instance of int") + self.__nodes.append((host, port)) + + self.__find_master() @classmethod def paired(cls, left, right=None, **connection_args): - """DEPRECATED Can pass a list of hostnames to Connection() instead. + """Open a new paired connection to Mongo. - .. versionchanged:: 1.8 - DEPRECATED + Raises :class:`TypeError` if either `left` or `right` is not a tuple of + the form ``(host, port)``. Raises :class:`~pymongo.ConnectionFailure` + if the connection cannot be made. + + :Parameters: + - `left`: ``(host, port)`` pair for the left MongoDB instance + - `right` (optional): ``(host, port)`` pair for the right MongoDB + instance + + The remaining keyword arguments are the same as those accepted + by :meth:`~Connection`. """ - warnings.warn("Connection.paired is deprecated - can pass multiple " - "hostnames to Connection() now", DeprecationWarning) - if isinstance(left, str) or isinstance(right, str): - raise TypeError("arguments to paired must be tuples") if right is None: right = (cls.HOST, cls.PORT) - return cls([":".join(map(str, left)), ":".join(map(str, right))], - **connection_args) + + for param in ('pool_size', 'auto_start_request', 'timeout'): + if param in connection_args: + warnings.warn("The %s parameter to Connection.paired is " + "deprecated" % param, DeprecationWarning) + + if "slave_okay" in connection_args: + raise TypeError("cannot specify slave_okay on paired connections") + + connection_args['_connect'] = False + + connection = cls(left[0], left[1], **connection_args) + connection.__pair_with(*right) + return connection + + def __master(self, sock): + """Is this socket connected to a master server? 
+ """ + return self["admin"].command("ismaster", _sock=sock)["ismaster"] def _cache_index(self, database, collection, index, ttl): """Add an index to the index cache for ensure_index operations. @@ -475,26 +402,6 @@ return self.__port @property - def max_pool_size(self): - """The maximum pool size limit set for this connection. - - .. versionadded:: 1.10.1+ - """ - return self.__max_pool_size - - @property - def nodes(self): - """List of all known nodes. - - Includes both nodes specified when the :class:`Connection` was - created, as well as nodes discovered through the replica set - discovery mechanism. - - .. versionadded:: 1.8 - """ - return self.__nodes - - @property def slave_okay(self): """Is it okay for this connection to connect directly to a slave? """ @@ -508,152 +415,62 @@ document_class = property(get_document_class, set_document_class, doc="""Default class to use for documents - returned on this connection. + returned from queries on this connection. .. versionadded:: 1.7 """) - @property - def tz_aware(self): - """Does this connection return timezone-aware datetimes? - - See the `tz_aware` parameter to :meth:`Connection`. - - .. versionadded:: 1.8 - """ - return self.__tz_aware - - @property - def max_bson_size(self): - """Return the maximum size BSON object the connected server - accepts in bytes. Defaults to 4MB in server < 1.7.4. - - .. 
versionadded:: 1.10 - """ - return self.__max_bson_size - - def __add_hosts_and_get_primary(self, response): - if "hosts" in response: - self.__nodes.update([_str_to_node(h) for h in response["hosts"]]) - if "primary" in response: - return _str_to_node(response["primary"]) - return False - - def __try_node(self, node): - self.disconnect() - self.__host, self.__port = node - try: - response = self.admin.command("ismaster") - self.end_request() - - if "maxBsonObjectSize" in response: - self.__max_bson_size = response["maxBsonObjectSize"] - - # If __slave_okay is True and we've only been given one node - # assume this should be a direct connection and don't try to - # discover other nodes. - if len(self.__nodes) == 1 and self.__slave_okay: - if response["ismaster"]: - return True - return False - - primary = self.__add_hosts_and_get_primary(response) - if response["ismaster"]: - return True - return primary - except: - self.end_request() - return None - def __find_master(self): """Create a new socket and use it to figure out who the master is. Sets __host and __port so that :attr:`host` and :attr:`port` - will return the address of the master. Also (possibly) updates - any replSet information. + will return the address of the master. """ - # Special case the first node to try to get the primary or any - # additional hosts from a replSet: - first = iter(self.__nodes).next() - - primary = self.__try_node(first) - if primary is True: - return first - - # no network error - if self.__slave_okay and primary is not None: - return first - - # Wasn't the first node, but we got a primary - let's try it: - tried = [first] - if primary: - if self.__try_node(primary) is True: - return primary - tried.append(primary) - - nodes = self.__nodes - set(tried) - - # Just scan - # TODO parallelize these to minimize connect time? 
- for node in nodes: - if self.__try_node(node) is True: - return node - - # Clear the connection pool so we we don't try - # running queries against a secondary without slave_okay. - self.disconnect() - raise AutoReconnect("could not find master/primary") + self.__host = None + self.__port = None + sock = None + sock_error = False + for (host, port) in self.__nodes: + try: + try: + sock = socket.socket() + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + sock.settimeout(_CONNECT_TIMEOUT) + sock.connect((host, port)) + sock.settimeout(self.__network_timeout) + master = self.__master(sock) + if master or self.__slave_okay: + self.__host = host + self.__port = port + return + except socket.error, e: + sock_error = True + finally: + if sock is not None: + sock.close() + if sock_error or self.__host is None: + raise AutoReconnect("could not find master") + raise ConfigurationError("No master node in %r. You must specify " + "slave_okay to connect to " + "slaves." % self.__nodes) def __connect(self): """(Re-)connect to Mongo and return a new (connected) socket. Connect to the master if this is a paired connection. """ - host, port = (self.__host, self.__port) - if host is None or port is None: - host, port = self.__find_master() + if self.__host is None or self.__port is None: + self.__find_master() try: - try: - # Prefer IPv4. If there is demand for an option - # to specify one or the other we can add it later. 
- sock = socket.socket(socket.AF_INET) - sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - sock.settimeout(self.__network_timeout or _CONNECT_TIMEOUT) - sock.connect((host, port)) - sock.settimeout(self.__network_timeout) - return sock - except socket.gaierror: - # If that fails try IPv6 - sock = socket.socket(socket.AF_INET6) - sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - sock.settimeout(self.__network_timeout or _CONNECT_TIMEOUT) - sock.connect((host, port)) - sock.settimeout(self.__network_timeout) - return sock + sock = socket.socket() + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + sock.settimeout(_CONNECT_TIMEOUT) + sock.connect((self.__host, self.__port)) + sock.settimeout(self.__network_timeout) + return sock except socket.error: - self.disconnect() - raise AutoReconnect("could not connect to %r" % list(self.__nodes)) - - def __socket(self): - """Get a socket from the pool. - - If it's been > 1 second since the last time we checked out a - socket, we also check to see if the socket has been closed - - this let's us avoid seeing *some* - :class:`~pymongo.errors.AutoReconnect` exceptions on server - hiccups, etc. We only do this if it's been > 1 second since - the last socket checkout, to keep performance reasonable - we - can't avoid those completely anyway. - """ - sock = self.__pool.socket() - t = time.time() - if t - self.__last_checkout > 1: - if _closed(sock): - self.disconnect() - sock = self.__pool.socket() - self.__last_checkout = t - return sock + raise AutoReconnect("could not connect to %r" % self.__nodes) def disconnect(self): """Disconnect from MongoDB. @@ -668,9 +485,18 @@ .. seealso:: :meth:`end_request` .. versionadded:: 1.3 """ - self.__pool = _Pool(self.__connect, self.__max_pool_size) - self.__host = None - self.__port = None + self.__pool = Pool(self.__connect) + + def _reset(self): + """Reset everything and start connecting again. + + Closes all open sockets and resets them to None. 
Re-finds the master. + + This should be done in case of a connection failure or a "not master" + error. + """ + self.disconnect() + self.__find_master() def set_cursor_manager(self, manager_class): """Set this connection's cursor manager. @@ -704,43 +530,18 @@ assert response["number_returned"] == 1 error = response["data"][0] - helpers._check_command_response(error, self.disconnect) - # TODO unify logic with database.error method - if error.get("err") is None: + if error.get("err", 0) is None: return error if error["err"] == "not master": - self.disconnect() - raise AutoReconnect("not master") + self._reset() - if "code" in error: - if error["code"] in [11000, 11001, 12582]: - raise DuplicateKeyError(error["err"]) - else: - raise OperationFailure(error["err"], error["code"]) + if "code" in error and error["code"] in [11000, 11001]: + raise DuplicateKeyError(error["err"]) else: raise OperationFailure(error["err"]) - def __check_bson_size(self, message): - """Make sure the message doesn't include BSON documents larger - than the connected server will accept. - - :Parameters: - - `message`: message to check - """ - if len(message) == 3: - (request_id, data, max_doc_size) = message - if max_doc_size > self.__max_bson_size: - raise InvalidDocument("BSON document too large (%d bytes)" - " - the connected server supports" - " BSON document sizes up to %d" - " bytes." % - (max_doc_size, self.__max_bson_size)) - return (request_id, data) - else: - # get_more and kill_cursors messages - # don't include BSON documents. - return message + return error def _send_message(self, message, with_last_error=False): """Say something to Mongo. @@ -756,9 +557,9 @@ - `with_last_error`: check getLastError status after sending the message """ - sock = self.__socket() + sock = self.__pool.socket() try: - (request_id, data) = self.__check_bson_size(message) + (request_id, data) = message sock.sendall(data) # Safe mode. We pack the message together with a lastError # message and send both. 
We then get the response (to the @@ -770,7 +571,7 @@ return self.__check_response_to_last_error(response) return None except (ConnectionFailure, socket.error), e: - self.disconnect() + self._reset() raise AutoReconnect(str(e)) def __receive_data_on_socket(self, length, sock): @@ -804,14 +605,14 @@ def __send_and_receive(self, message, sock): """Send a message on the given socket and return the response data. """ - (request_id, data) = self.__check_bson_size(message) + (request_id, data) = message sock.sendall(data) return self.__receive_message_on_socket(1, request_id, sock) - # we just ignore _must_use_master here: it's only relevant for + # we just ignore _must_use_master here: it's only relavant for # MasterSlaveConnection instances. def _send_message_with_response(self, message, - _must_use_master=False, **kwargs): + _sock=None, _must_use_master=False): """Send a message to Mongo and return the response. Sends the given message and returns the response. @@ -819,19 +620,18 @@ :Parameters: - `message`: (request_id, data) pair making up the message to send """ - sock = self.__socket() + # hack so we can do find_master on a specific socket... + reset = False + if _sock is None: + reset = True + _sock = self.__pool.socket() try: - try: - if "network_timeout" in kwargs: - sock.settimeout(kwargs["network_timeout"]) - return self.__send_and_receive(message, sock) - except (ConnectionFailure, socket.error), e: - self.disconnect() - raise AutoReconnect(str(e)) - finally: - if "network_timeout" in kwargs: - sock.settimeout(self.__network_timeout) + return self.__send_and_receive(message, _sock) + except (ConnectionFailure, socket.error), e: + if reset: + self._reset() + raise AutoReconnect(str(e)) def start_request(self): """DEPRECATED all operations will start a request. 
@@ -872,8 +672,12 @@ def __repr__(self): if len(self.__nodes) == 1: return "Connection(%r, %r)" % (self.__host, self.__port) - else: - return "Connection(%r)" % ["%s:%d" % n for n in self.__nodes] + elif len(self.__nodes) == 2: + return ("Connection.paired((%r, %r), (%r, %r))" % + (self.__nodes[0][0], + self.__nodes[0][1], + self.__nodes[1][0], + self.__nodes[1][1])) def __getattr__(self, name): """Get a database by name. diff -Nru pymongo-1.11/pymongo/cursor.py pymongo-1.7/pymongo/cursor.py --- pymongo-1.11/pymongo/cursor.py 2011-04-26 17:34:30.000000000 +0000 +++ pymongo-1.7/pymongo/cursor.py 2010-06-17 15:37:47.000000000 +0000 @@ -14,12 +14,12 @@ """Cursor class to iterate over Mongo query results.""" -from bson.code import Code -from bson.son import SON from pymongo import (helpers, message) +from pymongo.code import Code from pymongo.errors import (InvalidOperation, AutoReconnect) +from pymongo.son import SON _QUERY_OPTIONS = { "tailable_cursor": 2, @@ -37,9 +37,8 @@ def __init__(self, collection, spec=None, fields=None, skip=0, limit=0, timeout=True, snapshot=False, tailable=False, sort=None, - max_scan=None, as_class=None, slave_okay=False, - _must_use_master=False, _is_command=False, - **kwargs): + max_scan=None, as_class=None, + _sock=None, _must_use_master=False, _is_command=False): """Create a new cursor. Should not be called directly by application developers - see @@ -79,16 +78,6 @@ self.__fields = fields self.__skip = skip self.__limit = limit - self.__batch_size = 0 - - # This is ugly. People want to be able to do cursor[5:5] and - # get an empty result set (old behavior was an - # exception). It's hard to do that right, though, because the - # server uses limit(0) to mean 'no limit'. So we set __empty - # in that case and check for it when iterating. We also unset - # it anytime we change __limit. 
- self.__empty = False - self.__timeout = timeout self.__tailable = tailable self.__snapshot = snapshot @@ -97,8 +86,7 @@ self.__explain = False self.__hint = None self.__as_class = as_class - self.__slave_okay = slave_okay - self.__tz_aware = collection.database.connection.tz_aware + self.__socket = _sock self.__must_use_master = _must_use_master self.__is_command = _is_command @@ -107,10 +95,6 @@ self.__retrieved = 0 self.__killed = False - # this is for passing network_timeout through if it's specified - # need to use kwargs as None is a legit value for network_timeout - self.__kwargs = kwargs - @property def collection(self): """The :class:`~pymongo.collection.Collection` that this @@ -151,17 +135,11 @@ """ copy = Cursor(self.__collection, self.__spec, self.__fields, self.__skip, self.__limit, self.__timeout, - self.__snapshot, self.__tailable) + self.__tailable, self.__snapshot) copy.__ordering = self.__ordering copy.__explain = self.__explain copy.__hint = self.__hint - copy.__batch_size = self.__batch_size - copy.__max_scan = self.__max_scan - copy.__as_class = self.__as_class - copy.__slave_okay = self.__slave_okay - copy.__must_use_master = self.__must_use_master - copy.__is_command = self.__is_command - copy.__kwargs = self.__kwargs + copy.__socket = self.__socket return copy def __die(self): @@ -199,8 +177,7 @@ options = 0 if self.__tailable: options |= _QUERY_OPTIONS["tailable_cursor"] - if (self.__collection.database.connection.slave_okay or - self.__slave_okay): + if self.__collection.database.connection.slave_okay: options |= _QUERY_OPTIONS["slave_okay"] if not self.__timeout: options |= _QUERY_OPTIONS["no_timeout"] @@ -216,9 +193,8 @@ """Limits the number of results to be returned by this cursor. Raises TypeError if limit is not an instance of int. Raises - InvalidOperation if this cursor has already been used. The - last `limit` applied to this cursor takes precedence. A limit - of ``0`` is equivalent to no limit. 
+ InvalidOperation if this cursor has already been used. The last `limit` + applied to this cursor takes precedence. :Parameters: - `limit`: the number of results to return @@ -229,34 +205,9 @@ raise TypeError("limit must be an int") self.__check_okay_to_chain() - self.__empty = False self.__limit = limit return self - def batch_size(self, batch_size): - """Set the size for batches of results returned by this cursor. - - Raises :class:`TypeError` if `batch_size` is not an instance - of :class:`int`. Raises :class:`ValueError` if `batch_size` is - less than ``0``. Raises - :class:`~pymongo.errors.InvalidOperation` if this - :class:`Cursor` has already been used. The last `batch_size` - applied to this cursor takes precedence. - - :Parameters: - - `batch_size`: The size of each batch of results requested. - - .. versionadded:: 1.9 - """ - if not isinstance(batch_size, int): - raise TypeError("batch_size must be an int") - if batch_size < 0: - raise ValueError("batch_size must be >= 0") - self.__check_okay_to_chain() - - self.__batch_size = batch_size == 1 and 2 or batch_size - return self - def skip(self, skip): """Skips the first `skip` results of this cursor. @@ -304,7 +255,6 @@ - `index`: An integer or slice index to be applied to this cursor """ self.__check_okay_to_chain() - self.__empty = False if isinstance(index, slice): if index.step is not None: raise IndexError("Cursor instances do not support slice steps") @@ -318,11 +268,9 @@ if index.stop is not None: limit = index.stop - skip - if limit < 0: + if limit <= 0: raise IndexError("stop index must be greater than start" "index for slice %r" % index) - if limit == 0: - self.__empty = True else: limit = 0 @@ -493,7 +441,7 @@ """Adds a $where clause to this query. The `code` argument must be an instance of :class:`basestring` - or :class:`~bson.code.Code` containing a JavaScript + or :class:`~pymongo.code.Code` containing a JavaScript expression. This expression will be evaluated for each document scanned. 
Only those documents for which the expression evaluates to *true* will be returned as @@ -520,10 +468,10 @@ """Send a query or getmore message and handles the response. """ db = self.__collection.database - kwargs = {"_must_use_master": self.__must_use_master} + kwargs = {"_sock": self.__socket, + "_must_use_master": self.__must_use_master} if self.__connection_id is not None: kwargs["_connection_to_use"] = self.__connection_id - kwargs.update(self.__kwargs) response = db.connection._send_message_with_response(message, **kwargs) @@ -537,10 +485,9 @@ try: response = helpers._unpack_response(response, self.__id, - self.__as_class, - self.__tz_aware) + self.__as_class) except AutoReconnect: - db.connection.disconnect() + db.connection._reset() raise self.__id = response["cursor_id"] @@ -564,7 +511,8 @@ if len(self.__data) or self.__killed: return len(self.__data) - if self.__id is None: # Query + if self.__id is None: + # Query self.__send_message( message.query(self.__query_options(), self.__collection.full_name, @@ -572,13 +520,15 @@ self.__query_spec(), self.__fields)) if not self.__id: self.__killed = True - elif self.__id: # Get More + elif self.__id: + # Get More + limit = 0 if self.__limit: - limit = self.__limit - self.__retrieved - if self.__batch_size: - limit = min(limit, self.__batch_size) - else: - limit = self.__batch_size + if self.__limit > self.__retrieved: + limit = self.__limit - self.__retrieved + else: + self.__killed = True + return 0 self.__send_message( message.get_more(self.__collection.full_name, @@ -603,8 +553,6 @@ return self def next(self): - if self.__empty: - raise StopIteration db = self.__collection.database if len(self.__data) or self._refresh(): next = db._fix_outgoing(self.__data.pop(0), self.__collection) diff -Nru pymongo-1.11/pymongo/database.py pymongo-1.7/pymongo/database.py --- pymongo-1.11/pymongo/database.py 2011-05-04 23:26:59.000000000 +0000 +++ pymongo-1.7/pymongo/database.py 2010-06-04 18:05:46.000000000 +0000 @@ -16,14 
+16,14 @@ import warnings -from bson.code import Code -from bson.dbref import DBRef -from bson.son import SON from pymongo import helpers +from pymongo.code import Code from pymongo.collection import Collection +from pymongo.dbref import DBRef from pymongo.errors import (CollectionInvalid, InvalidName, OperationFailure) +from pymongo.son import SON from pymongo.son_manipulator import ObjectIdInjector @@ -222,7 +222,7 @@ return son def command(self, command, value=1, - check=True, allowable_errors=[], **kwargs): + check=True, allowable_errors=[], _sock=None, **kwargs): """Issue a MongoDB command. Send command `command` to the database and return the @@ -256,7 +256,7 @@ .. note:: the order of keys in the `command` document is significant (the "verb" must come first), so commands which require multiple keys (e.g. `findandmodify`) - should use an instance of :class:`~bson.son.SON` or + should use an instance of :class:`~pymongo.son.SON` or a string and kwargs instead of a Python `dict`. - `value` (optional): value to use for the command verb when @@ -283,15 +283,15 @@ command.update(kwargs) - result = self["$cmd"].find_one(command, + result = self["$cmd"].find_one(command, _sock=_sock, _must_use_master=True, _is_command=True) - if check: - msg = "command %r failed: %%s" % command - helpers._check_command_response(result, self.connection.disconnect, - msg, allowable_errors) - + if check and not result["ok"]: + if result["errmsg"] in allowable_errors: + return result + raise OperationFailure("command %r failed: %s" % + (command, result["errmsg"])) return result def collection_names(self): @@ -323,32 +323,11 @@ self.command("drop", unicode(name), allowable_errors=["ns not found"]) - def validate_collection(self, name_or_collection, - scandata=False, full=False): + def validate_collection(self, name_or_collection): """Validate a collection. - Returns a dict of validation info. Raises CollectionInvalid if + Returns a string of validation info. 
Raises CollectionInvalid if validation fails. - - With MongoDB < 1.9 the result dict will include a `result` key - with a string value that represents the validation results. With - MongoDB >= 1.9 the `result` key no longer exists and the results - are split into individual fields in the result dict. - - :Parameters: - `name_or_collection`: A Collection object or the name of a - collection to validate. - `scandata`: Do extra checks beyond checking the overall - structure of the collection. - `full`: Have the server do a more thorough scan of the - collection. Use with `scandata` for a thorough scan - of the structure of the collection and the individual - documents. Ignored in MongoDB versions before 1.9. - - .. versionchanged:: 1.10.1+ - validate_collection previously returned a string. - .. versionadded:: 1.10.1+ - Added `scandata` and `full` options. """ name = name_or_collection if isinstance(name, Collection): @@ -358,35 +337,12 @@ raise TypeError("name_or_collection must be an instance of " "(Collection, str, unicode)") - result = self.command("validate", unicode(name), - scandata=scandata, full=full) - - valid = True - # Pre 1.9 results - if "result" in result: - info = result["result"] - if info.find("exception") != -1 or info.find("corrupt") != -1: - raise CollectionInvalid("%s invalid: %s" % (name, info)) - # Sharded results - elif "raw" in result: - for repl, res in result["raw"].iteritems(): - if "result" in res: - info = res["result"] - if (info.find("exception") != -1 or - info.find("corrupt") != -1): - raise CollectionInvalid("%s invalid: " - "%s" % (name, info)) - elif not res.get("valid", False): - valid = False - break - # Post 1.9 non-sharded results. 
- elif not result.get("valid", False): - valid = False + result = self.command("validate", unicode(name)) - if not valid: - raise CollectionInvalid("%s invalid: %r" % (name, result)) - - return result + info = result["result"] + if info.find("exception") != -1 or info.find("corrupt") != -1: + raise CollectionInvalid("%s invalid: %s" % (name, info)) + return info def profiling_level(self): """Get the database's current profiling level. @@ -435,7 +391,7 @@ if error.get("err", 0) is None: return None if error["err"] == "not master": - self.__connection.disconnect() + self.__connection._reset() return error def last_status(self): @@ -496,7 +452,7 @@ User `name` will no longer have permissions to access this :class:`Database`. - :Parameters: + :Paramaters: - `name`: the name of the user to remove .. versionadded:: 1.4 @@ -568,14 +524,12 @@ self.command("logout") def dereference(self, dbref): - """Dereference a :class:`~bson.dbref.DBRef`, getting the - document it points to. + """Dereference a DBRef, getting the SON object it points to. - Raises :class:`TypeError` if `dbref` is not an instance of - :class:`~bson.dbref.DBRef`. Returns a document, or ``None`` if - the reference does not point to a valid document. Raises - :class:`ValueError` if `dbref` has a database specified that - is different from the current database. + Raises TypeError if `dbref` is not an instance of DBRef. Returns a SON + object or None if the reference does not point to a valid object. + Raises ValueError if `dbref` has a database specified that is different + from the current database. :Parameters: - `dbref`: the reference @@ -589,24 +543,22 @@ return self[dbref.collection].find_one({"_id": dbref.id}) def eval(self, code, *args): - """Evaluate a JavaScript expression in MongoDB. + """Evaluate a JavaScript expression on the Mongo server. - Useful if you need to touch a lot of data lightly; in such a - scenario the network transfer of the data could be a - bottleneck. 
The `code` argument must be a JavaScript - function. Additional positional arguments will be passed to - that function when it is run on the server. - - Raises :class:`TypeError` if `code` is not an instance of - (str, unicode, `Code`). Raises - :class:`~pymongo.errors.OperationFailure` if the eval - fails. Returns the result of the evaluation. - - :Parameters: - - `code`: string representation of JavaScript code to be - evaluated - - `args` (optional): additional positional arguments are - passed to the `code` being evaluated + Useful if you need to touch a lot of data lightly; in such a scenario + the network transfer of the data could be a bottleneck. The `code` + argument must be a JavaScript function. Additional positional + arguments will be passed to that function when it is run on the + server. + + Raises TypeError if `code` is not an instance of (str, unicode, + `Code`). Raises OperationFailure if the eval fails. Returns the result + of the evaluation. + + :Parameters: + - `code`: string representation of JavaScript code to be evaluated + - `args` (optional): additional positional arguments are passed to + the `code` being evaluated """ if not isinstance(code, Code): code = Code(code) @@ -618,7 +570,7 @@ """This is only here so that some API misusages are easier to debug. """ raise TypeError("'Database' object is not callable. If you meant to " - "call the '%s' method on a 'Connection' object it is " + "call the '%s' method on a 'Collection' object it is " "failing because no such method exists." % self.__name) @@ -634,7 +586,7 @@ manual instantiation of this class should not be necessary. :class:`SystemJS` instances allow for easy manipulation and - access to server-side JavaScript: + access to `server-side JavaScript`_: .. doctest:: @@ -650,23 +602,21 @@ .. note:: Requires server version **>= 1.1.1** .. versionadded:: 1.5 + + .. 
_server-side JavaScript: http://www.mongodb.org/display/DOCS/Server-side+Code+Execution#Server-sideCodeExecution-Storingfunctionsserverside """ # can't just assign it since we've overridden __setattr__ - object.__setattr__(self, "_db", database) + object.__setattr__(self, "_database", database) def __setattr__(self, name, code): - self._db.system.js.save({"_id": name, "value": Code(code)}, safe=True) + self._database.system.js.save({"_id": name, "value": Code(code)}, + safe=True) def __delattr__(self, name): - self._db.system.js.remove({"_id": name}, safe=True) + self._database.system.js.remove({"_id": name}, safe=True) def __getattr__(self, name): - return lambda *args: self._db.eval("function() { return %s.apply(this," - "arguments); }" % name, *args) - - def list(self): - """Get a list of the names of the functions stored in this database. - - .. versionadded:: 1.9 - """ - return [x["_id"] for x in self._db.system.js.find(fields=["_id"])] + return lambda *args: self._database.eval("function() { return %s." + "apply(this, " + "arguments); }" % name, + *args) diff -Nru pymongo-1.11/pymongo/dbref.py pymongo-1.7/pymongo/dbref.py --- pymongo-1.11/pymongo/dbref.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/pymongo/dbref.py 2010-06-11 19:23:17.000000000 +0000 @@ -12,4 +12,88 @@ # See the License for the specific language governing permissions and # limitations under the License. -from bson.dbref import * +"""Tools for manipulating DBRefs (references to MongoDB documents).""" + +from pymongo.son import SON + + +class DBRef(object): + """A reference to a document stored in a Mongo database. + """ + + def __init__(self, collection, id, database=None): + """Initialize a new :class:`DBRef`. + + Raises :class:`TypeError` if `collection` or `database` is not + an instance of :class:`basestring`. `database` is optional and + allows references to documents to work across databases. 
+ + :Parameters: + - `collection`: name of the collection the document is stored in + - `id`: the value of the document's ``"_id"`` field + - `database` (optional): name of the database to reference + + .. versionadded:: 1.1.1 + The `database` parameter. + + .. mongodoc:: dbrefs + """ + if not isinstance(collection, basestring): + raise TypeError("collection must be an instance of basestring") + if database is not None and not isinstance(database, basestring): + raise TypeError("database must be an instance of basestring") + + self.__collection = collection + self.__id = id + self.__database = database + + @property + def collection(self): + """Get the name of this DBRef's collection as unicode. + """ + return self.__collection + + @property + def id(self): + """Get this DBRef's _id. + """ + return self.__id + + @property + def database(self): + """Get the name of this DBRef's database. + + Returns None if this DBRef doesn't specify a database. + + .. versionadded:: 1.1.1 + """ + return self.__database + + def as_doc(self): + """Get the SON document representation of this DBRef. + + Generally not needed by application developers + """ + doc = SON([("$ref", self.collection), + ("$id", self.id)]) + if self.database is not None: + doc["$db"] = self.database + return doc + + def __repr__(self): + if self.database is None: + return "DBRef(%r, %r)" % (self.collection, self.id) + return "DBRef(%r, %r, %r)" % (self.collection, self.id, self.database) + + def __cmp__(self, other): + if isinstance(other, DBRef): + return cmp([self.__database, self.__collection, self.__id], + [other.__database, other.__collection, other.__id]) + return NotImplemented + + def __hash__(self): + """Get a hash value for this :class:`DBRef`. + + .. 
versionadded:: 1.1 + """ + return hash((self.__collection, self.__id, self.__database)) diff -Nru pymongo-1.11/pymongo/encoding_helpers.c pymongo-1.7/pymongo/encoding_helpers.c --- pymongo-1.11/pymongo/encoding_helpers.c 1970-01-01 00:00:00.000000000 +0000 +++ pymongo-1.7/pymongo/encoding_helpers.c 2010-05-19 14:01:01.000000000 +0000 @@ -0,0 +1,118 @@ +/* + * Copyright 2009-2010 10gen, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "encoding_helpers.h" + +/* + * Portions Copyright 2001 Unicode, Inc. + * + * Disclaimer + * + * This source code is provided as is by Unicode, Inc. No claims are + * made as to fitness for any particular purpose. No warranties of any + * kind are expressed or implied. The recipient agrees to determine + * applicability of information provided. If this file has been + * purchased on magnetic or optical media from Unicode, Inc., the + * sole remedy for any claim will be exchange of defective media + * within 90 days of receipt. + * + * Limitations on Rights to Redistribute This Code + * + * Unicode, Inc. hereby grants the right to freely use the information + * supplied in this file in the creation of products supporting the + * Unicode Standard, and to make copies of this file in any form + * for internal or external distribution as long as this notice + * remains attached. 
+ */ + +/* + * Index into the table below with the first byte of a UTF-8 sequence to + * get the number of trailing bytes that are supposed to follow it. + */ +static const char trailingBytesForUTF8[256] = { + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, + 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 +}; + +/* --------------------------------------------------------------------- */ + +/* + * Utility routine to tell whether a sequence of bytes is legal UTF-8. + * This must be called with the length pre-determined by the first byte. + * The length can be set by: + * length = trailingBytesForUTF8[*source]+1; + * and the sequence is illegal right away if there aren't that many bytes + * available. + * If presented with a length > 4, this returns 0. The Unicode + * definition of UTF-8 goes up to 4-byte sequences. + */ +static unsigned char isLegalUTF8(const unsigned char* source, int length) { + unsigned char a; + const unsigned char* srcptr = source + length; + switch (length) { + default: return 0; + /* Everything else falls through when "true"... 
*/ + case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; + case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; + case 2: if ((a = (*--srcptr)) > 0xBF) return 0; + switch (*source) { + /* no fall-through in this inner switch */ + case 0xE0: if (a < 0xA0) return 0; break; + case 0xF0: if (a < 0x90) return 0; break; + case 0xF4: if (a > 0x8F) return 0; break; + default: if (a < 0x80) return 0; + } + case 1: if (*source >= 0x80 && *source < 0xC2) return 0; + if (*source > 0xF4) return 0; + } + return 1; +} + +result_t check_string(const unsigned char* string, const int length, + const char check_utf8, const char check_null) { + int position = 0; + /* By default we go character by character. Will be different for checking + * UTF-8 */ + int sequence_length = 1; + + if (!check_utf8 && !check_null) { + return VALID; + } + + while (position < length) { + if (check_null && *(string + position) == 0) { + return HAS_NULL; + } + if (check_utf8) { + sequence_length = trailingBytesForUTF8[*(string + position)] + 1; + if ((position + sequence_length) > length) { + return NOT_UTF_8; + } + if (!isLegalUTF8(string + position, sequence_length)) { + return NOT_UTF_8; + } + } + position += sequence_length; + } + + return VALID; +} diff -Nru pymongo-1.11/pymongo/encoding_helpers.h pymongo-1.7/pymongo/encoding_helpers.h --- pymongo-1.11/pymongo/encoding_helpers.h 1970-01-01 00:00:00.000000000 +0000 +++ pymongo-1.7/pymongo/encoding_helpers.h 2010-05-19 14:01:01.000000000 +0000 @@ -0,0 +1,29 @@ +/* + * Copyright 2009-2010 10gen, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ENCODING_HELPERS_H +#define ENCODING_HELPERS_H + +typedef enum { + VALID, + NOT_UTF_8, + HAS_NULL +} result_t; + +result_t check_string(const unsigned char* string, const int length, + const char check_utf8, const char check_null); + +#endif diff -Nru pymongo-1.11/pymongo/errors.py pymongo-1.7/pymongo/errors.py --- pymongo-1.11/pymongo/errors.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/pymongo/errors.py 2010-06-04 18:05:46.000000000 +0000 @@ -14,8 +14,6 @@ """Exceptions raised by PyMongo.""" -from bson.errors import * - class PyMongoError(Exception): """Base class for all PyMongo exceptions. @@ -48,20 +46,6 @@ class OperationFailure(PyMongoError): """Raised when a database operation fails. - - .. versionadded:: 1.8 - The :attr:`code` attribute. - """ - - def __init__(self, error, code=None): - self.code = code - PyMongoError.__init__(self, error) - - -class TimeoutError(OperationFailure): - """Raised when a database operation times out. - - .. versionadded:: 1.8 """ @@ -79,17 +63,37 @@ """ +class CollectionInvalid(PyMongoError): + """Raised when collection validation fails. + """ + + class InvalidName(PyMongoError): """Raised when an invalid name is used. """ -class CollectionInvalid(PyMongoError): - """Raised when collection validation fails. +class InvalidBSON(PyMongoError): + """Raised when trying to create a BSON object from invalid data. + """ + + +class InvalidStringData(PyMongoError): + """Raised when trying to encode a string containing non-UTF8 data. 
+ """ + + +class InvalidDocument(PyMongoError): + """Raised when trying to create a BSON object from an invalid document. + """ + + +class InvalidId(PyMongoError): + """Raised when trying to create an ObjectId from invalid data. """ -class InvalidURI(ConfigurationError): +class InvalidURI(PyMongoError): """Raised when trying to parse an invalid mongodb URI. .. versionadded:: 1.5 diff -Nru pymongo-1.11/pymongo/helpers.py pymongo-1.7/pymongo/helpers.py --- pymongo-1.11/pymongo/helpers.py 2011-03-14 22:30:07.000000000 +0000 +++ pymongo-1.7/pymongo/helpers.py 2010-06-04 18:05:46.000000000 +0000 @@ -22,12 +22,11 @@ _md5func = md5.new import struct -import bson -from bson.son import SON import pymongo -from pymongo.errors import (AutoReconnect, - OperationFailure, - TimeoutError) +from pymongo import bson +from pymongo.errors import (OperationFailure, + AutoReconnect) +from pymongo.son import SON def _index_list(key_or_list, direction=None): @@ -57,7 +56,7 @@ "mean %r?" % list(index_list.iteritems())) elif not isinstance(index_list, list): raise TypeError("must use a list of (key, direction) pairs, " - "not: " + repr(index_list)) + "not: %r" % index_list) if not len(index_list): raise ValueError("key_or_list must not be the empty list") @@ -72,7 +71,7 @@ return index -def _unpack_response(response, cursor_id=None, as_class=dict, tz_aware=False): +def _unpack_response(response, cursor_id=None, as_class=dict): """Unpack a response from the database. Check the response for errors and unpack, returning a dictionary @@ -93,7 +92,7 @@ raise OperationFailure("cursor id '%s' not valid at server" % cursor_id) elif response_flag & 2: - error_object = bson.BSON(response[20:]).decode() + error_object = bson.BSON(response[20:]).to_dict() if error_object["$err"] == "not master": raise AutoReconnect("master has changed") raise OperationFailure("database error: %s" % @@ -103,29 +102,11 @@ result["cursor_id"] = struct.unpack("`_. 
They allow for specialized +encoding and decoding of MongoDB documents into `Mongo Extended JSON +`_'s *Strict* mode. +This lets you encode / decode MongoDB documents to JSON even when they use +special PyMongo types. + +Example usage (serialization):: + +>>> json.dumps(..., default=json_util.default) + +Example usage (deserialization):: + +>>> json.loads(..., object_hook=json_util.object_hook) + +Currently this does not handle special encoding and decoding for +:class:`~pymongo.binary.Binary` and :class:`~pymongo.code.Code` +instances. + +.. versionchanged:: 1.2 + Added support for encoding/decoding datetimes and regular expressions. +""" + +import calendar +import datetime +import re + +from pymongo.dbref import DBRef +from pymongo.objectid import ObjectId + +# TODO support Binary and Code +# Binary and Code are tricky because they subclass str so json thinks it can +# handle them. Not sure what the proper way to get around this is... +# +# One option is to just add some other method that users need to call _before_ +# calling json.dumps or json.loads. That is pretty terrible though... + +# TODO share this with bson.py? +_RE_TYPE = type(re.compile("foo")) + + +def object_hook(dct): + if "$oid" in dct: + return ObjectId(str(dct["$oid"])) + if "$ref" in dct: + return DBRef(dct["$ref"], dct["$id"], dct.get("$db", None)) + if "$date" in dct: + return datetime.datetime.utcfromtimestamp(float(dct["$date"]) / 1000.0) + if "$regex" in dct: + flags = 0 + if "i" in dct["$options"]: + flags |= re.IGNORECASE + if "m" in dct["$options"]: + flags |= re.MULTILINE + return re.compile(dct["$regex"], flags) + return dct + + +def default(obj): + if isinstance(obj, ObjectId): + return {"$oid": str(obj)} + if isinstance(obj, DBRef): + return obj.as_doc() + if isinstance(obj, datetime.datetime): + # TODO share this code w/ bson.py? 
+ millis = int(calendar.timegm(obj.timetuple()) * 1000 + + obj.microsecond / 1000) + return {"$date": millis} + if isinstance(obj, _RE_TYPE): + flags = "" + if obj.flags & re.IGNORECASE: + flags += "i" + if obj.flags & re.MULTILINE: + flags += "m" + return {"$regex": obj.pattern, + "$options": flags} + raise TypeError("%r is not JSON serializable" % obj) diff -Nru pymongo-1.11/pymongo/master_slave_connection.py pymongo-1.7/pymongo/master_slave_connection.py --- pymongo-1.11/pymongo/master_slave_connection.py 2011-04-06 22:36:02.000000000 +0000 +++ pymongo-1.7/pymongo/master_slave_connection.py 2010-06-04 18:05:46.000000000 +0000 @@ -15,15 +15,12 @@ """Master-Slave connection to Mongo. Performs all writes to Master instance and distributes reads among all -slaves. Reads are tried on each slave in turn until the read succeeds -or all slaves failed. -""" +instances.""" import random from pymongo.connection import Connection from pymongo.database import Database -from pymongo.errors import AutoReconnect class MasterSlaveConnection(object): @@ -69,16 +66,6 @@ def slaves(self): return self.__slaves - # TODO this is a temporary hack PYTHON-136 is the right solution for this - @property - def document_class(self): - return dict - - # TODO this is a temporary hack PYTHON-136 is the right solution for this - @property - def tz_aware(self): - return True - @property def slave_okay(self): """Is it okay for this connection to connect directly to a slave? @@ -87,19 +74,6 @@ """ return True - def disconnect(self): - """Disconnect from MongoDB. - - Disconnecting will call disconnect on all master and slave - connections. - - .. seealso:: Module :mod:`~pymongo.connection` - .. versionadded:: 1.10+ - """ - self.__master.disconnect() - for slave in self.__slaves: - slave.disconnect() - def set_cursor_manager(self, manager_class): """Set the cursor manager for this connection. 
@@ -134,8 +108,9 @@ # _connection_to_use is a hack that we need to include to make sure # that getmore operations can be sent to the same instance on which # the cursor actually resides... - def _send_message_with_response(self, message, _connection_to_use=None, - _must_use_master=False, **kwargs): + def _send_message_with_response(self, message, + _sock=None, _connection_to_use=None, + _must_use_master=False): """Receive a message from Mongo. Sends the given message and returns a (connection_id, response) pair. @@ -146,34 +121,26 @@ """ if _connection_to_use is not None: if _connection_to_use == -1: - return (-1, - self.__master._send_message_with_response(message, - **kwargs)) + return (-1, self.__master._send_message_with_response(message, + _sock)) else: return (_connection_to_use, self.__slaves[_connection_to_use] - ._send_message_with_response(message, **kwargs)) + ._send_message_with_response(message, _sock)) + + # for now just load-balance randomly among slaves only... + connection_id = random.randrange(0, len(self.__slaves)) # _must_use_master is set for commands, which must be sent to the # master instance. any queries in a request must be sent to the # master since that is where writes go. - if _must_use_master or self.__in_request: + if _must_use_master or self.__in_request or connection_id == -1: return (-1, self.__master._send_message_with_response(message, - **kwargs)) - - # Iterate through the slaves randomly until we have success. Raise - # reconnect if they all fail. - for connection_id in random.sample(range(0, - len(self.__slaves)), - len(self.__slaves)): - try: - slave = self.__slaves[connection_id] - return (connection_id, - slave._send_message_with_response(message, **kwargs)) - except AutoReconnect: - pass + _sock)) - raise AutoReconnect("failed to connect to slaves") + slaves = self.__slaves[connection_id] + return (connection_id, slaves._send_message_with_response(message, + _sock)) def start_request(self): """Start a "request". 
diff -Nru pymongo-1.11/pymongo/max_key.py pymongo-1.7/pymongo/max_key.py --- pymongo-1.11/pymongo/max_key.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/pymongo/max_key.py 2010-06-17 15:37:47.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2009-2010 10gen, Inc. +# Copyright 2010 10gen, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,4 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -from bson.max_key import * +"""Representation for the MongoDB internal MaxKey type. + +.. versionadded:: 1.7 +""" + +class MaxKey(object): + """MongoDB internal MaxKey type. + """ + + def __eq__(self, other): + if isinstance(other, MaxKey): + return True + return NotImplemented + + def __repr__(self): + return "MaxKey()" diff -Nru pymongo-1.11/pymongo/message.py pymongo-1.7/pymongo/message.py --- pymongo-1.11/pymongo/message.py 2011-04-18 17:56:59.000000000 +0000 +++ pymongo-1.7/pymongo/message.py 2010-06-04 18:05:46.000000000 +0000 @@ -25,8 +25,7 @@ import random import struct -import bson -from bson.son import SON +from pymongo import bson try: from pymongo import _cbson _use_c = True @@ -37,16 +36,11 @@ __ZERO = "\x00\x00\x00\x00" -MAX_INT32 = 2147483647 -MIN_INT32 = -2147483648 - -def __last_error(args): +def __last_error(): """Data to send to do a lastError. """ - cmd = SON([("getlasterror", 1)]) - cmd.update(args) - return query(0, "admin.$cmd", 0, -1, cmd) + return query(0, "admin.$cmd", 0, -1, {"getlasterror": 1}) def __pack_message(operation, data): @@ -54,7 +48,7 @@ Returns the resultant message string. """ - request_id = random.randint(MIN_INT32, MAX_INT32) + request_id = random.randint(-2 ** 31 - 1, 2 ** 31) message = struct.pack("`_. 
+""" + +import calendar +import datetime +try: + import hashlib + _md5func = hashlib.md5 +except ImportError: # for Python < 2.5 + import md5 + _md5func = md5.new +import os +import socket +import struct +import threading +import time + +from pymongo.errors import InvalidId + + +def _machine_bytes(): + """Get the machine portion of an ObjectId. + """ + machine_hash = _md5func() + machine_hash.update(socket.gethostname()) + return machine_hash.digest()[0:3] + + +class ObjectId(object): + """A Mongo ObjectId. + """ + + _inc = 0 + _inc_lock = threading.Lock() + + _machine_bytes = _machine_bytes() + + def __init__(self, oid=None): + """Initialize a new ObjectId. + + If `oid` is ``None``, create a new (unique) ObjectId. If `oid` + is an instance of (``basestring``, :class:`ObjectId`) validate + it and use that. Otherwise, a :class:`TypeError` is + raised. If `oid` is invalid, + :class:`~pymongo.errors.InvalidId` is raised. + + :Parameters: + - `oid` (optional): a valid ObjectId (12 byte binary or 24 character + hex string) + + .. versionadded:: 1.2.1 + The `oid` parameter can be a ``unicode`` instance (that contains + only hexadecimal digits). + + .. mongodoc:: objectids + """ + if oid is None: + self.__generate() + else: + self.__validate(oid) + + @classmethod + def from_datetime(cls, generation_time): + """Create a dummy ObjectId instance with a specific generation time. + + This method is useful for doing range queries on a field + containing :class:`ObjectId` instances. + + .. warning:: + It is not safe to insert a document containing an ObjectId + generated using this method. This method deliberately + eliminates the uniqueness guarantee that ObjectIds + generally provide. ObjectIds generated with this method + should be used exclusively in queries. + + `generation_time` will be converted to a UTC timestamp + naively. Pass either a naive :class:`~datetime.datetime` + instance containing UTC, or an aware instance that has been + converted to UTC. 
+ + An example using this helper to get documents where ``"_id"`` + was generated before January 1, 2010 would be: + + >>> gen_time = datetime.datetime(2010, 1, 1) + >>> dummy_id = ObjectId.from_datetime(gen_time) + >>> result = collection.find({"_id": {"$lt": dummy_id}}) + + :Parameters: + - `generation_time`: :class:`~datetime.datetime` to be used + as the generation time for the resulting ObjectId. + + .. versionadded:: 1.6 + """ + ts = calendar.timegm(generation_time.timetuple()) + oid = struct.pack(">i", int(ts)) + "\x00" * 8 + return cls(oid) + + def __generate(self): + """Generate a new value for this ObjectId. + """ + oid = "" + + # 4 bytes current time + oid += struct.pack(">i", int(time.time())) + + # 3 bytes machine + oid += ObjectId._machine_bytes + + # 2 bytes pid + oid += struct.pack(">H", os.getpid() % 0xFFFF) + + # 3 bytes inc + ObjectId._inc_lock.acquire() + oid += struct.pack(">i", ObjectId._inc)[1:4] + ObjectId._inc = (ObjectId._inc + 1) % 0xFFFFFF + ObjectId._inc_lock.release() + + self.__id = oid + + def __validate(self, oid): + """Validate and use the given id for this ObjectId. + + Raises TypeError if id is not an instance of (str, ObjectId) and + InvalidId if it is not a valid ObjectId. + + :Parameters: + - `oid`: a valid ObjectId + """ + if isinstance(oid, ObjectId): + self.__id = oid.__id + elif isinstance(oid, basestring): + if len(oid) == 12: + self.__id = oid + elif len(oid) == 24: + try: + self.__id = oid.decode("hex") + except TypeError: + raise InvalidId("%s is not a valid ObjectId" % oid) + else: + raise InvalidId("%s is not a valid ObjectId" % oid) + else: + raise TypeError("id must be an instance of (str, ObjectId), " + "not %s" % type(oid)) + + @property + def binary(self): + """12-byte binary representation of this ObjectId. + """ + return self.__id + + @property + def generation_time(self): + """A :class:`datetime.datetime` instance representing the time of + generation for this :class:`ObjectId`. 
+ + The :class:`datetime.datetime` is always naive and represents the + generation time in UTC. It is precise to the second. + + .. versionadded:: 1.2 + """ + t = struct.unpack(">i", self.__id[0:4])[0] + return datetime.datetime.utcfromtimestamp(t) + + def __str__(self): + return self.__id.encode("hex") + + def __repr__(self): + return "ObjectId('%s')" % self.__id.encode("hex") + + def __cmp__(self, other): + if isinstance(other, ObjectId): + return cmp(self.__id, other.__id) + return NotImplemented + + def __hash__(self): + """Get a hash value for this :class:`ObjectId`. + + .. versionadded:: 1.1 + """ + return hash(self.__id) diff -Nru pymongo-1.11/pymongo/son_manipulator.py pymongo-1.7/pymongo/son_manipulator.py --- pymongo-1.11/pymongo/son_manipulator.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/pymongo/son_manipulator.py 2010-05-19 14:01:01.000000000 +0000 @@ -18,9 +18,9 @@ installed on a database by calling `pymongo.database.Database.add_son_manipulator`.""" -from bson.dbref import DBRef -from bson.objectid import ObjectId -from bson.son import SON +from pymongo.dbref import DBRef +from pymongo.objectid import ObjectId +from pymongo.son import SON class SONManipulator(object): diff -Nru pymongo-1.11/pymongo/son.py pymongo-1.7/pymongo/son.py --- pymongo-1.11/pymongo/son.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/pymongo/son.py 2010-05-19 14:01:01.000000000 +0000 @@ -12,4 +12,193 @@ # See the License for the specific language governing permissions and # limitations under the License. -from bson.son import * +"""Tools for creating and manipulating SON, the Serialized Ocument Notation. + +Regular dictionaries can be used instead of SON objects, but not when the order +of keys is important. A SON object can be used just like a normal Python +dictionary.""" + + +class SON(dict): + """SON data. + + A subclass of dict that maintains ordering of keys and provides a few extra + niceties for dealing with SON. 
SON objects can be saved and retrieved from + Mongo. + + The mapping from Python types to Mongo types is as follows: + + =================================== ============= =================== + Python Type Mongo Type Supported Direction + =================================== ============= =================== + None null both + bool boolean both + int number (int) both + float number (real) both + string string py -> mongo + unicode string both + list array both + dict / `SON` object both + datetime.datetime [#dt]_ [#dt2]_ date both + compiled re regex both + `pymongo.binary.Binary` binary both + `pymongo.objectid.ObjectId` oid both + `pymongo.dbref.DBRef` dbref both + None undefined mongo -> py + unicode code mongo -> py + `pymongo.code.Code` code py -> mongo + unicode symbol mongo -> py + =================================== ============= =================== + + Note that to save binary data it must be wrapped as an instance of + `pymongo.binary.Binary`. Otherwise it will be saved as a Mongo string and + retrieved as unicode. + + .. [#dt] datetime.datetime instances will be rounded to the nearest + millisecond when saved + .. [#dt2] all datetime.datetime instances are treated as *naive*. clients + should always use UTC. + """ + + def __init__(self, data=None, **kwargs): + self.__keys = [] + dict.__init__(self) + self.update(data) + self.update(kwargs) + + def __repr__(self): + result = [] + for key in self.__keys: + result.append("(%r, %r)" % (key, self[key])) + return "SON([%s])" % ", ".join(result) + + def __setitem__(self, key, value): + if key not in self: + self.__keys.append(key) + dict.__setitem__(self, key, value) + + def __delitem__(self, key): + self.__keys.remove(key) + dict.__delitem__(self, key) + + def keys(self): + return list(self.__keys) + + def copy(self): + other = SON() + other.update(self) + return other + + # TODO this is all from UserDict.DictMixin. it could probably be made more + # efficient. 
+ # second level definitions support higher levels + def __iter__(self): + for k in self.keys(): + yield k + + def has_key(self, key): + return key in self.keys() + + def __contains__(self, key): + return key in self.keys() + + # third level takes advantage of second level definitions + def iteritems(self): + for k in self: + yield (k, self[k]) + + def iterkeys(self): + return self.__iter__() + + # fourth level uses definitions from lower levels + def itervalues(self): + for _, v in self.iteritems(): + yield v + + def values(self): + return [v for _, v in self.iteritems()] + + def items(self): + return list(self.iteritems()) + + def clear(self): + for key in self.keys(): + del self[key] + + def setdefault(self, key, default=None): + try: + return self[key] + except KeyError: + self[key] = default + return default + + def pop(self, key, *args): + if len(args) > 1: + raise TypeError("pop expected at most 2 arguments, got "\ + + repr(1 + len(args))) + try: + value = self[key] + except KeyError: + if args: + return args[0] + raise + del self[key] + return value + + def popitem(self): + try: + k, v = self.iteritems().next() + except StopIteration: + raise KeyError('container is empty') + del self[k] + return (k, v) + + def update(self, other=None, **kwargs): + # Make progressively weaker assumptions about "other" + if other is None: + pass + elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups + for k, v in other.iteritems(): + self[k] = v + elif hasattr(other, 'keys'): + for k in other.keys(): + self[k] = other[k] + else: + for k, v in other: + self[k] = v + if kwargs: + self.update(kwargs) + + def get(self, key, default=None): + try: + return self[key] + except KeyError: + return default + + def __cmp__(self, other): + if isinstance(other, SON): + return cmp((dict(self.iteritems()), self.keys()), + (dict(other.iteritems()), other.keys())) + return cmp(dict(self.iteritems()), other) + + def __len__(self): + return len(self.keys()) + + def 
to_dict(self): + """Convert a SON document to a normal Python dictionary instance. + + This is trickier than just *dict(...)* because it needs to be + recursive. + """ + + def transform_value(value): + if isinstance(value, list): + return [transform_value(v) for v in value] + if isinstance(value, SON): + value = dict(value) + if isinstance(value, dict): + for k, v in value.iteritems(): + value[k] = transform_value(v) + return value + + return transform_value(dict(self)) diff -Nru pymongo-1.11/pymongo/time64.c pymongo-1.7/pymongo/time64.c --- pymongo-1.11/pymongo/time64.c 1970-01-01 00:00:00.000000000 +0000 +++ pymongo-1.7/pymongo/time64.c 2010-05-19 14:01:01.000000000 +0000 @@ -0,0 +1,823 @@ +/* + +Copyright (c) 2007-2010 Michael G Schwern + +This software originally derived from Paul Sheer's pivotal_gmtime_r.c. + +The MIT License: + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +*/ + +/* + +Programmers who have available to them 64-bit time values as a 'long +long' type can use localtime64_r() and gmtime64_r() which correctly +converts the time even on 32-bit systems. Whether you have 64-bit time +values will depend on the operating system. + +localtime64_r() is a 64-bit equivalent of localtime_r(). + +gmtime64_r() is a 64-bit equivalent of gmtime_r(). + +*/ + +#include +#include +#include +#include +#include +#include +#include "time64.h" +#include "time64_limits.h" + + +/* Spec says except for stftime() and the _r() functions, these + all return static memory. Stabbings! */ +static struct TM Static_Return_Date; +static char Static_Return_String[35]; + +static const int days_in_month[2][12] = { + {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}, + {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}, +}; + +static const int julian_days_by_month[2][12] = { + {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334}, + {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335}, +}; + +static char wday_name[7][4] = { + "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" +}; + +static char mon_name[12][4] = { + "Jan", "Feb", "Mar", "Apr", "May", "Jun", + "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" +}; + +static const int length_of_year[2] = { 365, 366 }; + +/* Some numbers relating to the gregorian cycle */ +static const Year years_in_gregorian_cycle = 400; +#define days_in_gregorian_cycle ((365 * 400) + 100 - 4 + 1) +static const Time64_T seconds_in_gregorian_cycle = days_in_gregorian_cycle * 60LL * 60LL * 24LL; + +/* Year range we can trust the time funcitons with */ +#define MAX_SAFE_YEAR 2037 +#define MIN_SAFE_YEAR 1971 + +/* 28 year Julian calendar cycle */ +#define SOLAR_CYCLE_LENGTH 28 + +/* Year cycle from MAX_SAFE_YEAR down. 
*/ +static const int safe_years_high[SOLAR_CYCLE_LENGTH] = { + 2016, 2017, 2018, 2019, + 2020, 2021, 2022, 2023, + 2024, 2025, 2026, 2027, + 2028, 2029, 2030, 2031, + 2032, 2033, 2034, 2035, + 2036, 2037, 2010, 2011, + 2012, 2013, 2014, 2015 +}; + +/* Year cycle from MIN_SAFE_YEAR up */ +static const int safe_years_low[SOLAR_CYCLE_LENGTH] = { + 1996, 1997, 1998, 1971, + 1972, 1973, 1974, 1975, + 1976, 1977, 1978, 1979, + 1980, 1981, 1982, 1983, + 1984, 1985, 1986, 1987, + 1988, 1989, 1990, 1991, + 1992, 1993, 1994, 1995, +}; + +/* This isn't used, but it's handy to look at */ +static const int dow_year_start[SOLAR_CYCLE_LENGTH] = { + 5, 0, 1, 2, /* 0 2016 - 2019 */ + 3, 5, 6, 0, /* 4 */ + 1, 3, 4, 5, /* 8 1996 - 1998, 1971*/ + 6, 1, 2, 3, /* 12 1972 - 1975 */ + 4, 6, 0, 1, /* 16 */ + 2, 4, 5, 6, /* 20 2036, 2037, 2010, 2011 */ + 0, 2, 3, 4 /* 24 2012, 2013, 2014, 2015 */ +}; + +/* Let's assume people are going to be looking for dates in the future. + Let's provide some cheats so you can skip ahead. + This has a 4x speed boost when near 2008. +*/ +/* Number of days since epoch on Jan 1st, 2008 GMT */ +#define CHEAT_DAYS (1199145600 / 24 / 60 / 60) +#define CHEAT_YEARS 108 + +#define IS_LEAP(n) ((!(((n) + 1900) % 400) || (!(((n) + 1900) % 4) && (((n) + 1900) % 100))) != 0) +#define WRAP(a,b,m) ((a) = ((a) < 0 ) ? 
((b)--, (a) + (m)) : (a)) + +#ifdef USE_SYSTEM_LOCALTIME +# define SHOULD_USE_SYSTEM_LOCALTIME(a) ( \ + (a) <= SYSTEM_LOCALTIME_MAX && \ + (a) >= SYSTEM_LOCALTIME_MIN \ +) +#else +# define SHOULD_USE_SYSTEM_LOCALTIME(a) (0) +#endif + +#ifdef USE_SYSTEM_GMTIME +# define SHOULD_USE_SYSTEM_GMTIME(a) ( \ + (a) <= SYSTEM_GMTIME_MAX && \ + (a) >= SYSTEM_GMTIME_MIN \ +) +#else +# define SHOULD_USE_SYSTEM_GMTIME(a) (0) +#endif + +/* Multi varadic macros are a C99 thing, alas */ +#ifdef TIME_64_DEBUG +# define TIME64_TRACE(format) (fprintf(stderr, format)) +# define TIME64_TRACE1(format, var1) (fprintf(stderr, format, var1)) +# define TIME64_TRACE2(format, var1, var2) (fprintf(stderr, format, var1, var2)) +# define TIME64_TRACE3(format, var1, var2, var3) (fprintf(stderr, format, var1, var2, var3)) +#else +# define TIME64_TRACE(format) ((void)0) +# define TIME64_TRACE1(format, var1) ((void)0) +# define TIME64_TRACE2(format, var1, var2) ((void)0) +# define TIME64_TRACE3(format, var1, var2, var3) ((void)0) +#endif + + +static int is_exception_century(Year year) +{ + int is_exception = ((year % 100 == 0) && !(year % 400 == 0)); + TIME64_TRACE1("# is_exception_century: %s\n", is_exception ? "yes" : "no"); + + return(is_exception); +} + + +/* Compare two dates. + The result is like cmp. 
+ Ignores things like gmtoffset and dst +*/ +int cmp_date( const struct TM* left, const struct tm* right ) { + if( left->tm_year > right->tm_year ) + return 1; + else if( left->tm_year < right->tm_year ) + return -1; + + if( left->tm_mon > right->tm_mon ) + return 1; + else if( left->tm_mon < right->tm_mon ) + return -1; + + if( left->tm_mday > right->tm_mday ) + return 1; + else if( left->tm_mday < right->tm_mday ) + return -1; + + if( left->tm_hour > right->tm_hour ) + return 1; + else if( left->tm_hour < right->tm_hour ) + return -1; + + if( left->tm_min > right->tm_min ) + return 1; + else if( left->tm_min < right->tm_min ) + return -1; + + if( left->tm_sec > right->tm_sec ) + return 1; + else if( left->tm_sec < right->tm_sec ) + return -1; + + return 0; +} + + +/* Check if a date is safely inside a range. + The intention is to check if its a few days inside. +*/ +int date_in_safe_range( const struct TM* date, const struct tm* min, const struct tm* max ) { + if( cmp_date(date, min) == -1 ) + return 0; + + if( cmp_date(date, max) == 1 ) + return 0; + + return 1; +} + + +/* timegm() is not in the C or POSIX spec, but it is such a useful + extension I would be remiss in leaving it out. 
Also I need it + for localtime64() +*/ +Time64_T timegm64(const struct TM *date) { + Time64_T days = 0; + Time64_T seconds = 0; + Year year; + Year orig_year = (Year)date->tm_year; + int cycles = 0; + + if( orig_year > 100 ) { + cycles = (orig_year - 100) / 400; + orig_year -= cycles * 400; + days += (Time64_T)cycles * days_in_gregorian_cycle; + } + else if( orig_year < -300 ) { + cycles = (orig_year - 100) / 400; + orig_year -= cycles * 400; + days += (Time64_T)cycles * days_in_gregorian_cycle; + } + TIME64_TRACE3("# timegm/ cycles: %d, days: %lld, orig_year: %lld\n", cycles, days, orig_year); + + if( orig_year > 70 ) { + year = 70; + while( year < orig_year ) { + days += length_of_year[IS_LEAP(year)]; + year++; + } + } + else if ( orig_year < 70 ) { + year = 69; + do { + days -= length_of_year[IS_LEAP(year)]; + year--; + } while( year >= orig_year ); + } + + days += julian_days_by_month[IS_LEAP(orig_year)][date->tm_mon]; + days += date->tm_mday - 1; + + seconds = days * 60 * 60 * 24; + + seconds += date->tm_hour * 60 * 60; + seconds += date->tm_min * 60; + seconds += date->tm_sec; + + return(seconds); +} + + +static int check_tm(struct TM *tm) +{ + /* Don't forget leap seconds */ + assert(tm->tm_sec >= 0); + assert(tm->tm_sec <= 61); + + assert(tm->tm_min >= 0); + assert(tm->tm_min <= 59); + + assert(tm->tm_hour >= 0); + assert(tm->tm_hour <= 23); + + assert(tm->tm_mday >= 1); + assert(tm->tm_mday <= days_in_month[IS_LEAP(tm->tm_year)][tm->tm_mon]); + + assert(tm->tm_mon >= 0); + assert(tm->tm_mon <= 11); + + assert(tm->tm_wday >= 0); + assert(tm->tm_wday <= 6); + + assert(tm->tm_yday >= 0); + assert(tm->tm_yday <= length_of_year[IS_LEAP(tm->tm_year)]); + +#ifdef HAS_TM_TM_GMTOFF + assert(tm->tm_gmtoff >= -24 * 60 * 60); + assert(tm->tm_gmtoff <= 24 * 60 * 60); +#endif + + return 1; +} + + +/* The exceptional centuries without leap years cause the cycle to + shift by 16 +*/ +static Year cycle_offset(Year year) +{ + const Year start_year = 2000; + Year year_diff = 
year - start_year; + Year exceptions; + + if( year > start_year ) + year_diff--; + + exceptions = year_diff / 100; + exceptions -= year_diff / 400; + + TIME64_TRACE3("# year: %lld, exceptions: %lld, year_diff: %lld\n", + year, exceptions, year_diff); + + return exceptions * 16; +} + +/* For a given year after 2038, pick the latest possible matching + year in the 28 year calendar cycle. + + A matching year... + 1) Starts on the same day of the week. + 2) Has the same leap year status. + + This is so the calendars match up. + + Also the previous year must match. When doing Jan 1st you might + wind up on Dec 31st the previous year when doing a -UTC time zone. + + Finally, the next year must have the same start day of week. This + is for Dec 31st with a +UTC time zone. + It doesn't need the same leap year status since we only care about + January 1st. +*/ +static int safe_year(const Year year) +{ + int safe_year; + Year year_cycle; + + if( year >= MIN_SAFE_YEAR && year <= MAX_SAFE_YEAR ) { + return (int)year; + } + + year_cycle = year + cycle_offset(year); + + /* safe_years_low is off from safe_years_high by 8 years */ + if( year < MIN_SAFE_YEAR ) + year_cycle -= 8; + + /* Change non-leap xx00 years to an equivalent */ + if( is_exception_century(year) ) + year_cycle += 11; + + /* Also xx01 years, since the previous year will be wrong */ + if( is_exception_century(year - 1) ) + year_cycle += 17; + + year_cycle %= SOLAR_CYCLE_LENGTH; + if( year_cycle < 0 ) + year_cycle = SOLAR_CYCLE_LENGTH + year_cycle; + + assert( year_cycle >= 0 ); + assert( year_cycle < SOLAR_CYCLE_LENGTH ); + if( year < MIN_SAFE_YEAR ) + safe_year = safe_years_low[year_cycle]; + else if( year > MAX_SAFE_YEAR ) + safe_year = safe_years_high[year_cycle]; + else + assert(0); + + TIME64_TRACE3("# year: %lld, year_cycle: %lld, safe_year: %d\n", + year, year_cycle, safe_year); + + assert(safe_year <= MAX_SAFE_YEAR && safe_year >= MIN_SAFE_YEAR); + + return safe_year; +} + + +void copy_tm_to_TM64(const 
struct tm *src, struct TM *dest) { + if( src == NULL ) { + memset(dest, 0, sizeof(*dest)); + } + else { +# ifdef USE_TM64 + dest->tm_sec = src->tm_sec; + dest->tm_min = src->tm_min; + dest->tm_hour = src->tm_hour; + dest->tm_mday = src->tm_mday; + dest->tm_mon = src->tm_mon; + dest->tm_year = (Year)src->tm_year; + dest->tm_wday = src->tm_wday; + dest->tm_yday = src->tm_yday; + dest->tm_isdst = src->tm_isdst; + +# ifdef HAS_TM_TM_GMTOFF + dest->tm_gmtoff = src->tm_gmtoff; +# endif + +# ifdef HAS_TM_TM_ZONE + dest->tm_zone = src->tm_zone; +# endif + +# else + /* They're the same type */ + memcpy(dest, src, sizeof(*dest)); +# endif + } +} + + +void copy_TM64_to_tm(const struct TM *src, struct tm *dest) { + if( src == NULL ) { + memset(dest, 0, sizeof(*dest)); + } + else { +# ifdef USE_TM64 + dest->tm_sec = src->tm_sec; + dest->tm_min = src->tm_min; + dest->tm_hour = src->tm_hour; + dest->tm_mday = src->tm_mday; + dest->tm_mon = src->tm_mon; + dest->tm_year = (int)src->tm_year; + dest->tm_wday = src->tm_wday; + dest->tm_yday = src->tm_yday; + dest->tm_isdst = src->tm_isdst; + +# ifdef HAS_TM_TM_GMTOFF + dest->tm_gmtoff = src->tm_gmtoff; +# endif + +# ifdef HAS_TM_TM_ZONE + dest->tm_zone = src->tm_zone; +# endif + +# else + /* They're the same type */ + memcpy(dest, src, sizeof(*dest)); +# endif + } +} + + +/* Simulate localtime_r() to the best of our ability */ +struct tm * fake_localtime_r(const time_t *time, struct tm *result) { + const struct tm *static_result = localtime(time); + + assert(result != NULL); + + if( static_result == NULL ) { + memset(result, 0, sizeof(*result)); + return NULL; + } + else { + memcpy(result, static_result, sizeof(*result)); + return result; + } +} + + +/* Simulate gmtime_r() to the best of our ability */ +struct tm * fake_gmtime_r(const time_t *time, struct tm *result) { + const struct tm *static_result = gmtime(time); + + assert(result != NULL); + + if( static_result == NULL ) { + memset(result, 0, sizeof(*result)); + return NULL; + } 
+ else { + memcpy(result, static_result, sizeof(*result)); + return result; + } +} + + +static Time64_T seconds_between_years(Year left_year, Year right_year) { + int increment = (left_year > right_year) ? 1 : -1; + Time64_T seconds = 0; + int cycles; + + if( left_year > 2400 ) { + cycles = (left_year - 2400) / 400; + left_year -= cycles * 400; + seconds += cycles * seconds_in_gregorian_cycle; + } + else if( left_year < 1600 ) { + cycles = (left_year - 1600) / 400; + left_year += cycles * 400; + seconds += cycles * seconds_in_gregorian_cycle; + } + + while( left_year != right_year ) { + seconds += length_of_year[IS_LEAP(right_year - 1900)] * 60 * 60 * 24; + right_year += increment; + } + + return seconds * increment; +} + + +Time64_T mktime64(const struct TM *input_date) { + struct tm safe_date; + struct TM date; + Time64_T time; + Year year = input_date->tm_year + 1900; + + if( date_in_safe_range(input_date, &SYSTEM_MKTIME_MIN, &SYSTEM_MKTIME_MAX) ) + { + copy_TM64_to_tm(input_date, &safe_date); + return (Time64_T)mktime(&safe_date); + } + + /* Have to make the year safe in date else it won't fit in safe_date */ + date = *input_date; + date.tm_year = safe_year(year) - 1900; + copy_TM64_to_tm(&date, &safe_date); + + time = (Time64_T)mktime(&safe_date); + + time += seconds_between_years(year, (Year)(safe_date.tm_year + 1900)); + + return time; +} + + +/* Because I think mktime() is a crappy name */ +Time64_T timelocal64(const struct TM *date) { + return mktime64(date); +} + + +struct TM *gmtime64_r (const Time64_T *in_time, struct TM *p) +{ + int v_tm_sec, v_tm_min, v_tm_hour, v_tm_mon, v_tm_wday; + Time64_T v_tm_tday; + int leap; + Time64_T m; + Time64_T time = *in_time; + Year year = 70; + int cycles = 0; + + assert(p != NULL); + + /* Use the system gmtime() if time_t is small enough */ + if( SHOULD_USE_SYSTEM_GMTIME(*in_time) ) { + time_t safe_time = (time_t)*in_time; + struct tm safe_date; + GMTIME_R(&safe_time, &safe_date); + + copy_tm_to_TM64(&safe_date, p); + 
assert(check_tm(p)); + + return p; + } + +#ifdef HAS_TM_TM_GMTOFF + p->tm_gmtoff = 0; +#endif + p->tm_isdst = 0; + +#ifdef HAS_TM_TM_ZONE + p->tm_zone = "UTC"; +#endif + + v_tm_sec = (int)(time % 60); + time /= 60; + v_tm_min = (int)(time % 60); + time /= 60; + v_tm_hour = (int)(time % 24); + time /= 24; + v_tm_tday = time; + + WRAP (v_tm_sec, v_tm_min, 60); + WRAP (v_tm_min, v_tm_hour, 60); + WRAP (v_tm_hour, v_tm_tday, 24); + + v_tm_wday = (int)((v_tm_tday + 4) % 7); + if (v_tm_wday < 0) + v_tm_wday += 7; + m = v_tm_tday; + + if (m >= CHEAT_DAYS) { + year = CHEAT_YEARS; + m -= CHEAT_DAYS; + } + + if (m >= 0) { + /* Gregorian cycles, this is huge optimization for distant times */ + cycles = (int)(m / (Time64_T) days_in_gregorian_cycle); + if( cycles ) { + m -= (cycles * (Time64_T) days_in_gregorian_cycle); + year += (cycles * years_in_gregorian_cycle); + } + + /* Years */ + leap = IS_LEAP (year); + while (m >= (Time64_T) length_of_year[leap]) { + m -= (Time64_T) length_of_year[leap]; + year++; + leap = IS_LEAP (year); + } + + /* Months */ + v_tm_mon = 0; + while (m >= (Time64_T) days_in_month[leap][v_tm_mon]) { + m -= (Time64_T) days_in_month[leap][v_tm_mon]; + v_tm_mon++; + } + } else { + year--; + + /* Gregorian cycles */ + cycles = (int)((m / (Time64_T) days_in_gregorian_cycle) + 1); + if( cycles ) { + m -= (cycles * (Time64_T) days_in_gregorian_cycle); + year += (cycles * years_in_gregorian_cycle); + } + + /* Years */ + leap = IS_LEAP (year); + while (m < (Time64_T) -length_of_year[leap]) { + m += (Time64_T) length_of_year[leap]; + year--; + leap = IS_LEAP (year); + } + + /* Months */ + v_tm_mon = 11; + while (m < (Time64_T) -days_in_month[leap][v_tm_mon]) { + m += (Time64_T) days_in_month[leap][v_tm_mon]; + v_tm_mon--; + } + m += (Time64_T) days_in_month[leap][v_tm_mon]; + } + + p->tm_year = year; + if( p->tm_year != year ) { +#ifdef EOVERFLOW + errno = EOVERFLOW; +#endif + return NULL; + } + + /* At this point m is less than a year so casting to an int is 
safe */ + p->tm_mday = (int) m + 1; + p->tm_yday = julian_days_by_month[leap][v_tm_mon] + (int)m; + p->tm_sec = v_tm_sec; + p->tm_min = v_tm_min; + p->tm_hour = v_tm_hour; + p->tm_mon = v_tm_mon; + p->tm_wday = v_tm_wday; + + assert(check_tm(p)); + + return p; +} + + +struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) +{ + time_t safe_time; + struct tm safe_date; + struct TM gm_tm; + Year orig_year; + int month_diff; + + assert(local_tm != NULL); + + /* Use the system localtime() if time_t is small enough */ + if( SHOULD_USE_SYSTEM_LOCALTIME(*time) ) { + safe_time = (time_t)*time; + + TIME64_TRACE1("Using system localtime for %lld\n", *time); + + LOCALTIME_R(&safe_time, &safe_date); + + copy_tm_to_TM64(&safe_date, local_tm); + assert(check_tm(local_tm)); + + return local_tm; + } + + if( gmtime64_r(time, &gm_tm) == NULL ) { + TIME64_TRACE1("gmtime64_r returned null for %lld\n", *time); + return NULL; + } + + orig_year = gm_tm.tm_year; + + if (gm_tm.tm_year > (2037 - 1900) || + gm_tm.tm_year < (1970 - 1900) + ) + { + TIME64_TRACE1("Mapping tm_year %lld to safe_year\n", (Year)gm_tm.tm_year); + gm_tm.tm_year = safe_year((Year)(gm_tm.tm_year + 1900)) - 1900; + } + + safe_time = (time_t)timegm64(&gm_tm); + if( LOCALTIME_R(&safe_time, &safe_date) == NULL ) { + TIME64_TRACE1("localtime_r(%d) returned NULL\n", (int)safe_time); + return NULL; + } + + copy_tm_to_TM64(&safe_date, local_tm); + + local_tm->tm_year = orig_year; + if( local_tm->tm_year != orig_year ) { + TIME64_TRACE2("tm_year overflow: tm_year %lld, orig_year %lld\n", + (Year)local_tm->tm_year, (Year)orig_year); + +#ifdef EOVERFLOW + errno = EOVERFLOW; +#endif + return NULL; + } + + + month_diff = local_tm->tm_mon - gm_tm.tm_mon; + + /* When localtime is Dec 31st previous year and + gmtime is Jan 1st next year. + */ + if( month_diff == 11 ) { + local_tm->tm_year--; + } + + /* When localtime is Jan 1st, next year and + gmtime is Dec 31st, previous year. 
+ */ + if( month_diff == -11 ) { + local_tm->tm_year++; + } + + /* GMT is Jan 1st, xx01 year, but localtime is still Dec 31st + in a non-leap xx00. There is one point in the cycle + we can't account for which the safe xx00 year is a leap + year. So we need to correct for Dec 31st comming out as + the 366th day of the year. + */ + if( !IS_LEAP(local_tm->tm_year) && local_tm->tm_yday == 365 ) + local_tm->tm_yday--; + + assert(check_tm(local_tm)); + + return local_tm; +} + + +int valid_tm_wday( const struct TM* date ) { + if( 0 <= date->tm_wday && date->tm_wday <= 6 ) + return 1; + else + return 0; +} + +int valid_tm_mon( const struct TM* date ) { + if( 0 <= date->tm_mon && date->tm_mon <= 11 ) + return 1; + else + return 0; +} + + +char *asctime64_r( const struct TM* date, char *result ) { + /* I figure everything else can be displayed, even hour 25, but if + these are out of range we walk off the name arrays */ + if( !valid_tm_wday(date) || !valid_tm_mon(date) ) + return NULL; + + sprintf(result, TM64_ASCTIME_FORMAT, + wday_name[date->tm_wday], + mon_name[date->tm_mon], + date->tm_mday, date->tm_hour, + date->tm_min, date->tm_sec, + 1900 + date->tm_year); + + return result; +} + + +char *ctime64_r( const Time64_T* time, char* result ) { + struct TM date; + + localtime64_r( time, &date ); + return asctime64_r( &date, result ); +} + + +/* Non-thread safe versions of the above */ +struct TM *localtime64(const Time64_T *time) { + tzset(); + return localtime64_r(time, &Static_Return_Date); +} + +struct TM *gmtime64(const Time64_T *time) { + return gmtime64_r(time, &Static_Return_Date); +} + +char *asctime64( const struct TM* date ) { + return asctime64_r( date, Static_Return_String ); +} + +char *ctime64( const Time64_T* time ) { + tzset(); + return asctime64(localtime64(time)); +} diff -Nru pymongo-1.11/pymongo/time64_config.h pymongo-1.7/pymongo/time64_config.h --- pymongo-1.11/pymongo/time64_config.h 1970-01-01 00:00:00.000000000 +0000 +++ 
pymongo-1.7/pymongo/time64_config.h 2010-05-19 14:01:01.000000000 +0000 @@ -0,0 +1,78 @@ +/* Configuration + ------------- + Define as appropriate for your system. + Sensible defaults provided. +*/ + + +#ifndef TIME64_CONFIG_H +# define TIME64_CONFIG_H + +/* Debugging + TIME_64_DEBUG + Define if you want debugging messages +*/ +/* #define TIME_64_DEBUG */ + + +/* INT_64_T + A 64 bit integer type to use to store time and others. + Must be defined. +*/ +#define INT_64_T long long + + +/* USE_TM64 + Should we use a 64 bit safe replacement for tm? This will + let you go past year 2 billion but the struct will be incompatible + with tm. Conversion functions will be provided. +*/ +/* #define USE_TM64 */ + + +/* Availability of system functions. + + HAS_GMTIME_R + Define if your system has gmtime_r() + + HAS_LOCALTIME_R + Define if your system has localtime_r() + + HAS_TIMEGM + Define if your system has timegm(), a GNU extension. +*/ +#if !defined(WIN32) && !defined(_MSC_VER) +#define HAS_GMTIME_R +#define HAS_LOCALTIME_R +#endif +/* #define HAS_TIMEGM */ + + +/* Details of non-standard tm struct elements. + + HAS_TM_TM_GMTOFF + True if your tm struct has a "tm_gmtoff" element. + A BSD extension. + + HAS_TM_TM_ZONE + True if your tm struct has a "tm_zone" element. + A BSD extension. +*/ +/* #define HAS_TM_TM_GMTOFF */ +/* #define HAS_TM_TM_ZONE */ + + +/* USE_SYSTEM_LOCALTIME + USE_SYSTEM_GMTIME + USE_SYSTEM_MKTIME + USE_SYSTEM_TIMEGM + Should we use the system functions if the time is inside their range? + Your system localtime() is probably more accurate, but our gmtime() is + fast and safe. 
+*/ +#define USE_SYSTEM_LOCALTIME +/* #define USE_SYSTEM_GMTIME */ +#define USE_SYSTEM_MKTIME +/* #define USE_SYSTEM_TIMEGM */ + +#endif /* TIME64_CONFIG_H */ diff -Nru pymongo-1.11/pymongo/time64.h pymongo-1.7/pymongo/time64.h --- pymongo-1.11/pymongo/time64.h 1970-01-01 00:00:00.000000000 +0000 +++ pymongo-1.7/pymongo/time64.h 2010-05-19 14:01:01.000000000 +0000 @@ -0,0 +1,81 @@ +#ifndef TIME64_H +# define TIME64_H + +#include +#include "time64_config.h" + +/* Set our custom types */ +typedef INT_64_T Int64; +typedef Int64 Time64_T; +typedef Int64 Year; + + +/* A copy of the tm struct but with a 64 bit year */ +struct TM64 { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + Year tm_year; + int tm_wday; + int tm_yday; + int tm_isdst; + +#ifdef HAS_TM_TM_GMTOFF + long tm_gmtoff; +#endif + +#ifdef HAS_TM_TM_ZONE + char *tm_zone; +#endif +}; + + +/* Decide which tm struct to use */ +#ifdef USE_TM64 +#define TM TM64 +#else +#define TM tm +#endif + + +/* Declare public functions */ +struct TM *gmtime64_r (const Time64_T *, struct TM *); +struct TM *localtime64_r (const Time64_T *, struct TM *); +struct TM *gmtime64 (const Time64_T *); +struct TM *localtime64 (const Time64_T *); + +char *asctime64 (const struct TM *); +char *asctime64_r (const struct TM *, char *); + +char *ctime64 (const Time64_T*); +char *ctime64_r (const Time64_T*, char*); + +Time64_T timegm64 (const struct TM *); +Time64_T mktime64 (const struct TM *); +Time64_T timelocal64 (const struct TM *); + + +/* Not everyone has gm/localtime_r(), provide a replacement */ +#ifdef HAS_LOCALTIME_R +# define LOCALTIME_R(clock, result) localtime_r(clock, result) +#else +# define LOCALTIME_R(clock, result) fake_localtime_r(clock, result) +#endif +#ifdef HAS_GMTIME_R +# define GMTIME_R(clock, result) gmtime_r(clock, result) +#else +# define GMTIME_R(clock, result) fake_gmtime_r(clock, result) +#endif + + +/* Use a different asctime format depending on how big the year is */ +#ifdef USE_TM64 + 
#define TM64_ASCTIME_FORMAT "%.3s %.3s%3d %.2d:%.2d:%.2d %lld\n" +#else + #define TM64_ASCTIME_FORMAT "%.3s %.3s%3d %.2d:%.2d:%.2d %d\n" +#endif + + +#endif diff -Nru pymongo-1.11/pymongo/time64_limits.h pymongo-1.7/pymongo/time64_limits.h --- pymongo-1.11/pymongo/time64_limits.h 1970-01-01 00:00:00.000000000 +0000 +++ pymongo-1.7/pymongo/time64_limits.h 2010-05-19 14:01:01.000000000 +0000 @@ -0,0 +1,95 @@ +/* + Maximum and minimum inputs your system's respective time functions + can correctly handle. time64.h will use your system functions if + the input falls inside these ranges and corresponding USE_SYSTEM_* + constant is defined. +*/ + +#ifndef TIME64_LIMITS_H +#define TIME64_LIMITS_H + +/* Max/min for localtime() */ +#define SYSTEM_LOCALTIME_MAX 2147483647 +#define SYSTEM_LOCALTIME_MIN -2147483647-1 + +/* Max/min for gmtime() */ +#define SYSTEM_GMTIME_MAX 2147483647 +#define SYSTEM_GMTIME_MIN -2147483647-1 + +/* Max/min for mktime() */ +static const struct tm SYSTEM_MKTIME_MAX = { + 7, + 14, + 19, + 18, + 0, + 138, + 1, + 17, + 0 +#ifdef HAS_TM_TM_GMTOFF + ,-28800 +#endif +#ifdef HAS_TM_TM_ZONE + ,"PST" +#endif +}; + +static const struct tm SYSTEM_MKTIME_MIN = { + 52, + 45, + 12, + 13, + 11, + 1, + 5, + 346, + 0 +#ifdef HAS_TM_TM_GMTOFF + ,-28800 +#endif +#ifdef HAS_TM_TM_ZONE + ,"PST" +#endif +}; + +/* Max/min for timegm() */ +#ifdef HAS_TIMEGM +static const struct tm SYSTEM_TIMEGM_MAX = { + 7, + 14, + 3, + 19, + 0, + 138, + 2, + 18, + 0 + #ifdef HAS_TM_TM_GMTOFF + ,0 + #endif + #ifdef HAS_TM_TM_ZONE + ,"UTC" + #endif +}; + +static const struct tm SYSTEM_TIMEGM_MIN = { + 52, + 45, + 20, + 13, + 11, + 1, + 5, + 346, + 0 + #ifdef HAS_TM_TM_GMTOFF + ,0 + #endif + #ifdef HAS_TM_TM_ZONE + ,"UTC" + #endif +}; +#endif /* HAS_TIMEGM */ + +#endif /* TIME64_LIMITS_H */ diff -Nru pymongo-1.11/pymongo/timestamp.py pymongo-1.7/pymongo/timestamp.py --- pymongo-1.11/pymongo/timestamp.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/pymongo/timestamp.py 2010-06-17 
15:37:47.000000000 +0000 @@ -1,4 +1,4 @@ -# Copyright 2009-2010 10gen, Inc. +# Copyright 2010 10gen, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,4 +12,79 @@ # See the License for the specific language governing permissions and # limitations under the License. -from bson.timestamp import * +"""Tools for representing MongoDB internal Timestamps. + +.. versionadded:: 1.5 +""" + +import calendar +import datetime + + +class Timestamp(object): + """MongoDB internal timestamps used in the opLog. + """ + + def __init__(self, time, inc): + """Create a new :class:`Timestamp`. + + This class is only for use with the MongoDB opLog. If you need + to store a regular timestamp, please use a + :class:`~datetime.datetime`. + + Raises :class:`TypeError` if `time` is not an instance of + :class: `int` or :class:`~datetime.datetime`, or `inc` is not + an instance of :class:`int`. Raises :class:`ValueError` if + `time` or `inc` is not in [0, 2**32). + + :Parameters: + - `time`: time in seconds since epoch UTC, or a naive UTC + :class:`~datetime.datetime`, or an aware + :class:`~datetime.datetime` + - `inc`: the incrementing counter + + .. versionchanged:: 1.7 + `time` can be a :class:`~datetime.datetime` instance + """ + if isinstance(time, datetime.datetime): + if time.utcoffset() is not None: + time = time - time.utcoffset() + time = int(calendar.timegm(time.timetuple())) + if not isinstance(time, (int, long)): + raise TypeError("time must be an instance of int") + if not isinstance(inc, (int, long)): + raise TypeError("inc must be an instance of int") + if not 0 <= time < 2 ** 32: + raise ValueError("time must be contained in [0, 2**32)") + if not 0 <= inc < 2 ** 32: + raise ValueError("inc must be contained in [0, 2**32)") + + self.__time = time + self.__inc = inc + + @property + def time(self): + """Get the time portion of this :class:`Timestamp`. 
+ """ + return self.__time + + @property + def inc(self): + """Get the inc portion of this :class:`Timestamp`. + """ + return self.__inc + + def __eq__(self, other): + if isinstance(other, Timestamp): + return (self.__time == other.time and self.__inc == other.inc) + else: + return NotImplemented + + def __repr__(self): + return "Timestamp(%s, %s)" % (self.__time, self.__inc) + + def as_datetime(self): + """Return a :class:`~datetime.datetime` instance corresponding + to the time portion of this :class:`Timestamp`. + """ + return datetime.datetime.utcfromtimestamp(self.__time) diff -Nru pymongo-1.11/pymongo/tz_util.py pymongo-1.7/pymongo/tz_util.py --- pymongo-1.11/pymongo/tz_util.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/pymongo/tz_util.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ -# Copyright 2009-2010 10gen, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from bson.tz_util import * diff -Nru pymongo-1.11/pymongo.egg-info/PKG-INFO pymongo-1.7/pymongo.egg-info/PKG-INFO --- pymongo-1.11/pymongo.egg-info/PKG-INFO 2011-05-05 22:31:25.000000000 +0000 +++ pymongo-1.7/pymongo.egg-info/PKG-INFO 2010-06-17 16:50:48.000000000 +0000 @@ -1,39 +1,26 @@ Metadata-Version: 1.0 Name: pymongo -Version: 1.11 +Version: 1.7 Summary: Python driver for MongoDB Home-page: http://github.com/mongodb/mongo-python-driver -Author: Bernie Hackett -Author-email: bernie@10gen.com +Author: Mike Dirolf +Author-email: mongodb-user@googlegroups.com License: Apache License, Version 2.0 Description: ======= PyMongo ======= :Info: See `the mongo site `_ for more information. See `github `_ for the latest source. - :Author: Mike Dirolf - :Maintainer: Bernie Hackett + :Author: Mike Dirolf About ===== The PyMongo distribution contains tools for interacting with MongoDB - database from Python. The ``bson`` package is an implementation of - the `BSON format `_ for Python. The ``pymongo`` - package is a native Python driver for MongoDB. The ``gridfs`` package - is a `gridfs + database from Python. The ``pymongo`` package is a native Python + driver for MongoDB. The ``gridfs`` package is a `gridfs `_ implementation on top of ``pymongo``. - Issues / Questions / Feedback - ============================= - - Any issues with, questions about, or feedback for PyMongo should be - sent to the mongodb-user list on Google Groups. For confirmed issues - or feature requests, open a case on `jira - `_. Please do not e-mail any of the PyMongo - developers directly with issues or questions - you're more likely to - get an answer on the list. - Installation ============ @@ -109,7 +96,7 @@ .. 
_sphinx: http://sphinx.pocoo.org/ -Keywords: mongo,mongodb,pymongo,gridfs,bson +Keywords: mongo,mongodb,pymongo,gridfs Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers diff -Nru pymongo-1.11/pymongo.egg-info/SOURCES.txt pymongo-1.7/pymongo.egg-info/SOURCES.txt --- pymongo-1.11/pymongo.egg-info/SOURCES.txt 2011-05-05 22:31:25.000000000 +0000 +++ pymongo-1.7/pymongo.egg-info/SOURCES.txt 2010-06-17 16:50:49.000000000 +0000 @@ -2,30 +2,7 @@ MANIFEST.in README.rst ez_setup.py -setup.cfg setup.py -bson/__init__.py -bson/_cbson.h -bson/_cbsonmodule.c -bson/binary.py -bson/buffer.c -bson/buffer.h -bson/code.py -bson/dbref.py -bson/encoding_helpers.c -bson/encoding_helpers.h -bson/errors.py -bson/json_util.py -bson/max_key.py -bson/min_key.py -bson/objectid.py -bson/son.py -bson/time64.c -bson/time64.h -bson/time64_config.h -bson/time64_limits.h -bson/timestamp.py -bson/tz_util.py doc/__init__.py doc/changelog.rst doc/conf.py @@ -37,18 +14,6 @@ doc/tools.rst doc/tutorial.rst doc/api/index.rst -doc/api/bson/binary.rst -doc/api/bson/code.rst -doc/api/bson/dbref.rst -doc/api/bson/errors.rst -doc/api/bson/index.rst -doc/api/bson/json_util.rst -doc/api/bson/max_key.rst -doc/api/bson/min_key.rst -doc/api/bson/objectid.rst -doc/api/bson/son.rst -doc/api/bson/timestamp.rst -doc/api/bson/tz_util.rst doc/api/gridfs/errors.rst doc/api/gridfs/grid_file.rst doc/api/gridfs/index.rst @@ -72,19 +37,18 @@ doc/api/pymongo/son.rst doc/api/pymongo/son_manipulator.rst doc/api/pymongo/timestamp.rst -doc/api/pymongo/tz_util.rst doc/examples/custom_type.rst doc/examples/geo.rst doc/examples/gridfs.rst doc/examples/index.rst doc/examples/map_reduce.rst -doc/examples/replica_set.rst gridfs/__init__.py gridfs/errors.py gridfs/grid_file.py pymongo/__init__.py -pymongo/_cmessagemodule.c +pymongo/_cbsonmodule.c pymongo/binary.py +pymongo/bson.py pymongo/code.py pymongo/collection.py pymongo/connection.py @@ -92,6 +56,8 @@ 
pymongo/cursor_manager.py pymongo/database.py pymongo/dbref.py +pymongo/encoding_helpers.c +pymongo/encoding_helpers.h pymongo/errors.py pymongo/helpers.py pymongo/json_util.py @@ -102,8 +68,11 @@ pymongo/objectid.py pymongo/son.py pymongo/son_manipulator.py +pymongo/time64.c +pymongo/time64.h +pymongo/time64_config.h +pymongo/time64_limits.h pymongo/timestamp.py -pymongo/tz_util.py pymongo.egg-info/PKG-INFO pymongo.egg-info/SOURCES.txt pymongo.egg-info/dependency_links.txt diff -Nru pymongo-1.11/pymongo.egg-info/top_level.txt pymongo-1.7/pymongo.egg-info/top_level.txt --- pymongo-1.11/pymongo.egg-info/top_level.txt 2011-05-05 22:31:25.000000000 +0000 +++ pymongo-1.7/pymongo.egg-info/top_level.txt 2010-06-17 16:50:48.000000000 +0000 @@ -1,3 +1,2 @@ -bson pymongo gridfs diff -Nru pymongo-1.11/README.rst pymongo-1.7/README.rst --- pymongo-1.11/README.rst 2011-03-18 00:04:57.000000000 +0000 +++ pymongo-1.7/README.rst 2010-05-19 14:01:01.000000000 +0000 @@ -2,30 +2,17 @@ PyMongo ======= :Info: See `the mongo site `_ for more information. See `github `_ for the latest source. -:Author: Mike Dirolf -:Maintainer: Bernie Hackett +:Author: Mike Dirolf About ===== The PyMongo distribution contains tools for interacting with MongoDB -database from Python. The ``bson`` package is an implementation of -the `BSON format `_ for Python. The ``pymongo`` -package is a native Python driver for MongoDB. The ``gridfs`` package -is a `gridfs +database from Python. The ``pymongo`` package is a native Python +driver for MongoDB. The ``gridfs`` package is a `gridfs `_ implementation on top of ``pymongo``. -Issues / Questions / Feedback -============================= - -Any issues with, questions about, or feedback for PyMongo should be -sent to the mongodb-user list on Google Groups. For confirmed issues -or feature requests, open a case on `jira -`_. Please do not e-mail any of the PyMongo -developers directly with issues or questions - you're more likely to -get an answer on the list. 
- Installation ============ diff -Nru pymongo-1.11/setup.cfg pymongo-1.7/setup.cfg --- pymongo-1.11/setup.cfg 2011-05-05 22:31:25.000000000 +0000 +++ pymongo-1.7/setup.cfg 2010-06-17 16:50:49.000000000 +0000 @@ -1,6 +1,3 @@ -[nosetests] -with-xunit = 1 - [egg_info] tag_build = tag_date = 0 diff -Nru pymongo-1.11/setup.py pymongo-1.7/setup.py --- pymongo-1.11/setup.py 2011-05-05 22:09:54.000000000 +0000 +++ pymongo-1.7/setup.py 2010-06-17 15:37:47.000000000 +0000 @@ -20,7 +20,7 @@ from distutils.core import Extension # Remember to change in pymongo/__init__.py as well! -version = "1.11" +version = "1.7" f = open("README.rst") try: @@ -62,7 +62,7 @@ pass if has_subprocess: - status = subprocess.call(["sphinx-build", "-E", "-b", mode, "doc", path]) + status = subprocess.call(["sphinx-build", "-b", mode, "doc", path]) if status: raise RuntimeError("documentation step '%s' failed" % mode) @@ -79,12 +79,12 @@ if sys.platform == 'win32' and sys.version_info > (2, 6): - # 2.6's distutils.msvc9compiler can raise an IOError when failing to - # find the compiler - build_errors = (CCompilerError, DistutilsExecError, - DistutilsPlatformError, IOError) + # 2.6's distutils.msvc9compiler can raise an IOError when failing to + # find the compiler + build_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError, + IOError) else: - build_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError) + build_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError) class custom_build_ext(build_ext): @@ -99,10 +99,6 @@ be compiled. No C extensions are essential for PyMongo to run, although they do result in significant speed improvements. -If you are seeing this message on Linux you probably need to -install GCC and/or the Python development libraries for your -version of Python. 
- %s ************************************************************** """ @@ -114,17 +110,14 @@ print e print self.warning_message % ("Extension modules", "There was an issue with your " - "platform configuration " - "- see above.") + "platform configuration - see above.") def build_extension(self, ext): if sys.version_info[:3] >= (2, 4, 0): try: build_ext.build_extension(self, ext) - except build_errors, e: - print e - print self.warning_message % ("The %s extension " - "module" % ext.name, + except build_errors: + print self.warning_message % ("The %s extension module" % ext.name, "Above is the ouput showing how " "the compilation failed.") else: @@ -133,32 +126,24 @@ "advantage of the extension.") c_ext = Feature( - "optional C extensions", + "optional C extension", standard=True, - ext_modules=[Extension('bson._cbson', - include_dirs=['bson'], - sources=['bson/_cbsonmodule.c', - 'bson/time64.c', - 'bson/buffer.c', - 'bson/encoding_helpers.c']), - Extension('pymongo._cmessage', - include_dirs=['bson'], - sources=['pymongo/_cmessagemodule.c', - 'bson/_cbsonmodule.c', - 'bson/time64.c', - 'bson/buffer.c', - 'bson/encoding_helpers.c'])]) + ext_modules=[Extension('pymongo._cbson', + include_dirs=['pymongo'], + sources=['pymongo/_cbsonmodule.c', + 'pymongo/time64.c', + 'pymongo/encoding_helpers.c'])]) if "--no_ext" in sys.argv: sys.argv = [x for x in sys.argv if x != "--no_ext"] features = {} elif sys.byteorder == "big": print """ -***************************************************** -The optional C extensions are currently not supported +*************************************************** +The optional C extension is currently not supported on big endian platforms and will not be built. Performance may be degraded. 
-***************************************************** +*************************************************** """ features = {} else: @@ -171,11 +156,9 @@ long_description=readme_content, author="Mike Dirolf", author_email="mongodb-user@googlegroups.com", - maintainer="Bernie Hackett", - maintainer_email="bernie@10gen.com", url="http://github.com/mongodb/mongo-python-driver", - keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"], - packages=["bson", "pymongo", "gridfs"], + keywords=["mongo", "mongodb", "pymongo", "gridfs"], + packages=["pymongo", "gridfs"], install_requires=[], features=features, license="Apache License, Version 2.0", diff -Nru pymongo-1.11/test/__init__.py pymongo-1.7/test/__init__.py --- pymongo-1.11/test/__init__.py 2011-04-06 17:53:30.000000000 +0000 +++ pymongo-1.7/test/__init__.py 2010-05-19 14:01:01.000000000 +0000 @@ -17,12 +17,9 @@ from test_connection import get_connection - def teardown(): c = get_connection() c.drop_database("pymongo-pooling-tests") c.drop_database("pymongo_test") - c.drop_database("pymongo_test1") - c.drop_database("pymongo_test2") c.drop_database("pymongo_test_mike") diff -Nru pymongo-1.11/test/qcheck.py pymongo-1.7/test/qcheck.py --- pymongo-1.11/test/qcheck.py 2011-04-06 17:54:20.000000000 +0000 +++ pymongo-1.7/test/qcheck.py 2010-05-19 14:01:01.000000000 +0000 @@ -19,10 +19,10 @@ import sys sys.path[0:0] = [""] -from bson.binary import Binary -from bson.dbref import DBRef -from bson.objectid import ObjectId -from bson.son import SON +from pymongo.binary import Binary +from pymongo.objectid import ObjectId +from pymongo.dbref import DBRef +from pymongo.son import SON gen_target = 100 reduction_attempts = 10 @@ -168,9 +168,9 @@ gen_range(0, 10)), SON) -def simplify(case): # TODO this is a hack +def simplify(case): # TODO this is a hack if isinstance(case, SON) and "$ref" not in case: - simplified = SON(case) # make a copy! + simplified = SON(case) # make a copy! 
if random.choice([True, False]): # delete if not len(simplified.keys()): diff -Nru pymongo-1.11/test/test_binary.py pymongo-1.7/test/test_binary.py --- pymongo-1.11/test/test_binary.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/test/test_binary.py 2010-06-17 15:34:30.000000000 +0000 @@ -18,7 +18,7 @@ import sys sys.path[0:0] = [""] -from bson.binary import Binary +from pymongo.binary import Binary class TestBinary(unittest.TestCase): @@ -32,7 +32,7 @@ self.assert_(a_binary.startswith("hello")) self.assert_(a_binary.endswith("world")) self.assert_(isinstance(a_binary, Binary)) - self.assertFalse(isinstance(a_string, Binary)) + self.failIf(isinstance(a_string, Binary)) def test_exceptions(self): self.assertRaises(TypeError, Binary, None) diff -Nru pymongo-1.11/test/test_bson.py pymongo-1.7/test/test_bson.py --- pymongo-1.11/test/test_bson.py 2011-04-06 18:01:28.000000000 +0000 +++ pymongo-1.7/test/test_bson.py 2010-06-17 15:34:46.000000000 +0000 @@ -29,25 +29,25 @@ from nose.plugins.skip import SkipTest -import bson -from bson import (BSON, - decode_all, - is_valid) -from bson.binary import Binary -from bson.code import Code -from bson.objectid import ObjectId -from bson.dbref import DBRef -from bson.son import SON -from bson.timestamp import Timestamp -from bson.errors import (InvalidDocument, - InvalidStringData) -from bson.max_key import MaxKey -from bson.min_key import MinKey -from bson.tz_util import (FixedOffset, - utc) import pymongo +from pymongo.binary import Binary +from pymongo.code import Code +from pymongo.objectid import ObjectId +from pymongo.dbref import DBRef +from pymongo.son import SON +from pymongo.timestamp import Timestamp +from pymongo.bson import BSON, is_valid, _to_dicts +from pymongo.errors import InvalidDocument, InvalidStringData +from pymongo.max_key import MaxKey +from pymongo.min_key import MinKey import qcheck +class SomeZone(datetime.tzinfo): + def utcoffset(self, dt): + return datetime.timedelta(minutes=555) + def dst(self, 
dt): + # a fixed-offset class: doesn't account for DST + return datetime.timedelta(0) class TestBSON(unittest.TestCase): @@ -59,104 +59,105 @@ self.assertRaises(TypeError, is_valid, u"test") self.assertRaises(TypeError, is_valid, 10.4) - self.assertFalse(is_valid("test")) + self.failIf(is_valid("test")) # the simplest valid BSON document self.assert_(is_valid("\x05\x00\x00\x00\x00")) self.assert_(is_valid(BSON("\x05\x00\x00\x00\x00"))) - self.assertFalse(is_valid("\x04\x00\x00\x00\x00")) - self.assertFalse(is_valid("\x05\x00\x00\x00\x01")) - self.assertFalse(is_valid("\x05\x00\x00\x00")) - self.assertFalse(is_valid("\x05\x00\x00\x00\x00\x00")) + self.failIf(is_valid("\x04\x00\x00\x00\x00")) + self.failIf(is_valid("\x05\x00\x00\x00\x01")) + self.failIf(is_valid("\x05\x00\x00\x00")) + self.failIf(is_valid("\x05\x00\x00\x00\x00\x00")) def test_random_data_is_not_bson(self): qcheck.check_unittest(self, qcheck.isnt(is_valid), qcheck.gen_string(qcheck.gen_range(0, 40))) - def test_basic_decode(self): + def test_basic_to_dict(self): self.assertEqual({"test": u"hello world"}, BSON("\x1B\x00\x00\x00\x0E\x74\x65\x73\x74\x00\x0C" "\x00\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F" - "\x72\x6C\x64\x00\x00").decode()) + "\x72\x6C\x64\x00\x00").to_dict()) self.assertEqual([{"test": u"hello world"}, {}], - decode_all("\x1B\x00\x00\x00\x0E\x74\x65\x73\x74\x00" - "\x0C\x00\x00\x00\x68\x65\x6C\x6C\x6F\x20" - "\x77\x6F\x72\x6C\x64\x00\x00\x05\x00\x00" - "\x00\x00")) + _to_dicts("\x1B\x00\x00\x00\x0E\x74\x65\x73\x74\x00" + "\x0C\x00\x00\x00\x68\x65\x6C\x6C\x6F\x20" + "\x77\x6F\x72\x6C\x64\x00\x00\x05\x00\x00" + "\x00\x00")) def test_data_timestamp(self): self.assertEqual({"test": Timestamp(4, 20)}, BSON("\x13\x00\x00\x00\x11\x74\x65\x73\x74\x00\x14" - "\x00\x00\x00\x04\x00\x00\x00\x00").decode()) + "\x00\x00\x00\x04\x00\x00\x00\x00").to_dict()) - def test_basic_encode(self): - self.assertRaises(TypeError, BSON.encode, 100) - self.assertRaises(TypeError, BSON.encode, "hello") - 
self.assertRaises(TypeError, BSON.encode, None) - self.assertRaises(TypeError, BSON.encode, []) + def test_basic_from_dict(self): + self.assertRaises(TypeError, BSON.from_dict, 100) + self.assertRaises(TypeError, BSON.from_dict, "hello") + self.assertRaises(TypeError, BSON.from_dict, None) + self.assertRaises(TypeError, BSON.from_dict, []) - self.assertEqual(BSON.encode({}), BSON("\x05\x00\x00\x00\x00")) - self.assertEqual(BSON.encode({"test": u"hello world"}), + self.assertEqual(BSON.from_dict({}), BSON("\x05\x00\x00\x00\x00")) + self.assertEqual(BSON.from_dict({"test": u"hello world"}), "\x1B\x00\x00\x00\x02\x74\x65\x73\x74\x00\x0C\x00\x00" "\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F\x72\x6C\x64\x00" "\x00") - self.assertEqual(BSON.encode({u"mike": 100}), + self.assertEqual(BSON.from_dict({u"mike": 100}), "\x0F\x00\x00\x00\x10\x6D\x69\x6B\x65\x00\x64\x00\x00" "\x00\x00") - self.assertEqual(BSON.encode({"hello": 1.5}), + self.assertEqual(BSON.from_dict({"hello": 1.5}), "\x14\x00\x00\x00\x01\x68\x65\x6C\x6C\x6F\x00\x00\x00" "\x00\x00\x00\x00\xF8\x3F\x00") - self.assertEqual(BSON.encode({"true": True}), + self.assertEqual(BSON.from_dict({"true": True}), "\x0C\x00\x00\x00\x08\x74\x72\x75\x65\x00\x01\x00") - self.assertEqual(BSON.encode({"false": False}), + self.assertEqual(BSON.from_dict({"false": False}), "\x0D\x00\x00\x00\x08\x66\x61\x6C\x73\x65\x00\x00" "\x00") - self.assertEqual(BSON.encode({"empty": []}), + self.assertEqual(BSON.from_dict({"empty": []}), "\x11\x00\x00\x00\x04\x65\x6D\x70\x74\x79\x00\x05\x00" "\x00\x00\x00\x00") - self.assertEqual(BSON.encode({"none": {}}), + self.assertEqual(BSON.from_dict({"none": {}}), "\x10\x00\x00\x00\x03\x6E\x6F\x6E\x65\x00\x05\x00\x00" "\x00\x00\x00") - self.assertEqual(BSON.encode({"test": Binary("test", 0)}), + self.assertEqual(BSON.from_dict({"test": Binary("test", 0)}), "\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00\x00" "\x00\x00\x74\x65\x73\x74\x00") - self.assertEqual(BSON.encode({"test": Binary("test")}), + 
self.assertEqual(BSON.from_dict({"test": Binary("test")}), "\x18\x00\x00\x00\x05\x74\x65\x73\x74\x00\x08\x00\x00" "\x00\x02\x04\x00\x00\x00\x74\x65\x73\x74\x00") - self.assertEqual(BSON.encode({"test": Binary("test", 128)}), + self.assertEqual(BSON.from_dict({"test": Binary("test", 128)}), "\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00\x00" "\x00\x80\x74\x65\x73\x74\x00") - self.assertEqual(BSON.encode({"test": None}), + self.assertEqual(BSON.from_dict({"test": None}), "\x0B\x00\x00\x00\x0A\x74\x65\x73\x74\x00\x00") - self.assertEqual(BSON.encode({"date": datetime.datetime(2007, 1, 8, - 0, 30, 11)}), + self.assertEqual(BSON.from_dict({"date": datetime.datetime(2007, 1, 8, + 0, 30, + 11)}), "\x13\x00\x00\x00\x09\x64\x61\x74\x65\x00\x38\xBE\x1C" "\xFF\x0F\x01\x00\x00\x00") - self.assertEqual(BSON.encode({"regex": re.compile("a*b", - re.IGNORECASE)}), + self.assertEqual(BSON.from_dict({"regex": re.compile("a*b", + re.IGNORECASE)}), "\x12\x00\x00\x00\x0B\x72\x65\x67\x65\x78\x00\x61\x2A" "\x62\x00\x69\x00\x00") - self.assertEqual(BSON.encode({"$where": Code("test")}), + self.assertEqual(BSON.from_dict({"$where": Code("test")}), "\x1F\x00\x00\x00\x0F\x24\x77\x68\x65\x72\x65\x00\x12" "\x00\x00\x00\x05\x00\x00\x00\x74\x65\x73\x74\x00\x05" "\x00\x00\x00\x00\x00") a = ObjectId("\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B") - self.assertEqual(BSON.encode({"oid": a}), + self.assertEqual(BSON.from_dict({"oid": a}), "\x16\x00\x00\x00\x07\x6F\x69\x64\x00\x00\x01\x02\x03" "\x04\x05\x06\x07\x08\x09\x0A\x0B\x00") - self.assertEqual(BSON.encode({"ref": DBRef("coll", a)}), + self.assertEqual(BSON.from_dict({"ref": DBRef("coll", a)}), "\x2F\x00\x00\x00\x03ref\x00\x25\x00\x00\x00\x02$ref" "\x00\x05\x00\x00\x00coll\x00\x07$id\x00\x00\x01\x02" "\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00\x00") - def test_encode_then_decode(self): + def test_from_then_to_dict(self): def helper(dict): - self.assertEqual(dict, (BSON.encode(dict)).decode()) + self.assertEqual(dict, 
(BSON.from_dict(dict)).to_dict()) helper({}) helper({"test": u"hello"}) - self.assert_(isinstance(BSON.encode({"hello": "world"}) - .decode()["hello"], + self.assert_(isinstance(BSON.from_dict({"hello": "world"}) + .to_dict()["hello"], unicode)) helper({"mike": -10120}) helper({"long": long(10)}) @@ -173,84 +174,46 @@ helper(SON([(u'test dst', datetime.datetime(1993, 4, 4, 2))])) helper({"big float": float(10000000000)}) helper({"ref": DBRef("coll", 5)}) - helper({"ref": DBRef("coll", 5, foo="bar", bar=4)}) helper({"ref": DBRef("coll", 5, "foo")}) - helper({"ref": DBRef("coll", 5, "foo", foo="bar")}) - helper({"ref": Timestamp(1, 2)}) + helper({"ref": Timestamp(1,2)}) helper({"foo": MinKey()}) helper({"foo": MaxKey()}) - def encode_then_decode(dict): - return dict == (BSON.encode(dict)).decode() + def from_then_to_dict(dict): + return dict == (BSON.from_dict(dict)).to_dict() - qcheck.check_unittest(self, encode_then_decode, + qcheck.check_unittest(self, from_then_to_dict, qcheck.gen_mongo_dict(3)) def test_aware_datetime(self): - aware = datetime.datetime(1993, 4, 4, 2, - tzinfo=FixedOffset(555, "SomeZone")) - as_utc = (aware - aware.utcoffset()).replace(tzinfo=utc) - self.assertEqual(datetime.datetime(1993, 4, 3, 16, 45, tzinfo=utc), - as_utc) - after = BSON.encode({"date": aware}).decode(tz_aware=True)["date"] - self.assertEqual(utc, after.tzinfo) - self.assertEqual(as_utc, after) - - def test_naive_decode(self): - aware = datetime.datetime(1993, 4, 4, 2, - tzinfo=FixedOffset(555, "SomeZone")) - naive_utc = (aware - aware.utcoffset()).replace(tzinfo=None) - self.assertEqual(datetime.datetime(1993, 4, 3, 16, 45), naive_utc) - after = BSON.encode({"date": aware}).decode()["date"] + aware = datetime.datetime(1993, 4, 4, 2, tzinfo=SomeZone()) + utc_naive = (aware - aware.utcoffset()).replace(tzinfo=None) + self.assertEqual(datetime.datetime(1993, 4, 3, 16, 45), utc_naive) + after = BSON.from_dict({"date": aware}).to_dict()["date"] self.assertEqual(None, after.tzinfo) 
- self.assertEqual(naive_utc, after) - - def test_dst(self): - d = {"x": datetime.datetime(1993, 4, 4, 2)} - self.assertEqual(d, BSON.encode(d).decode()) + self.assertEqual(utc_naive, after) def test_bad_encode(self): - self.assertRaises(InvalidStringData, BSON.encode, + self.assertRaises(InvalidStringData, BSON.from_dict, {"lalala": '\xf4\xe0\xf0\xe1\xc0 Color Touch'}) - evil_list = {'a': []} - evil_list['a'].append(evil_list) - evil_dict = {} - evil_dict['a'] = evil_dict - for evil_data in [evil_dict, evil_list]: - self.assertRaises(RuntimeError, BSON.encode, evil_data) def test_overflow(self): - self.assert_(BSON.encode({"x": 9223372036854775807L})) - self.assertRaises(OverflowError, BSON.encode, - {"x": 9223372036854775808L}) - - self.assert_(BSON.encode({"x": -9223372036854775808L})) - self.assertRaises(OverflowError, BSON.encode, - {"x": -9223372036854775809L}) - - def test_small_long_encode_decode(self): - encoded1 = BSON.encode({'x': 256}) - decoded1 = BSON.decode(encoded1)['x'] - self.assertEqual(256, decoded1) - self.assertEqual(type(256), type(decoded1)) - - encoded2 = BSON.encode({'x': 256L}) - decoded2 = BSON.decode(encoded2)['x'] - self.assertEqual(256L, decoded2) - self.assertEqual(type(256L), type(decoded2)) + self.assert_(BSON.from_dict({"x": 9223372036854775807L})) + self.assertRaises(OverflowError, BSON.from_dict, {"x": 9223372036854775808L}) - self.assertNotEqual(type(decoded1), type(decoded2)) + self.assert_(BSON.from_dict({"x": -9223372036854775808L})) + self.assertRaises(OverflowError, BSON.from_dict, {"x": -9223372036854775809L}) def test_tuple(self): self.assertEqual({"tuple": [1, 2]}, - BSON.encode({"tuple": (1, 2)}).decode()) + BSON.from_dict({"tuple": (1, 2)}).to_dict()) def test_uuid(self): if not should_test_uuid: raise SkipTest() id = uuid.uuid4() - transformed_id = (BSON.encode({"id": id})).decode()["id"] + transformed_id = (BSON.from_dict({"id": id})).to_dict()["id"] self.assert_(isinstance(transformed_id, uuid.UUID)) 
self.assertEqual(id, transformed_id) @@ -260,103 +223,72 @@ # that doesn't really test anything but the lack of a segfault. def test_unicode_regex(self): regex = re.compile(u'revisi\xf3n') - BSON.encode({"regex": regex}).decode() + BSON.from_dict({"regex": regex}).to_dict() def test_non_string_keys(self): - self.assertRaises(InvalidDocument, BSON.encode, {8.9: "test"}) + self.assertRaises(InvalidDocument, BSON.from_dict, {8.9: "test"}) + + def test_large_document(self): + self.assertRaises(InvalidDocument, BSON.from_dict, {"key": "x"*4*1024*1024}) def test_utf8(self): w = {u"aéあ": u"aéあ"} - self.assertEqual(w, BSON.encode(w).decode()) + self.assertEqual(w, BSON.from_dict(w).to_dict()) x = {u"aéあ".encode("utf-8"): u"aéあ".encode("utf-8")} - self.assertEqual(w, BSON.encode(x).decode()) + self.assertEqual(w, BSON.from_dict(x).to_dict()) y = {"hello": u"aé".encode("iso-8859-1")} - self.assertRaises(InvalidStringData, BSON.encode, y) + self.assertRaises(InvalidStringData, BSON.from_dict, y) z = {u"aé".encode("iso-8859-1"): "hello"} - self.assertRaises(InvalidStringData, BSON.encode, z) + self.assertRaises(InvalidStringData, BSON.from_dict, z) def test_null_character(self): doc = {"a": "\x00"} - self.assertEqual(doc, BSON.encode(doc).decode()) + self.assertEqual(doc, BSON.from_dict(doc).to_dict()) doc = {"a": u"\x00"} - self.assertEqual(doc, BSON.encode(doc).decode()) + self.assertEqual(doc, BSON.from_dict(doc).to_dict()) - self.assertRaises(InvalidDocument, BSON.encode, {"\x00": "a"}) - self.assertRaises(InvalidDocument, BSON.encode, {u"\x00": "a"}) + self.assertRaises(InvalidDocument, BSON.from_dict, {"\x00": "a"}) + self.assertRaises(InvalidDocument, BSON.from_dict, {u"\x00": "a"}) - self.assertRaises(InvalidDocument, BSON.encode, - {"a": re.compile("ab\x00c")}) - self.assertRaises(InvalidDocument, BSON.encode, - {"a": re.compile(u"ab\x00c")}) + self.assertRaises(InvalidDocument, BSON.from_dict, {"a": re.compile("ab\x00c")}) + self.assertRaises(InvalidDocument, 
BSON.from_dict, {"a": re.compile(u"ab\x00c")}) def test_move_id(self): self.assertEqual("\x19\x00\x00\x00\x02_id\x00\x02\x00\x00\x00a\x00" "\x02a\x00\x02\x00\x00\x00a\x00\x00", - BSON.encode(SON([("a", "a"), ("_id", "a")]))) + BSON.from_dict(SON([("a", "a"), ("_id", "a")]))) self.assertEqual("\x2c\x00\x00\x00" "\x02_id\x00\x02\x00\x00\x00b\x00" "\x03b\x00" "\x19\x00\x00\x00\x02a\x00\x02\x00\x00\x00a\x00" "\x02_id\x00\x02\x00\x00\x00a\x00\x00\x00", - BSON.encode(SON([("b", - SON([("a", "a"), ("_id", "a")])), - ("_id", "b")]))) + BSON.from_dict(SON([("b", SON([("a", "a"), ("_id", "a")])), + ("_id", "b")]))) def test_dates(self): doc = {"early": datetime.datetime(1686, 5, 5), "late": datetime.datetime(2086, 5, 5)} try: - self.assertEqual(doc, BSON.encode(doc).decode()) + self.assertEqual(doc, BSON.from_dict(doc).to_dict()) except ValueError: # Ignore ValueError when no C ext, since it's probably # a problem w/ 32-bit Python - we work around this in the # C ext, though. - if bson.has_c(): + if pymongo.has_c(): raise def test_custom_class(self): - self.assert_(isinstance(BSON.encode({}).decode(), dict)) - self.assertFalse(isinstance(BSON.encode({}).decode(), SON)) - self.assert_(isinstance(BSON.encode({}).decode(SON), SON)) - - self.assertEqual(1, BSON.encode({"x": 1}).decode(SON)["x"]) - - x = BSON.encode({"x": [{"y": 1}]}) - self.assert_(isinstance(x.decode(SON)["x"][0], SON)) - - def test_subclasses(self): - # make sure we can serialize subclasses of native Python types. 
- class _myint(int): - pass - - class _myfloat(float): - pass - - class _myunicode(unicode): - pass - - d = {'a': _myint(42), 'b': _myfloat(63.9), - 'c': _myunicode('hello world') - } - d2 = BSON.encode(d).decode() - for key, value in d2.iteritems(): - orig_value = d[key] - orig_type = orig_value.__class__.__bases__[0] - self.assertEqual(type(value), orig_type) - self.assertEqual(value, orig_type(value)) + self.assert_(isinstance(BSON.from_dict({}).to_dict(), dict)) + self.failIf(isinstance(BSON.from_dict({}).to_dict(), SON)) + self.assert_(isinstance(BSON.from_dict({}).to_dict(SON), SON)) + + self.assertEqual(1, BSON.from_dict({"x": 1}).to_dict(SON)["x"]) - def test_ordered_dict(self): - try: - from collections import OrderedDict - except ImportError: - raise SkipTest() - d = OrderedDict([("one", 1), ("two", 2), ("three", 3), ("four", 4)]) - self.assertEqual(d, BSON.encode(d).decode(as_class=OrderedDict)) if __name__ == "__main__": unittest.main() diff -Nru pymongo-1.11/test/test_code.py pymongo-1.7/test/test_code.py --- pymongo-1.11/test/test_code.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/test/test_code.py 2010-05-19 14:01:01.000000000 +0000 @@ -18,7 +18,7 @@ import sys sys.path[0:0] = [""] -from bson.code import Code +from pymongo.code import Code class TestCode(unittest.TestCase): @@ -49,7 +49,7 @@ self.assert_(a_code.startswith("hello")) self.assert_(a_code.endswith("world")) self.assert_(isinstance(a_code, Code)) - self.assertFalse(isinstance(a_string, Code)) + self.failIf(isinstance(a_string, Code)) self.assertEqual(a_code.scope, {}) a_code.scope["my_var"] = 5 self.assertEqual(a_code.scope, {"my_var": 5}) @@ -84,12 +84,6 @@ self.assertNotEqual(a, Code(b)) self.assertNotEqual(b, Code(a)) - def test_scope_kwargs(self): - self.assertEqual({"a": 1}, Code("", a=1).scope) - self.assertEqual({"a": 1}, Code("", {"a": 2}, a=1).scope) - self.assertEqual({"a": 1, "b": 2, "c": 3}, - Code("", {"b": 2}, a=1, c=3).scope) - if __name__ == "__main__": 
unittest.main() diff -Nru pymongo-1.11/test/test_collection.py pymongo-1.7/test/test_collection.py --- pymongo-1.11/test/test_collection.py 2011-04-18 17:47:57.000000000 +0000 +++ pymongo-1.7/test/test_collection.py 2010-05-24 14:42:26.000000000 +0000 @@ -27,18 +27,17 @@ sys.path[0:0] = [""] -from bson.binary import Binary -from bson.code import Code -from bson.objectid import ObjectId -from bson.son import SON from pymongo import ASCENDING, DESCENDING +from pymongo.binary import Binary +from pymongo.code import Code from pymongo.collection import Collection from pymongo.errors import (DuplicateKeyError, InvalidDocument, InvalidName, InvalidOperation, - OperationFailure, - TimeoutError) + OperationFailure) +from pymongo.objectid import ObjectId +from pymongo.son import SON from test.test_connection import get_connection from test import (qcheck, version) @@ -102,23 +101,22 @@ self.assertEqual(db.system.indexes.find({"ns": u"pymongo_test.test"}) .count(), 1) db.test.create_index("hello") - self.assert_(u"hello_1" in - [a["name"] for a in db.system.indexes - .find({"ns": u"pymongo_test.test"})]) + self.assert_(SON([(u"name", u"hello_1"), + (u"ns", u"pymongo_test.test"), + (u"key", SON([(u"hello", 1)]))]) in + list(db.system.indexes + .find({"ns": u"pymongo_test.test"}))) db.test.drop_indexes() self.assertEqual(db.system.indexes.find({"ns": u"pymongo_test.test"}) .count(), 1) db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)]) - self.assert_(u"hello_-1_world_1" in - [a["name"] for a in db.system.indexes - .find({"ns": u"pymongo_test.test"})]) - - db.test.drop() - db.test.insert({'a': 1}) - db.test.insert({'a': 1}) - self.assertRaises(DuplicateKeyError, db.test.create_index, - 'a', unique=True) + self.assert_(SON([(u"name", u"hello_-1_world_1"), + (u"ns", u"pymongo_test.test"), + (u"key", SON([(u"hello", -1), + (u"world", 1)]))]) in + list(db.system.indexes + .find({"ns": u"pymongo_test.test"}))) def test_ensure_index(self): db = self.db @@ -153,6 
+151,12 @@ db.test.ensure_index("goodbye")) self.assertEqual(None, db.test.ensure_index("goodbye")) + db_name = self.db.name + self.connection.drop_database(self.db.name) + self.assertEqual("goodbye_1", + db.test.ensure_index("goodbye")) + self.assertEqual(None, db.test.ensure_index("goodbye")) + db.test.drop_index("goodbye_1") self.assertEqual("goodbye_1", db.test.create_index("goodbye")) @@ -171,8 +175,6 @@ time.sleep(1.1) self.assertEqual("goodbye_1", db.test.ensure_index("goodbye")) - # Clean up indexes for later tests - db.test.drop_indexes() def test_index_on_binary(self): db = self.db @@ -200,9 +202,11 @@ db.test.drop_index(name) self.assertEqual(db.system.indexes.find({"ns": u"pymongo_test.test"}) .count(), 2) - self.assert_(u"hello_1" in - [a["name"] for a in db.system.indexes - .find({"ns": u"pymongo_test.test"})]) + self.assert_(SON([(u"name", u"hello_1"), + (u"ns", u"pymongo_test.test"), + (u"key", SON([(u"hello", 1)]))]) in + list(db.system.indexes + .find({"ns": u"pymongo_test.test"}))) db.test.drop_indexes() db.test.create_index("hello") @@ -214,33 +218,33 @@ db.test.drop_index([("goodbye", ASCENDING)]) self.assertEqual(db.system.indexes.find({"ns": u"pymongo_test.test"}) .count(), 2) - self.assert_(u"hello_1" in - [a["name"] for a in db.system.indexes - .find({"ns": u"pymongo_test.test"})]) + self.assert_(SON([(u"name", u"hello_1"), + (u"ns", u"pymongo_test.test"), + (u"key", SON([(u"hello", 1)]))]) in + list(db.system.indexes + .find({"ns": u"pymongo_test.test"}))) def test_index_info(self): db = self.db db.test.drop_indexes() db.test.remove({}) - db.test.save({}) # create collection + db.test.save({}) # create collection self.assertEqual(len(db.test.index_information()), 1) self.assert_("_id_" in db.test.index_information()) db.test.create_index("hello") self.assertEqual(len(db.test.index_information()), 2) - self.assertEqual(db.test.index_information()["hello_1"]["key"], - [("hello", ASCENDING)]) + 
self.assertEqual(db.test.index_information()["hello_1"], + {"key": [("hello", ASCENDING)]}) db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True) - self.assertEqual(db.test.index_information()["hello_1"]["key"], - [("hello", ASCENDING)]) + self.assertEqual(db.test.index_information()["hello_1"], + {"key": [("hello", ASCENDING)]}) self.assertEqual(len(db.test.index_information()), 3) - self.assertEqual([("hello", DESCENDING), ("world", ASCENDING)], - db.test.index_information()["hello_-1_world_1"]["key"] - ) - self.assertEqual(True, - db.test.index_information()["hello_-1_world_1"]["unique"]) + self.assertEqual({"key": [("hello", DESCENDING), ("world", ASCENDING)], + "unique": True}, + db.test.index_information()["hello_-1_world_1"]) def test_field_selection(self): db = self.db @@ -328,11 +332,10 @@ self.assert_("extra thing" in db.test.find({}).next()) self.assert_("x" in db.test.find({}, ["x", "mike"]).next()) self.assert_("mike" in db.test.find({}, ["x", "mike"]).next()) - self.assertFalse("extra thing" in db.test.find({}, - ["x", "mike"]).next()) - self.assertFalse("x" in db.test.find({}, ["mike"]).next()) + self.failIf("extra thing" in db.test.find({}, ["x", "mike"]).next()) + self.failIf("x" in db.test.find({}, ["mike"]).next()) self.assert_("mike" in db.test.find({}, ["mike"]).next()) - self.assertFalse("extra thing" in db.test.find({}, ["mike"]).next()) + self.failIf("extra thing" in db.test.find({}, ["mike"]).next()) def test_fields_specifier_as_dict(self): db = self.db @@ -340,9 +343,9 @@ db.test.insert({"x": [1, 2, 3], "mike": "awesome"}) - self.assertEqual([1, 2, 3], db.test.find_one()["x"]) + self.assertEqual([1,2,3], db.test.find_one()["x"]) if version.at_least(db.connection, (1, 5, 1)): - self.assertEqual([2, 3], + self.assertEqual([2,3], db.test.find_one(fields={"x": {"$slice": -2}})["x"]) self.assert_("x" not in db.test.find_one(fields={"x": 0})) @@ -402,24 +405,24 @@ db.test.insert({"hello": "world"}) 
db.test.insert({"hello": {"hello": "world"}}) - self.assertRaises(InvalidDocument, db.test.insert, {"$hello": "world"}) - self.assertRaises(InvalidDocument, db.test.insert, + self.assertRaises(InvalidName, db.test.insert, {"$hello": "world"}) + self.assertRaises(InvalidName, db.test.insert, {"hello": {"$hello": "world"}}) db.test.insert({"he$llo": "world"}) db.test.insert({"hello": {"hello$": "world"}}) - self.assertRaises(InvalidDocument, db.test.insert, + self.assertRaises(InvalidName, db.test.insert, {".hello": "world"}) - self.assertRaises(InvalidDocument, db.test.insert, + self.assertRaises(InvalidName, db.test.insert, {"hello": {".hello": "world"}}) - self.assertRaises(InvalidDocument, db.test.insert, + self.assertRaises(InvalidName, db.test.insert, {"hello.": "world"}) - self.assertRaises(InvalidDocument, db.test.insert, + self.assertRaises(InvalidName, db.test.insert, {"hello": {"hello.": "world"}}) - self.assertRaises(InvalidDocument, db.test.insert, + self.assertRaises(InvalidName, db.test.insert, {"hel.lo": "world"}) - self.assertRaises(InvalidDocument, db.test.insert, + self.assertRaises(InvalidName, db.test.insert, {"hello": {"hel.lo": "world"}}) db.test.update({"hello": "world"}, {"$inc": "hello"}) @@ -459,8 +462,7 @@ db.drop_collection("test") self.assertEqual(db.test.find().count(), 0) - ids = db.test.insert(itertools.imap(lambda x: {"hello": "world"}, - itertools.repeat(None, 10))) + ids = db.test.insert(itertools.imap(lambda x: {"hello": "world"}, itertools.repeat(None, 10))) self.assertEqual(db.test.find().count(), 10) def test_save(self): @@ -478,7 +480,7 @@ db.test.save({"hello": "world"}) db.test.save({"hello": "mike"}) db.test.save({"hello": "world"}) - self.assertFalse(db.error()) + self.failIf(db.error()) db.drop_collection("test") db.test.create_index("hello", unique=True) @@ -509,16 +511,8 @@ self.assertRaises(expected_error, db.test.save, {"x": 2}, safe=True) self.assertRaises(expected_error, - db.test.update, {"x": 1}, - {"$inc": {"x": 
1}}, safe=True) + db.test.update, {"x": 1}, {"$inc": {"x": 1}}, safe=True) - def test_error_code(self): - try: - self.db.test.update({}, {"$thismodifierdoesntexist": 1}, safe=True) - self.fail() - except OperationFailure, e: - if version.at_least(self.db.connection, (1, 3)): - self.assertEqual(10147, e.code) def test_index_on_subfield(self): db = self.db @@ -527,7 +521,7 @@ db.test.insert({"hello": {"a": 4, "b": 5}}) db.test.insert({"hello": {"a": 7, "b": 2}}) db.test.insert({"hello": {"a": 4, "b": 10}}) - self.assertFalse(db.error()) + self.failIf(db.error()) db.drop_collection("test") db.test.create_index("hello.a", unique=True) @@ -610,13 +604,9 @@ self.assertRaises(OperationFailure, db.test.update, {"_id": id}, {"$inc": {"x": 1}}, safe=True) - self.assertEqual(1, db.test.update({"_id": id}, - {"$inc": {"x": 2}}, - safe=True)["n"]) - - self.assertEqual(0, db.test.update({"_id": "foo"}, - {"$inc": {"x": 2}}, - safe=True)["n"]) + self.assertEqual(1, db.test.update({"_id": id}, {"$inc": {"x": 2}}, safe=True)["n"]) + + self.assertEqual(0, db.test.update({"_id": "foo"}, {"$inc": {"x": 2}}, safe=True)["n"]) def test_safe_save(self): db = self.db @@ -627,8 +617,7 @@ db.test.save({"hello": "world"}) self.assert_("E11000" in db.error()["err"]) - self.assertRaises(OperationFailure, db.test.save, - {"hello": "world"}, safe=True) + self.assertRaises(OperationFailure, db.test.save, {"hello": "world"}, safe=True) def test_safe_remove(self): db = self.db @@ -642,9 +631,8 @@ self.assertEqual(1, db.test.count()) if version.at_least(db.connection, (1, 1, 3, -1)): - self.assertRaises(OperationFailure, db.test.remove, - {"x": 1}, safe=True) - else: # Just test that it doesn't blow up + self.assertRaises(OperationFailure, db.test.remove, {"x": 1}, safe=True) + else: # Just test that it doesn't blow up db.test.remove({"x": 1}, safe=True) db.drop_collection("test") @@ -653,38 +641,6 @@ self.assertEqual(2, db.test.remove({}, safe=True)["n"]) self.assertEqual(0, db.test.remove({}, 
safe=True)["n"]) - def test_last_error_options(self): - if not version.at_least(self.connection, (1, 5, 1)): - raise SkipTest() - - # XXX: Fix this if we ever have a replica set unittest env. - # mongo >=1.7.6 errors with 'norepl' when w=2+ - # and we aren't replicated. - if not version.at_least(self.connection, (1, 7, 6)): - self.assertRaises(TimeoutError, self.db.test.save, - {"x": 1}, w=2, wtimeout=1) - self.assertRaises(TimeoutError, self.db.test.insert, - {"x": 1}, w=2, wtimeout=1) - self.assertRaises(TimeoutError, self.db.test.update, - {"x": 1}, {"y": 2}, w=2, wtimeout=1) - self.assertRaises(TimeoutError, self.db.test.remove, - {"x": 1}, {"y": 2}, w=2, wtimeout=1) - - self.db.test.save({"x": 1}, w=1, wtimeout=1) - self.db.test.insert({"x": 1}, w=1, wtimeout=1) - self.db.test.remove({"x": 1}, w=1, wtimeout=1) - self.db.test.update({"x": 1}, {"y": 2}, w=1, wtimeout=1) - - def test_manual_last_error(self): - self.db.test.save({"x": 1}) - # XXX: Fix this if we ever have a replica set unittest env. 
- # mongo >=1.7.6 errors with 'norepl' when w=2+ - # and we aren't replicated - if not version.at_least(self.connection, (1, 7, 6)): - self.assertRaises(TimeoutError, self.db.command, - "getlasterror", w=2, wtimeout=1) - self.db.command("getlasterror", w=1, wtimeout=1) - def test_count(self): db = self.db db.drop_collection("test") @@ -702,10 +658,9 @@ eval = db.test.group(*args) self.assertEqual(eval, expected) - self.assertEqual([], - db.test.group([], {}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) + + self.assertEqual([], db.test.group([], {}, {"count": 0}, + "function (obj, prev) { prev.count++; }")) db.test.save({"a": 2}) db.test.save({"b": 5}) @@ -713,13 +668,11 @@ self.assertEqual([{"count": 3}], db.test.group([], {}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) + "function (obj, prev) { prev.count++; }")) self.assertEqual([{"count": 1}], db.test.group([], {"a": {"$gt": 1}}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) + "function (obj, prev) { prev.count++; }")) db.test.save({"a": 2, "b": 3}) @@ -727,39 +680,33 @@ {"a": None, "count": 1}, {"a": 1, "count": 1}], db.test.group(["a"], {}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) + "function (obj, prev) { prev.count++; }")) # modifying finalize self.assertEqual([{"a": 2, "count": 3}, {"a": None, "count": 2}, {"a": 1, "count": 2}], db.test.group(["a"], {}, {"count": 0}, - "function (obj, prev) " - "{ prev.count++; }", + "function (obj, prev) { prev.count++; }", "function (obj) { obj.count++; }")) # returning finalize self.assertEqual([2, 1, 1], db.test.group(["a"], {}, {"count": 0}, - "function (obj, prev) " - "{ prev.count++; }", + "function (obj, prev) { prev.count++; }", "function (obj) { return obj.count; }")) # keyf self.assertEqual([2, 2], - db.test.group("function (obj) { if (obj.a == 2) " - "{ return {a: true} }; " + db.test.group("function (obj) { if (obj.a == 2) { return {a: true} }; " "return {b: true}; }", {}, {"count": 0}, - 
"function (obj, prev) " - "{ prev.count++; }", + "function (obj, prev) { prev.count++; }", "function (obj) { return obj.count; }")) # no key self.assertEqual([{"count": 4}], db.test.group(None, {}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) + "function (obj, prev) { prev.count++; }")) warnings.simplefilter("error") self.assertRaises(DeprecationWarning, @@ -768,8 +715,7 @@ command=False) warnings.simplefilter("default") - self.assertRaises(OperationFailure, db.test.group, - [], {}, {}, "5 ++ 5") + self.assertRaises(OperationFailure, db.test.group, [], {}, {}, "5 ++ 5") def test_group_with_scope(self): db = self.db @@ -786,10 +732,9 @@ Code(reduce_function, {"inc_value": 2}))[0]['count']) - self.assertEqual(1, - db.test.group([], {}, {"count": 0}, - Code(reduce_function, - {"inc_value": 0.5}))[0]['count']) + self.assertEqual(1, db.test.group([], {}, {"count": 0}, + Code(reduce_function, + {"inc_value": 0.5}))[0]['count']) if version.at_least(db.connection, (1, 1)): self.assertEqual(2, db.test.group([], {}, {"count": 0}, @@ -881,8 +826,7 @@ self.assertRaises(TypeError, db.test.find, snapshot=5) list(db.test.find(snapshot=True)) - self.assertRaises(OperationFailure, list, - db.test.find(snapshot=True).sort("foo", 1)) + self.assertRaises(OperationFailure, list, db.test.find(snapshot=True).sort("foo", 1)) def test_find_one(self): db = self.db @@ -894,8 +838,7 @@ self.assertEqual(db.test.find_one(id), db.test.find_one()) self.assertEqual(db.test.find_one(None), db.test.find_one()) self.assertEqual(db.test.find_one({}), db.test.find_one()) - self.assertEqual(db.test.find_one({"hello": "world"}), - db.test.find_one()) + self.assertEqual(db.test.find_one({"hello": "world"}), db.test.find_one()) self.assert_("hello" in db.test.find_one(fields=["hello"])) self.assert_("hello" not in db.test.find_one(fields=["foo"])) @@ -911,7 +854,7 @@ db.test.save({"_id": 5}) self.assert_(db.test.find_one(5)) - self.assertFalse(db.test.find_one(6)) + 
self.failIf(db.test.find_one(6)) def test_remove_non_objectid(self): db = self.db @@ -949,9 +892,9 @@ def to_list(foo): return [bar["x"] for bar in foo] - self.assertEqual([2, 1, 3], to_list(db.test.find())) - self.assertEqual([1, 2, 3], to_list(db.test.find(sort=[("x", 1)]))) - self.assertEqual([3, 2, 1], to_list(db.test.find(sort=[("x", -1)]))) + self.assertEqual([2,1,3], to_list(db.test.find())) + self.assertEqual([1,2,3], to_list(db.test.find(sort=[("x", 1)]))) + self.assertEqual([3,2,1], to_list(db.test.find(sort=[("x", -1)]))) self.assertRaises(TypeError, db.test.find, sort=5) self.assertRaises(TypeError, db.test.find, sort="hello") @@ -1011,11 +954,8 @@ self.db.test.save({"query": "foo"}) self.db.test.save({"bar": "foo"}) - self.assertEqual(1, - self.db.test.find({"query": {"$ne": None}}).count()) - self.assertEqual(1, - len(list(self.db.test.find({"query": {"$ne": None}}))) - ) + self.assertEqual(1, self.db.test.find({"query": {"$ne": None}}).count()) + self.assertEqual(1, len(list(self.db.test.find({"query": {"$ne": None}})))) def test_min_query(self): self.db.drop_collection("test") @@ -1029,25 +969,12 @@ "$query": {}})[0]["x"]) def test_insert_large_document(self): - max_size = self.db.connection.max_bson_size - half_size = max_size / 2 - if version.at_least(self.db.connection, (1, 7, 4)): - self.assertEqual(max_size, 16777216) self.assertRaises(InvalidDocument, self.db.test.insert, - {"foo": "x" * max_size}) - self.assertRaises(InvalidDocument, self.db.test.save, - {"foo": "x" * max_size}) + {"foo": "x" * 4 * 1024 * 1024}) self.assertRaises(InvalidDocument, self.db.test.insert, - [{"x": 1}, {"foo": "x" * max_size}]) - self.db.test.insert([{"foo": "x" * half_size}, - {"foo": "x" * half_size}], safe=True) - - self.db.test.insert({"bar": "x"}) - self.assertRaises(InvalidDocument, self.db.test.update, - {"bar": "x"}, {"bar": "x" * (max_size - 14)}, - safe=True) - self.db.test.update({"bar": "x"}, {"bar": "x" * (max_size - 15)}, - safe=True) + [{"x": 1}, 
{"foo": "x" * 4 * 1024 * 1024}]) + self.db.test.insert([{"foo": "x" * 2 * 1024 * 1024}, + {"foo": "x" * 2 * 1024 * 1024}], safe=True) def test_map_reduce(self): if not version.at_least(self.db.connection, (1, 1, 1)): @@ -1073,55 +1000,19 @@ " }" " return total;" "}") - result = db.test.map_reduce(map, reduce, out='mrunittests') + result = db.test.map_reduce(map, reduce) self.assertEqual(3, result.find_one({"_id": "cat"})["value"]) self.assertEqual(2, result.find_one({"_id": "dog"})["value"]) self.assertEqual(1, result.find_one({"_id": "mouse"})["value"]) - if version.at_least(self.db.connection, (1, 7, 4)): - db.test.insert({"id": 5, "tags": ["hampster"]}) - result = db.test.map_reduce(map, reduce, out='mrunittests') - self.assertEqual(1, result.find_one({"_id": "hampster"})["value"]) - db.test.remove({"id": 5}) - result = db.test.map_reduce(map, reduce, - out='mrunittests', merge_output=True) - self.assertEqual(3, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(1, result.find_one({"_id": "hampster"})["value"]) - - result = db.test.map_reduce(map, reduce, - out='mrunittests', reduce_output=True) - self.assertEqual(6, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(4, result.find_one({"_id": "dog"})["value"]) - self.assertEqual(2, result.find_one({"_id": "mouse"})["value"]) - self.assertEqual(1, result.find_one({"_id": "hampster"})["value"]) - - self.assertRaises(InvalidOperation, - db.test.map_reduce, - map, - reduce, - out='mrunittests', - merge_output=True, - reduce_output=True) - - full_result = db.test.map_reduce(map, reduce, - out='mrunittests', full_response=True) + full_result = db.test.map_reduce(map, reduce, full_response=True) self.assertEqual(6, full_result["counts"]["emit"]) - result = db.test.map_reduce(map, reduce, out='mrunittests', limit=2) + result = db.test.map_reduce(map, reduce, limit=2) self.assertEqual(2, result.find_one({"_id": "cat"})["value"]) self.assertEqual(1, result.find_one({"_id": "dog"})["value"]) 
self.assertEqual(None, result.find_one({"_id": "mouse"})) - if version.at_least(self.db.connection, (1, 7, 4)): - result = db.test.inline_map_reduce(map, reduce) - self.assertTrue(isinstance(result, list)) - self.assertEqual(3, len(result)) - self.assertTrue(result[1]["_id"] in ("cat", "dog", "mouse")) - - full_result = db.test.inline_map_reduce(map, reduce, - full_response=True) - self.assertEqual(6, full_result["counts"]["emit"]) - def test_messages_with_unicode_collection_names(self): db = self.db @@ -1145,44 +1036,20 @@ def test_as_class(self): c = self.db.test - c.drop() + c.remove() c.insert({"x": 1}) self.assert_(isinstance(c.find().next(), dict)) - self.assertFalse(isinstance(c.find().next(), SON)) + self.failIf(isinstance(c.find().next(), SON)) self.assert_(isinstance(c.find(as_class=SON).next(), SON)) self.assert_(isinstance(c.find_one(), dict)) - self.assertFalse(isinstance(c.find_one(), SON)) + self.failIf(isinstance(c.find_one(), SON)) self.assert_(isinstance(c.find_one(as_class=SON), SON)) self.assertEqual(1, c.find_one(as_class=SON)["x"]) self.assertEqual(1, c.find(as_class=SON).next()["x"]) - def test_find_and_modify(self): - c = self.db.test - c.drop() - c.insert({'_id': 1, 'i': 1}) - - self.assertEqual({'_id': 1, 'i': 1}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}})) - self.assertEqual({'_id': 1, 'i': 3}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True)) - - self.assertEqual({'_id': 1, 'i': 3}, - c.find_and_modify({'_id': 1}, remove=True)) - - self.assertEqual(None, c.find_one({'_id': 1})) - - self.assertEqual(None, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}})) - self.assertEqual({}, c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - upsert=True)) - self.assertEqual({'_id': 1, 'i': 2}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - upsert=True, new=True)) - if __name__ == "__main__": unittest.main() diff -Nru pymongo-1.11/test/test_connection.py pymongo-1.7/test/test_connection.py --- 
pymongo-1.11/test/test_connection.py 2011-04-18 17:47:57.000000000 +0000 +++ pymongo-1.7/test/test_connection.py 2010-05-19 14:01:01.000000000 +0000 @@ -14,7 +14,6 @@ """Test the connection module.""" -import datetime import os import sys import time @@ -24,17 +23,14 @@ from nose.plugins.skip import SkipTest -from bson.son import SON -from bson.tz_util import utc -from pymongo.connection import (Connection, - _parse_uri) +from pymongo.connection import Connection from pymongo.database import Database from pymongo.errors import (AutoReconnect, - ConfigurationError, ConnectionFailure, InvalidName, InvalidURI, OperationFailure) +from pymongo.son import SON from test import version @@ -53,12 +49,11 @@ def test_types(self): self.assertRaises(TypeError, Connection, 1) self.assertRaises(TypeError, Connection, 1.14) + self.assertRaises(TypeError, Connection, []) self.assertRaises(TypeError, Connection, "localhost", "27017") self.assertRaises(TypeError, Connection, "localhost", 1.14) self.assertRaises(TypeError, Connection, "localhost", []) - self.assertRaises(ConfigurationError, Connection, []) - def test_constants(self): Connection.HOST = self.host Connection.PORT = self.port @@ -80,11 +75,6 @@ self.assert_(Connection(self.host, self.port)) - def test_host_w_port(self): - self.assert_(Connection("%s:%d" % (self.host, self.port))) - self.assertRaises(ConnectionFailure, Connection, - "%s:1234567" % self.host, self.port) - def test_repr(self): self.assertEqual(repr(Connection(self.host, self.port)), "Connection('%s', %s)" % (self.host, self.port)) @@ -92,8 +82,6 @@ def test_getters(self): self.assertEqual(Connection(self.host, self.port).host, self.host) self.assertEqual(Connection(self.host, self.port).port, self.port) - self.assertEqual(set([(self.host, self.port)]), - Connection(self.host, self.port).nodes) def test_get_db(self): connection = Connection(self.host, self.port) @@ -150,14 +138,14 @@ self.assertRaises(InvalidName, c.copy_database, "foo", "$foo") - 
c.pymongo_test.test.drop() + c.drop_database("pymongo_test") c.drop_database("pymongo_test1") c.drop_database("pymongo_test2") c.pymongo_test.test.insert({"foo": "bar"}) - self.assertFalse("pymongo_test1" in c.database_names()) - self.assertFalse("pymongo_test2" in c.database_names()) + self.failIf("pymongo_test1" in c.database_names()) + self.failIf("pymongo_test2" in c.database_names()) c.copy_database("pymongo_test", "pymongo_test1") @@ -170,26 +158,29 @@ self.assert_("pymongo_test2" in c.database_names()) self.assertEqual("bar", c.pymongo_test2.test.find_one()["foo"]) - if version.at_least(c, (1, 3, 3, 1)): - c.drop_database("pymongo_test1") + c.drop_database("pymongo_test1") + c.drop_database("pymongo_test2") + if version.at_least(c, (1, 3, 3, 1)): c.pymongo_test.add_user("mike", "password") self.assertRaises(OperationFailure, c.copy_database, "pymongo_test", "pymongo_test1", username="foo", password="bar") - self.assertFalse("pymongo_test1" in c.database_names()) + self.failIf("pymongo_test1" in c.database_names()) self.assertRaises(OperationFailure, c.copy_database, "pymongo_test", "pymongo_test1", username="mike", password="bar") - self.assertFalse("pymongo_test1" in c.database_names()) + self.failIf("pymongo_test1" in c.database_names()) c.copy_database("pymongo_test", "pymongo_test1", username="mike", password="password") self.assert_("pymongo_test1" in c.database_names()) self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"]) + c.drop_database("pymongo_test1") + def test_iteration(self): connection = Connection(self.host, self.port) @@ -237,72 +228,39 @@ coll.count() def test_parse_uri(self): - self.assertEqual(([("localhost", 27017)], None, None, None, None, {}), - _parse_uri("localhost", 27017)) - self.assertEqual(([("localhost", 27018)], None, None, None, None, {}), - _parse_uri("localhost", 27018)) - self.assertRaises(InvalidURI, _parse_uri, - "http://foobar.com", 27017) - self.assertRaises(InvalidURI, _parse_uri, - "http://foo@foobar.com", 
27017) - - self.assertEqual(([("localhost", 27017)], None, None, None, None, {}), - _parse_uri("mongodb://localhost", 27017)) - self.assertEqual(([("localhost", 27017)], None, - "fred", "foobar", None, {}), - _parse_uri("mongodb://fred:foobar@localhost", - 27017)) - self.assertEqual(([("localhost", 27017)], "baz", - "fred", "foobar", None, {}), - _parse_uri("mongodb://fred:foobar@localhost/baz", - 27017)) + self.assertEqual(([("localhost", 27017)], None, None, None), + Connection._parse_uri("localhost")) + self.assertRaises(InvalidURI, Connection._parse_uri, "http://foobar.com") + self.assertRaises(InvalidURI, Connection._parse_uri, "http://foo@foobar.com") + + self.assertEqual(([("localhost", 27017)], None, None, None), + Connection._parse_uri("mongodb://localhost")) + self.assertEqual(([("localhost", 27017)], None, "fred", "foobar"), + Connection._parse_uri("mongodb://fred:foobar@localhost")) + self.assertEqual(([("localhost", 27017)], "baz", "fred", "foobar"), + Connection._parse_uri("mongodb://fred:foobar@localhost/baz")) self.assertEqual(([("example1.com", 27017), ("example2.com", 27017)], - None, None, None, None, {}), - _parse_uri("mongodb://" - "example1.com:27017,example2.com:27017", - 27018)) + None, None, None), + Connection._parse_uri("mongodb://example1.com:27017,example2.com:27017")) self.assertEqual(([("localhost", 27017), ("localhost", 27018), - ("localhost", 27019)], None, None, None, None, {}), - _parse_uri("mongodb://localhost," - "localhost:27018,localhost:27019", - 27017)) - - self.assertEqual(([("localhost", 27018)], None, None, None, None, {}), - _parse_uri("localhost:27018", 27017)) - self.assertEqual(([("localhost", 27017)], "foo", None, None, None, {}), - _parse_uri("localhost/foo", 27017)) - self.assertEqual(([("localhost", 27017)], None, None, None, None, {}), - _parse_uri("localhost/", 27017)) - - self.assertEqual(([("localhost", 27017)], "test", - None, None, "yield_historical.in", {}), - _parse_uri("mongodb://" - 
"localhost/test.yield_historical.in", - 27017)) - self.assertEqual(([("localhost", 27017)], "test", "fred", - "foobar", "yield_historical.in", {}), - _parse_uri("mongodb://fred:foobar@localhost/" - "test.yield_historical.in", - 27017)) - self.assertEqual(([("example1.com", 27017), ("example2.com", 27017)], - "test", None, None, "yield_historical.in", {}), - _parse_uri("mongodb://example1.com:27017,example2.com" - ":27017/test.yield_historical.in", - 27017)) - self.assertEqual(([("localhost", 27017)], "test", "fred", "foobar", - "yield_historical.in", {'slaveok': 'true'}), - _parse_uri("mongodb://fred:foobar@localhost/" - "test.yield_historical.in?slaveok=true", - 27017)) + ("localhost", 27019)], None, None, None), + Connection._parse_uri("mongodb://localhost,localhost:27018,localhost:27019")) + + self.assertEqual(([("localhost", 27018)], None, None, None), + Connection._parse_uri("localhost:27018")) + self.assertEqual(([("localhost", 27017)], "foo", None, None), + Connection._parse_uri("localhost/foo")) + self.assertEqual(([("localhost", 27017)], None, None, None), + Connection._parse_uri("localhost/")) def test_from_uri(self): c = Connection(self.host, self.port) - self.assertRaises(InvalidURI, Connection, "mongodb://localhost/baz") + self.assertRaises(InvalidURI, Connection.from_uri, "mongodb://localhost/baz") - self.assertEqual(c, Connection("mongodb://%s:%s" % - (self.host, self.port))) + self.assertEqual(c, Connection.from_uri("mongodb://%s:%s" % + (self.host, self.port))) c.admin.system.users.remove({}) c.pymongo_test.system.users.remove({}) @@ -310,28 +268,26 @@ c.admin.add_user("admin", "pass") c.pymongo_test.add_user("user", "pass") - self.assertRaises(ConfigurationError, Connection, + self.assertRaises(InvalidURI, Connection.from_uri, "mongodb://foo:bar@%s:%s" % (self.host, self.port)) - self.assertRaises(ConfigurationError, Connection, + self.assertRaises(InvalidURI, Connection.from_uri, "mongodb://admin:bar@%s:%s" % (self.host, self.port)) - 
self.assertRaises(ConfigurationError, Connection, + self.assertRaises(InvalidURI, Connection.from_uri, "mongodb://user:pass@%s:%s" % (self.host, self.port)) - Connection("mongodb://admin:pass@%s:%s" % (self.host, self.port)) + Connection.from_uri("mongodb://admin:pass@%s:%s" % (self.host, self.port)) - self.assertRaises(ConfigurationError, Connection, + self.assertRaises(InvalidURI, Connection.from_uri, "mongodb://admin:pass@%s:%s/pymongo_test" % (self.host, self.port)) - self.assertRaises(ConfigurationError, Connection, + self.assertRaises(InvalidURI, Connection.from_uri, "mongodb://user:foo@%s:%s/pymongo_test" % (self.host, self.port)) - Connection("mongodb://user:pass@%s:%s/pymongo_test" % - (self.host, self.port)) + Connection.from_uri("mongodb://user:pass@%s:%s/pymongo_test" % + (self.host, self.port)) - self.assert_(Connection("mongodb://%s:%s" % - (self.host, self.port), - slave_okay=True).slave_okay) - self.assert_(Connection("mongodb://%s:%s/?slaveok=true;w=2" % - (self.host, self.port)).slave_okay) + self.assert_(Connection.from_uri("mongodb://%s:%s" % + (self.host, self.port), + slave_okay=True).slave_okay) def test_fork(self): """Test using a connection before and after a fork. 
@@ -348,7 +304,6 @@ # Failure occurs if the connection is used before the fork db.test.find_one() - db.connection.end_request() def loop(pipe): while True: @@ -400,99 +355,53 @@ self.assertEqual(dict, c.document_class) self.assert_(isinstance(db.test.find_one(), dict)) - self.assertFalse(isinstance(db.test.find_one(), SON)) + self.failIf(isinstance(db.test.find_one(), SON)) c.document_class = SON self.assertEqual(SON, c.document_class) self.assert_(isinstance(db.test.find_one(), SON)) - self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON)) + self.failIf(isinstance(db.test.find_one(as_class=dict), SON)) c = Connection(self.host, self.port, document_class=SON) db = c.pymongo_test self.assertEqual(SON, c.document_class) self.assert_(isinstance(db.test.find_one(), SON)) - self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON)) + self.failIf(isinstance(db.test.find_one(as_class=dict), SON)) c.document_class = dict self.assertEqual(dict, c.document_class) self.assert_(isinstance(db.test.find_one(), dict)) - self.assertFalse(isinstance(db.test.find_one(), SON)) - - def test_network_timeout(self): - no_timeout = Connection(self.host, self.port) - timeout = Connection(self.host, self.port, network_timeout=0.1) - - no_timeout.pymongo_test.drop_collection("test") - no_timeout.pymongo_test.test.insert({"x": 1}, safe=True) - - where_func = """function (doc) { - var d = new Date().getTime() + 200; - var x = new Date().getTime(); - while (x < d) { - x = new Date().getTime(); - } - return true; -}""" - - def get_x(db): - return db.test.find().where(where_func).next()["x"] - self.assertEqual(1, get_x(no_timeout.pymongo_test)) - self.assertRaises(ConnectionFailure, get_x, timeout.pymongo_test) - - def get_x_timeout(db, t): - return db.test.find( - network_timeout=t).where(where_func).next()["x"] - self.assertEqual(1, get_x_timeout(timeout.pymongo_test, None)) - self.assertRaises(ConnectionFailure, get_x_timeout, - no_timeout.pymongo_test, 0.1) - - def 
test_tz_aware(self): - aware = Connection(self.host, self.port, tz_aware=True) - naive = Connection(self.host, self.port) - aware.pymongo_test.drop_collection("test") - - now = datetime.datetime.utcnow() - aware.pymongo_test.test.insert({"x": now}, safe=True) - - self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo) - self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo) - self.assertEqual( - aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None), - naive.pymongo_test.test.find_one()["x"]) - - def test_ipv6(self): - self.assertRaises(InvalidURI, _parse_uri, "::1", 27017) - self.assertRaises(InvalidURI, _parse_uri, "[::1", 27017) - self.assertRaises(InvalidURI, _parse_uri, "::1]:27017") - self.assertRaises(InvalidURI, _parse_uri, "mongodb://::1", 27017) - self.assert_(_parse_uri, "mongodb://[::1]:27017/?slaveOk=true") - self.assert_(_parse_uri, - "[::1]:27017,[2001:0db8:85a3:0000:0000:8a2e:0370:7334]" - ":27018,192.168.0.212:27019,localhost:27020") - self.assert_(_parse_uri, - "mongodb://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]" - ":27017/?slaveOk=true") - try: - connection = Connection("[::1]") - except: - # Either mongod was started without --ipv6 - # or the OS doesn't support it (or both). - raise SkipTest() + self.failIf(isinstance(db.test.find_one(), SON)) - # Try a few simple things - connection = Connection("mongodb://[::1]:27017") - connection = Connection("mongodb://[::1]:27017/?slaveOk=true") - connection = Connection("[::1]:27017,localhost:27017") - connection = Connection("localhost:27017,[::1]:27017") - connection.pymongo_test.test.save({"dummy": u"object"}) - connection.pymongo_test_bernie.test.save({"dummy": u"object"}) - - dbs = connection.database_names() - self.assert_("pymongo_test" in dbs) - self.assert_("pymongo_test_bernie" in dbs) +# TODO come up with a different way to test `network_timeout`. This is just +# too sketchy. 
+# +# def test_socket_timeout(self): +# no_timeout = Connection(self.host, self.port) +# timeout = Connection(self.host, self.port, network_timeout=0.1) + +# no_timeout.pymongo_test.drop_collection("test") + +# no_timeout.pymongo_test.test.save({"x": 1}) + +# where_func = """function (doc) { +# var d = new Date().getTime() + 1000; +# var x = new Date().getTime(); +# while (x < d) { +# x = new Date().getTime(); +# } +# return true; +# }""" + +# def get_x(db): +# return db.test.find().where(where_func).next()["x"] + +# self.assertEqual(1, get_x(no_timeout.pymongo_test)) +# self.assertRaises(ConnectionFailure, get_x, timeout.pymongo_test) +# self.assertEqual(1, no_timeout.pymongo_test.test.find().next()["x"]) if __name__ == "__main__": diff -Nru pymongo-1.11/test/test_cursor.py pymongo-1.7/test/test_cursor.py --- pymongo-1.11/test/test_cursor.py 2011-04-06 18:53:36.000000000 +0000 +++ pymongo-1.7/test/test_cursor.py 2010-05-19 14:01:01.000000000 +0000 @@ -22,13 +22,11 @@ from nose.plugins.skip import SkipTest -from bson.code import Code -from pymongo import (ASCENDING, - DESCENDING) +from pymongo.errors import InvalidOperation, OperationFailure from pymongo.cursor import Cursor from pymongo.database import Database -from pymongo.errors import (InvalidOperation, - OperationFailure) +from pymongo.code import Code +from pymongo import ASCENDING, DESCENDING from test_connection import get_connection import version @@ -54,7 +52,8 @@ def test_hint(self): db = self.db self.assertRaises(TypeError, db.test.find().hint, 5.5) - db.test.drop() + db.test.remove({}) + db.test.drop_indexes() for i in range(100): db.test.insert({"num": i, "foo": i}) @@ -87,6 +86,7 @@ self.assertRaises(TypeError, db.test.find().hint, index) + def test_limit(self): db = self.db @@ -94,7 +94,7 @@ self.assertRaises(TypeError, db.test.find().limit, "hello") self.assertRaises(TypeError, db.test.find().limit, 5.5) - db.test.drop() + db.test.remove({}) for i in range(100): db.test.save({"x": i}) @@ -134,48 
+134,6 @@ break self.assertRaises(InvalidOperation, a.limit, 5) - def test_batch_size(self): - db = self.db - db.test.drop() - for x in range(200): - db.test.save({"x": x}) - - self.assertRaises(TypeError, db.test.find().batch_size, None) - self.assertRaises(TypeError, db.test.find().batch_size, "hello") - self.assertRaises(TypeError, db.test.find().batch_size, 5.5) - self.assertRaises(ValueError, db.test.find().batch_size, -1) - a = db.test.find() - for _ in a: - break - self.assertRaises(InvalidOperation, a.batch_size, 5) - - def cursor_count(cursor, expected_count): - count = 0 - for _ in cursor: - count += 1 - self.assertEqual(expected_count, count) - - cursor_count(db.test.find().batch_size(0), 200) - cursor_count(db.test.find().batch_size(1), 200) - cursor_count(db.test.find().batch_size(2), 200) - cursor_count(db.test.find().batch_size(5), 200) - cursor_count(db.test.find().batch_size(100), 200) - cursor_count(db.test.find().batch_size(500), 200) - - cursor_count(db.test.find().batch_size(0).limit(1), 1) - cursor_count(db.test.find().batch_size(1).limit(1), 1) - cursor_count(db.test.find().batch_size(2).limit(1), 1) - cursor_count(db.test.find().batch_size(5).limit(1), 1) - cursor_count(db.test.find().batch_size(100).limit(1), 1) - cursor_count(db.test.find().batch_size(500).limit(1), 1) - - cursor_count(db.test.find().batch_size(0).limit(10), 10) - cursor_count(db.test.find().batch_size(1).limit(10), 10) - cursor_count(db.test.find().batch_size(2).limit(10), 10) - cursor_count(db.test.find().batch_size(5).limit(10), 10) - cursor_count(db.test.find().batch_size(100).limit(10), 10) - cursor_count(db.test.find().batch_size(500).limit(10), 10) - def test_skip(self): db = self.db @@ -231,7 +189,7 @@ [("hello", DESCENDING)], DESCENDING) self.assertRaises(TypeError, db.test.find().sort, "hello", "world") - db.test.drop() + db.test.remove({}) unsort = range(10) random.shuffle(unsort) @@ -260,7 +218,7 @@ shuffled = list(expected) random.shuffle(shuffled) - 
db.test.drop() + db.test.remove({}) for (a, b) in shuffled: db.test.save({"a": a, "b": b}) @@ -277,7 +235,7 @@ def test_count(self): db = self.db - db.test.drop() + db.test.remove({}) self.assertEqual(0, db.test.find().count()) @@ -302,7 +260,7 @@ def test_where(self): db = self.db - db.test.drop() + db.test.remove({}) a = db.test.find() self.assertRaises(TypeError, a.where, 5) @@ -345,34 +303,59 @@ db = self.db db.drop_collection("test") - c = db.command("cursorInfo")["clientCursors_size"] + client_cursors = db.command("cursorInfo")["clientCursors_size"] + by_location = db.command("cursorInfo")["byLocation_size"] test = db.test for i in range(10000): test.insert({"i": i}) - self.assertEqual(c, db.command("cursorInfo")["clientCursors_size"]) + + self.assertEqual(client_cursors, + db.command("cursorInfo")["clientCursors_size"]) + self.assertEqual(by_location, + db.command("cursorInfo")["byLocation_size"]) for _ in range(10): db.test.find_one() - self.assertEqual(c, db.command("cursorInfo")["clientCursors_size"]) + + self.assertEqual(client_cursors, + db.command("cursorInfo")["clientCursors_size"]) + self.assertEqual(by_location, + db.command("cursorInfo")["byLocation_size"]) for _ in range(10): for x in db.test.find(): break - self.assertEqual(c, db.command("cursorInfo")["clientCursors_size"]) + + self.assertEqual(client_cursors, + db.command("cursorInfo")["clientCursors_size"]) + self.assertEqual(by_location, + db.command("cursorInfo")["byLocation_size"]) a = db.test.find() for x in a: break - self.assertNotEqual(c, db.command("cursorInfo")["clientCursors_size"]) + + self.assertNotEqual(client_cursors, + db.command("cursorInfo")["clientCursors_size"]) + self.assertNotEqual(by_location, + db.command("cursorInfo")["byLocation_size"]) del a - self.assertEqual(c, db.command("cursorInfo")["clientCursors_size"]) + + self.assertEqual(client_cursors, + db.command("cursorInfo")["clientCursors_size"]) + self.assertEqual(by_location, + 
db.command("cursorInfo")["byLocation_size"]) a = db.test.find().limit(10) for x in a: break - self.assertEqual(c, db.command("cursorInfo")["clientCursors_size"]) + + self.assertEqual(client_cursors, + db.command("cursorInfo")["clientCursors_size"]) + self.assertEqual(by_location, + db.command("cursorInfo")["byLocation_size"]) def test_rewind(self): self.db.test.save({"x": 1}) @@ -446,17 +429,8 @@ self.assertNotEqual(cursor, cursor.clone()) - class MyClass(dict): - pass - - cursor = self.db.test.find(as_class=MyClass) - for e in cursor: - self.assertEqual(type(MyClass()), type(e)) - cursor = self.db.test.find(as_class=MyClass) - self.assertEqual(type(MyClass()), type(cursor[0])) - def test_count_with_fields(self): - self.db.test.drop() + self.db.test.remove({}) self.db.test.save({"x": 1}) if not version.at_least(self.db.connection, (1, 1, 3, -1)): @@ -509,19 +483,12 @@ for a, b in izip(count(20), self.db.test.find()[40:45][20:]): self.assertEqual(a, b['i']) - self.assertEqual(80, - len(list(self.db.test.find()[40:45].limit(0).skip(20)) - ) - ) - for a, b in izip(count(20), - self.db.test.find()[40:45].limit(0).skip(20)): + self.assertEqual(80, len(list(self.db.test.find()[40:45].limit(0).skip(20)))) + for a, b in izip(count(20), self.db.test.find()[40:45].limit(0).skip(20)): self.assertEqual(a, b['i']) - self.assertEqual(80, - len(list(self.db.test.find().limit(10).skip(40)[20:])) - ) - for a, b in izip(count(20), - self.db.test.find().limit(10).skip(40)[20:]): + self.assertEqual(80, len(list(self.db.test.find().limit(10).skip(40)[20:]))) + for a, b in izip(count(20), self.db.test.find().limit(10).skip(40)[20:]): self.assertEqual(a, b['i']) self.assertEqual(1, len(list(self.db.test.find()[:1]))) @@ -529,14 +496,10 @@ self.assertEqual(1, len(list(self.db.test.find()[99:100]))) self.assertEqual(1, len(list(self.db.test.find()[99:1000]))) - self.assertEqual(0, len(list(self.db.test.find()[10:10]))) - self.assertEqual(0, len(list(self.db.test.find()[:0]))) - 
self.assertEqual(80, - len(list(self.db.test.find()[10:10].limit(0).skip(20)) - ) - ) self.assertRaises(IndexError, lambda: self.db.test.find()[10:8]) + self.assertRaises(IndexError, lambda: self.db.test.find()[10:10]) + self.assertRaises(IndexError, lambda: self.db.test.find()[:0]) def test_getitem_numeric_index(self): self.db.drop_collection("test") @@ -552,8 +515,7 @@ self.assertRaises(IndexError, lambda x: self.db.test.find()[x], -1) self.assertRaises(IndexError, lambda x: self.db.test.find()[x], 100) - self.assertRaises(IndexError, - lambda x: self.db.test.find().skip(50)[x], 50) + self.assertRaises(IndexError, lambda x: self.db.test.find().skip(50)[x], 50) def test_count_with_limit_and_skip(self): if not version.at_least(self.db.connection, (1, 1, 4, -1)): diff -Nru pymongo-1.11/test/test_database.py pymongo-1.7/test/test_database.py --- pymongo-1.11/test/test_database.py 2011-05-05 00:09:39.000000000 +0000 +++ pymongo-1.7/test/test_database.py 2010-06-04 17:58:49.000000000 +0000 @@ -20,25 +20,24 @@ sys.path[0:0] = [""] import unittest -from bson.code import Code -from bson.dbref import DBRef -from bson.objectid import ObjectId -from bson.son import SON from pymongo import (ALL, ASCENDING, DESCENDING, helpers, OFF, SLOW_ONLY) +from pymongo.code import Code from pymongo.collection import Collection from pymongo.connection import Connection from pymongo.database import Database +from pymongo.dbref import DBRef from pymongo.errors import (CollectionInvalid, InvalidName, InvalidOperation, OperationFailure) -from pymongo.son_manipulator import (AutoReference, - NamespaceInjector) +from pymongo.objectid import ObjectId +from pymongo.son import SON +from pymongo.son_manipulator import AutoReference, NamespaceInjector from test import version from test.test_connection import get_connection @@ -113,23 +112,17 @@ db.test.save({"dummy": u"object"}) self.assert_("test" in db.collection_names()) db.drop_collection("test") - self.assertFalse("test" in 
db.collection_names()) + self.failIf("test" in db.collection_names()) db.test.save({"dummy": u"object"}) self.assert_("test" in db.collection_names()) db.drop_collection(u"test") - self.assertFalse("test" in db.collection_names()) + self.failIf("test" in db.collection_names()) db.test.save({"dummy": u"object"}) self.assert_("test" in db.collection_names()) db.drop_collection(db.test) - self.assertFalse("test" in db.collection_names()) - - db.test.save({"dummy": u"object"}) - self.assert_("test" in db.collection_names()) - db.test.drop() - self.assertFalse("test" in db.collection_names()) - db.test.drop() + self.failIf("test" in db.collection_names()) db.drop_collection(db.test.doesnotexist) @@ -148,15 +141,10 @@ self.assert_(db.validate_collection("test")) self.assert_(db.validate_collection(db.test)) - self.assert_(db.validate_collection(db.test, full=True)) - self.assert_(db.validate_collection(db.test, scandata=True)) - self.assert_(db.validate_collection(db.test, scandata=True, full=True)) - self.assert_(db.validate_collection(db.test, True, True)) - def test_profiling_levels(self): db = self.connection.pymongo_test - self.assertEqual(db.profiling_level(), OFF) # default + self.assertEqual(db.profiling_level(), OFF) #default self.assertRaises(ValueError, db.set_profiling_level, 5.5) self.assertRaises(ValueError, db.set_profiling_level, None) @@ -212,9 +200,6 @@ prev_error.pop("lastOp", None) error = db.error() error.pop("lastOp", None) - # getLastError includes "connectionId" in recent - # server versions, getPrevError does not. 
- error.pop("connectionId", None) self.assertEqual(error, prev_error) db.test.find_one() @@ -241,7 +226,7 @@ self.assert_(db.last_status()["updatedExisting"]) db.test.update({"i": 1}, {"$set": {"i": 500}}) - self.assertFalse(db.last_status()["updatedExisting"]) + self.failIf(db.last_status()["updatedExisting"]) def test_password_digest(self): self.assertRaises(TypeError, helpers._password_digest, 5) @@ -266,20 +251,20 @@ self.assertRaises(TypeError, db.authenticate, 5, "password") self.assertRaises(TypeError, db.authenticate, "mike", 5) - self.assertFalse(db.authenticate("mike", "not a real password")) - self.assertFalse(db.authenticate("faker", "password")) + self.failIf(db.authenticate("mike", "not a real password")) + self.failIf(db.authenticate("faker", "password")) self.assert_(db.authenticate("mike", "password")) self.assert_(db.authenticate(u"mike", u"password")) db.remove_user("mike") - self.assertFalse(db.authenticate("mike", "password")) + self.failIf(db.authenticate("mike", "password")) - self.assertFalse(db.authenticate("Gustave", u"Dor\xe9")) + self.failIf(db.authenticate("Gustave", u"Dor\xe9")) db.add_user("Gustave", u"Dor\xe9") self.assert_(db.authenticate("Gustave", u"Dor\xe9")) db.add_user("Gustave", "password") - self.assertFalse(db.authenticate("Gustave", u"Dor\xe9")) + self.failIf(db.authenticate("Gustave", u"Dor\xe9")) self.assert_(db.authenticate("Gustave", u"password")) # just make sure there are no exceptions here @@ -310,10 +295,8 @@ obj = {"x": True} key = db.test.save(obj) self.assertEqual(obj, db.dereference(DBRef("test", key))) - self.assertEqual(obj, - db.dereference(DBRef("test", key, "pymongo_test"))) - self.assertRaises(ValueError, - db.dereference, DBRef("test", key, "foo")) + self.assertEqual(obj, db.dereference(DBRef("test", key, "pymongo_test"))) + self.assertRaises(ValueError, db.dereference, DBRef("test", key, "foo")) self.assertEqual(None, db.dereference(DBRef("test", 4))) obj = {"_id": 4} @@ -408,11 +391,11 @@ 
self.assert_(db.test.find_one({"x": 2})) db.test.remove({"x": 2}) - self.assertFalse(db.test.find_one({"x": 2})) + self.failIf(db.test.find_one({"x": 2})) self.assert_(db.test.find_one()) db.test.remove({}) - self.assertFalse(db.test.find_one()) + self.failIf(db.test.find_one()) def test_save_a_bunch(self): db = self.connection.pymongo_test @@ -490,7 +473,6 @@ self.assertEqual(0, db.system.js.count()) db.system_js.add = "function(a, b) { return a + b; }" - self.assertEqual('add', db.system.js.find_one()['_id']) self.assertEqual(1, db.system.js.count()) self.assertEqual(6, db.system_js.add(1, 5)) @@ -508,20 +490,6 @@ db.system_js.no_param = Code("return 5;") self.assertEqual(5, db.system_js.no_param()) - def test_system_js_list(self): - db = self.connection.pymongo_test - db.system.js.remove() - self.assertEqual([], db.system_js.list()) - - db.system_js.foo = "blah" - self.assertEqual(["foo"], db.system_js.list()) - - db.system_js.bar = "baz" - self.assertEqual(set(["foo", "bar"]), set(db.system_js.list())) - - del db.system_js.foo - self.assertEqual(["bar"], db.system_js.list()) - if __name__ == "__main__": unittest.main() diff -Nru pymongo-1.11/test/test_dbref.py pymongo-1.7/test/test_dbref.py --- pymongo-1.11/test/test_dbref.py 2011-04-06 19:00:37.000000000 +0000 +++ pymongo-1.7/test/test_dbref.py 2010-05-19 14:01:01.000000000 +0000 @@ -14,15 +14,12 @@ """Tests for the dbref module.""" -import pickle import unittest import sys sys.path[0:0] = [""] -from bson.objectid import ObjectId -from bson.dbref import DBRef - -from copy import deepcopy +from pymongo.objectid import ObjectId +from pymongo.dbref import DBRef class TestDBRef(unittest.TestCase): @@ -60,85 +57,28 @@ self.assertRaises(AttributeError, bar) def test_repr(self): - self.assertEqual(repr(DBRef("coll", - ObjectId("1234567890abcdef12345678"))), + self.assertEqual(repr(DBRef("coll", ObjectId("1234567890abcdef12345678"))), "DBRef('coll', ObjectId('1234567890abcdef12345678'))") - 
self.assertEqual(repr(DBRef(u"coll", - ObjectId("1234567890abcdef12345678"))), - "DBRef(u'coll', ObjectId('1234567890abcdef12345678'))" - ) - self.assertEqual(repr(DBRef("coll", 5, foo="bar")), - "DBRef('coll', 5, foo='bar')") - self.assertEqual(repr(DBRef("coll", - ObjectId("1234567890abcdef12345678"), "foo")), - "DBRef('coll', ObjectId('1234567890abcdef12345678'), " - "'foo')") - self.assertEqual(repr(DBRef("coll", 5, "baz", foo="bar", baz=4)), - "DBRef('coll', 5, 'baz', foo='bar', baz=4)") + self.assertEqual(repr(DBRef(u"coll", ObjectId("1234567890abcdef12345678"))), + "DBRef(u'coll', ObjectId('1234567890abcdef12345678'))") + self.assertEqual(repr(DBRef("coll", ObjectId("1234567890abcdef12345678"), "foo")), + "DBRef('coll', ObjectId('1234567890abcdef12345678'), 'foo')") def test_cmp(self): self.assertEqual(DBRef("coll", ObjectId("1234567890abcdef12345678")), DBRef(u"coll", ObjectId("1234567890abcdef12345678"))) - self.assertNotEqual(DBRef("coll", - ObjectId("1234567890abcdef12345678")), - DBRef(u"coll", - ObjectId("1234567890abcdef12345678"), "foo")) - self.assertNotEqual(DBRef("coll", - ObjectId("1234567890abcdef12345678")), + self.assertNotEqual(DBRef("coll", ObjectId("1234567890abcdef12345678")), + DBRef(u"coll", ObjectId("1234567890abcdef12345678"), "foo")) + self.assertNotEqual(DBRef("coll", ObjectId("1234567890abcdef12345678")), DBRef("col", ObjectId("1234567890abcdef12345678"))) - self.assertNotEqual(DBRef("coll", - ObjectId("1234567890abcdef12345678")), + self.assertNotEqual(DBRef("coll", ObjectId("1234567890abcdef12345678")), DBRef("coll", ObjectId("123456789011"))) - self.assertNotEqual(DBRef("coll", - ObjectId("1234567890abcdef12345678")), 4) - self.assertEqual(DBRef("coll", - ObjectId("1234567890abcdef12345678"), "foo"), - DBRef(u"coll", - ObjectId("1234567890abcdef12345678"), "foo")) - self.assertNotEqual(DBRef("coll", - ObjectId("1234567890abcdef12345678"), "foo"), - DBRef(u"coll", - ObjectId("1234567890abcdef12345678"), "bar")) - - def 
test_kwargs(self): - self.assertEqual(DBRef("coll", 5, foo="bar"), - DBRef("coll", 5, foo="bar")) - self.assertNotEqual(DBRef("coll", 5, foo="bar"), DBRef("coll", 5)) - self.assertNotEqual(DBRef("coll", 5, foo="bar"), - DBRef("coll", 5, foo="baz")) - self.assertEqual("bar", DBRef("coll", 5, foo="bar").foo) - self.assertRaises(AttributeError, getattr, - DBRef("coll", 5, foo="bar"), "bar") - - def test_deepcopy(self): - a = DBRef('coll', 'asdf', 'db', x=[1]) - b = deepcopy(a) - - self.assertEqual(a, b) - self.assertNotEqual(id(a), id(b.x)) - self.assertEqual(a.x, b.x) - self.assertNotEqual(id(a.x), id(b.x)) - - b.x[0] = 2 - self.assertEqual(a.x, [1]) - self.assertEqual(b.x, [2]) - - def test_pickling(self): - dbr = DBRef('coll', 5, foo='bar') - pkl = pickle.dumps(dbr) - dbr2 = pickle.loads(pkl) - self.assertEqual(dbr, dbr2) - - def test_dbref_hash(self): - dbref_1a = DBRef('collection', 'id', 'database') - dbref_1b = DBRef('collection', 'id', 'database') - self.assertEquals(hash(dbref_1a), hash(dbref_1b)) - - dbref_2a = DBRef('collection', 'id', 'database', custom='custom') - dbref_2b = DBRef('collection', 'id', 'database', custom='custom') - self.assertEquals(hash(dbref_2a), hash(dbref_2b)) + self.assertNotEqual(DBRef("coll", ObjectId("1234567890abcdef12345678")), 4) + self.assertEqual(DBRef("coll", ObjectId("1234567890abcdef12345678"), "foo"), + DBRef(u"coll", ObjectId("1234567890abcdef12345678"), "foo")) + self.assertNotEqual(DBRef("coll", ObjectId("1234567890abcdef12345678"), "foo"), + DBRef(u"coll", ObjectId("1234567890abcdef12345678"), "bar")) - self.assertNotEqual(hash(dbref_1a), hash(dbref_2a)) if __name__ == "__main__": unittest.main() diff -Nru pymongo-1.11/test/test_grid_file.py pymongo-1.7/test/test_grid_file.py --- pymongo-1.11/test/test_grid_file.py 2011-04-06 19:05:36.000000000 +0000 +++ pymongo-1.7/test/test_grid_file.py 2010-05-19 14:01:01.000000000 +0000 @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- -# # Copyright 2009-2010 10gen, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,7 +25,6 @@ import unittest sys.path[0:0] = [""] -from bson.objectid import ObjectId from gridfs.grid_file import (_SEEK_CUR, _SEEK_END, GridIn, @@ -35,6 +32,7 @@ GridOut) from gridfs.errors import (NoFile, UnsupportedAPI) +from pymongo.objectid import ObjectId from test_connection import get_connection import qcheck @@ -60,6 +58,7 @@ g = GridOut(self.db.fs, f._id) self.assertEqual("hello world", g.read()) + f = GridIn(self.db.fs, filename="test") f.close() self.assertEqual(2, self.db.fs.files.find().count()) @@ -136,8 +135,10 @@ self.assertRaises(AttributeError, setattr, a, "_id", 5) self.assertEqual("my_file", a.filename) + self.assertRaises(AttributeError, setattr, a, "name", "foo") self.assertEqual("text/html", a.content_type) + self.assertRaises(AttributeError, setattr, a, "content_type", "foo") self.assertEqual(0, a.length) self.assertRaises(AttributeError, setattr, a, "length", 5) @@ -149,8 +150,10 @@ self.assertRaises(AttributeError, setattr, a, "upload_date", 5) self.assertEqual(["foo"], a.aliases) + self.assertRaises(AttributeError, setattr, a, "aliases", []) self.assertEqual({"foo": 1}, a.metadata) + self.assertRaises(AttributeError, setattr, a, "metadata", {}) self.assertEqual("d41d8cd98f00b204e9800998ecf8427e", a.md5) self.assertRaises(AttributeError, setattr, a, "md5", 5) @@ -172,8 +175,7 @@ self.assertEqual("hello", a.baz) self.assertRaises(AttributeError, getattr, a, "mike") - b = GridIn(self.db.fs, - content_type="text/html", chunk_size=1000, baz=100) + b = GridIn(self.db.fs, content_type="text/html", chunk_size=1000, baz=100) self.assertEqual("text/html", b.content_type) self.assertEqual(1000, b.chunk_size) self.assertEqual(100, b.baz) @@ -224,19 +226,6 @@ "upload_date", "aliases", "metadata", "md5"]: self.assertRaises(AttributeError, setattr, b, attr, 5) - def test_grid_out_file_document(self): - a = GridIn(self.db.fs) - a.write("foo bar") - a.close() - - b = 
GridOut(self.db.fs, file_document=self.db.fs.files.find_one()) - self.assertEqual("foo bar", b.read()) - - c = GridOut(self.db.fs, 5, file_document=self.db.fs.files.find_one()) - self.assertEqual("foo bar", c.read()) - - self.assertRaises(NoFile, GridOut, self.db.fs, file_document={}) - def test_write_file_like(self): a = GridIn(self.db.fs) a.write("hello world") @@ -365,24 +354,6 @@ self.assertEqual("d", g.read(2)) self.assertEqual("", g.read(2)) - def test_readline(self): - f = GridIn(self.db.fs, chunkSize=5) - f.write("""Hello world, -How are you? -Hope all is well. -Bye""") - f.close() - - g = GridOut(self.db.fs, f._id) - self.assertEqual("H", g.read(1)) - self.assertEqual("ello world,\n", g.readline()) - self.assertEqual("How a", g.readline(5)) - self.assertEqual("", g.readline(0)) - self.assertEqual("re you?\n", g.readline()) - self.assertEqual("Hope all is well.\n", g.readline(1000)) - self.assertEqual("Bye", g.readline()) - self.assertEqual("", g.readline()) - def test_iterator(self): f = GridIn(self.db.fs) f.close() @@ -405,8 +376,7 @@ self.assertEqual(["he", "ll", "o ", "wo", "rl", "d"], list(g)) def test_read_chunks_unaligned_buffer_size(self): - in_data = ("This is a text that doesn't " - "quite fit in a single 16-byte chunk.") + in_data = "This is a text that doesn't quite fit in a single 16-byte chunk." 
f = GridIn(self.db.fs, chunkSize=16) f.write(in_data) f.close() @@ -421,57 +391,5 @@ self.assertEqual(in_data, out_data) - def test_write_unicode(self): - f = GridIn(self.db.fs) - self.assertRaises(TypeError, f.write, u"foo") - - f = GridIn(self.db.fs, encoding="utf-8") - f.write(u"foo") - f.close() - - g = GridOut(self.db.fs, f._id) - self.assertEqual("foo", g.read()) - - f = GridIn(self.db.fs, encoding="iso-8859-1") - f.write(u"aé") - f.close() - - g = GridOut(self.db.fs, f._id) - self.assertEqual(u"aé".encode("iso-8859-1"), g.read()) - - def test_set_after_close(self): - f = GridIn(self.db.fs, _id="foo", bar="baz") - - self.assertEqual("foo", f._id) - self.assertEqual("baz", f.bar) - self.assertRaises(AttributeError, getattr, f, "baz") - self.assertRaises(AttributeError, getattr, f, "uploadDate") - - self.assertRaises(AttributeError, setattr, f, "_id", 5) - f.bar = "foo" - f.baz = 5 - - self.assertEqual("foo", f._id) - self.assertEqual("foo", f.bar) - self.assertEqual(5, f.baz) - self.assertRaises(AttributeError, getattr, f, "uploadDate") - - f.close() - - self.assertEqual("foo", f._id) - self.assertEqual("foo", f.bar) - self.assertEqual(5, f.baz) - self.assert_(f.uploadDate) - - self.assertRaises(AttributeError, setattr, f, "_id", 5) - f.bar = "a" - f.baz = "b" - self.assertRaises(AttributeError, setattr, f, "upload_date", 5) - - g = GridOut(self.db.fs, f._id) - self.assertEqual("a", f.bar) - self.assertEqual("b", f.baz) - - if __name__ == "__main__": unittest.main() diff -Nru pymongo-1.11/test/test_gridfs.py pymongo-1.7/test/test_gridfs.py --- pymongo-1.11/test/test_gridfs.py 2011-05-04 19:21:50.000000000 +0000 +++ pymongo-1.7/test/test_gridfs.py 2010-05-19 14:01:01.000000000 +0000 @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- -# # Copyright 2009-2010 10gen, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -90,6 +88,7 @@ self.assertEqual("foo", oid) self.assertEqual("hello world", self.fs.get("foo").read()) + def test_list(self): self.assertEqual([], self.fs.list()) self.fs.put("hello world") @@ -112,7 +111,7 @@ self.assertEqual(0, raw["length"]) self.assertEqual(oid, raw["_id"]) self.assert_(isinstance(raw["uploadDate"], datetime.datetime)) - self.assertEqual(256 * 1024, raw["chunkSize"]) + self.assertEqual(256*1024, raw["chunkSize"]) self.assert_(isinstance(raw["md5"], basestring)) def test_alt_collection(self): @@ -179,71 +178,6 @@ self.fs.delete(a) self.assertRaises(NoFile, self.fs.get_last_version, "test") - def test_get_last_version_with_metadata(self): - a = self.fs.put("foo", filename="test", author="author") - time.sleep(0.01) - b = self.fs.put("bar", filename="test", author="author") - - self.assertEqual("bar", self.fs.get_last_version(author="author").read()) - self.fs.delete(b) - self.assertEqual("foo", self.fs.get_last_version(author="author").read()) - self.fs.delete(a) - - a = self.fs.put("foo", filename="test", author="author1") - time.sleep(0.01) - b = self.fs.put("bar", filename="test", author="author2") - - self.assertEqual("foo", self.fs.get_last_version(author="author1").read()) - self.assertEqual("bar", self.fs.get_last_version(author="author2").read()) - self.assertEqual("bar", self.fs.get_last_version(filename="test").read()) - - self.assertRaises(NoFile, self.fs.get_last_version, author="author3") - self.assertRaises(NoFile, self.fs.get_last_version, filename="nottest", author="author1") - - self.fs.delete(a) - self.fs.delete(b) - - def test_get_version(self): - self.fs.put("foo", filename="test") - time.sleep(0.01) - self.fs.put("bar", filename="test") - time.sleep(0.01) - self.fs.put("baz", filename="test") - time.sleep(0.01) - - self.assertEqual("foo", self.fs.get_version("test", 0).read()) - self.assertEqual("bar", self.fs.get_version("test", 1).read()) - 
self.assertEqual("baz", self.fs.get_version("test", 2).read()) - - self.assertEqual("baz", self.fs.get_version("test", -1).read()) - self.assertEqual("bar", self.fs.get_version("test", -2).read()) - self.assertEqual("foo", self.fs.get_version("test", -3).read()) - - self.assertRaises(NoFile, self.fs.get_version, "test", 3) - self.assertRaises(NoFile, self.fs.get_version, "test", -4) - - def test_get_version_with_metadata(self): - a = self.fs.put("foo", filename="test", author="author1") - time.sleep(0.01) - b = self.fs.put("bar", filename="test", author="author1") - time.sleep(0.01) - c = self.fs.put("baz", filename="test", author="author2") - - self.assertEqual("foo", self.fs.get_version(filename="test", author="author1", version=-2).read()) - self.assertEqual("bar", self.fs.get_version(filename="test", author="author1", version=-1).read()) - self.assertEqual("foo", self.fs.get_version(filename="test", author="author1", version=0).read()) - self.assertEqual("bar", self.fs.get_version(filename="test", author="author1", version=1).read()) - self.assertEqual("baz", self.fs.get_version(filename="test", author="author2", version=0).read()) - self.assertEqual("baz", self.fs.get_version(filename="test", version=-1).read()) - self.assertEqual("baz", self.fs.get_version(filename="test", version=2).read()) - - self.assertRaises(NoFile, self.fs.get_version, filename="test", author="author3") - self.assertRaises(NoFile, self.fs.get_version, filename="test", author="author1", version=2) - - self.fs.delete(a) - self.fs.delete(b) - self.fs.delete(c) - def test_put_filelike(self): oid = self.fs.put(StringIO("hello world"), chunk_size=1) self.assertEqual(11, self.db.fs.chunks.count()) @@ -253,42 +187,6 @@ oid = self.fs.put("hello") self.assertRaises(FileExists, self.fs.put, "world", _id=oid) - def test_exists(self): - oid = self.fs.put("hello") - self.assert_(self.fs.exists(oid)) - self.assert_(self.fs.exists({"_id": oid})) - self.assert_(self.fs.exists(_id=oid)) - - 
self.assertFalse(self.fs.exists(filename="mike")) - self.assertFalse(self.fs.exists("mike")) - - oid = self.fs.put("hello", filename="mike", foo=12) - self.assert_(self.fs.exists(oid)) - self.assert_(self.fs.exists({"_id": oid})) - self.assert_(self.fs.exists(_id=oid)) - self.assert_(self.fs.exists(filename="mike")) - self.assert_(self.fs.exists({"filename": "mike"})) - self.assert_(self.fs.exists(foo=12)) - self.assert_(self.fs.exists({"foo": 12})) - self.assert_(self.fs.exists(foo={"$gt": 11})) - self.assert_(self.fs.exists({"foo": {"$gt": 11}})) - - self.assertFalse(self.fs.exists(foo=13)) - self.assertFalse(self.fs.exists({"foo": 13})) - self.assertFalse(self.fs.exists(foo={"$gt": 12})) - self.assertFalse(self.fs.exists({"foo": {"$gt": 12}})) - - def test_put_unicode(self): - self.assertRaises(TypeError, self.fs.put, u"hello") - - oid = self.fs.put(u"hello", encoding="utf-8") - self.assertEqual("hello", self.fs.get(oid).read()) - self.assertEqual("utf-8", self.fs.get(oid).encoding) - - oid = self.fs.put(u"aé", encoding="iso-8859-1") - self.assertEqual(u"aé".encode("iso-8859-1"), self.fs.get(oid).read()) - self.assertEqual("iso-8859-1", self.fs.get(oid).encoding) - if __name__ == "__main__": unittest.main() diff -Nru pymongo-1.11/test/test_json_util.py pymongo-1.7/test/test_json_util.py --- pymongo-1.11/test/test_json_util.py 2011-04-06 19:11:08.000000000 +0000 +++ pymongo-1.7/test/test_json_util.py 2010-05-19 14:01:01.000000000 +0000 @@ -26,24 +26,14 @@ import simplejson as json except ImportError: json_lib = False -try: - import uuid - should_test_uuid = True -except ImportError: - should_test_uuid = False from nose.plugins.skip import SkipTest sys.path[0:0] = [""] -from bson.objectid import ObjectId -from bson.dbref import DBRef -from bson.min_key import MinKey -from bson.max_key import MaxKey -from bson.timestamp import Timestamp -from bson.tz_util import utc -from bson.json_util import default, object_hook - +from pymongo.json_util import default, 
object_hook +from pymongo.objectid import ObjectId +from pymongo.dbref import DBRef class TestJsonUtil(unittest.TestCase): @@ -68,45 +58,16 @@ self.round_trip({"ref": DBRef("foo", 5)}) self.round_trip({"ref": DBRef("foo", 5, "db")}) - # TODO this is broken when using cjson. See: - # http://jira.mongodb.org/browse/PYTHON-153 - # http://bugs.python.org/issue6105 - # - # self.assertEqual("{\"ref\": {\"$ref\": \"foo\", \"$id\": 5}}", - # json.dumps({"ref": DBRef("foo", 5)}, - # default=default)) - # self.assertEqual("{\"ref\": {\"$ref\": \"foo\", - # \"$id\": 5, \"$db\": \"bar\"}}", - # json.dumps({"ref": DBRef("foo", 5, "bar")}, - # default=default)) - def test_datetime(self): # only millis, not micros self.round_trip({"date": datetime.datetime(2009, 12, 9, 15, - 49, 45, 191000, utc)}) + 49, 45, 191000)}) def test_regex(self): res = self.round_tripped({"r": re.compile("a*b", re.IGNORECASE)})["r"] self.assertEqual("a*b", res.pattern) self.assertEqual(re.IGNORECASE, res.flags) - def test_minkey(self): - self.round_trip({"m": MinKey()}) - - def test_maxkey(self): - self.round_trip({"m": MinKey()}) - - def test_timestamp(self): - res = json.dumps({"ts": Timestamp(4, 13)}, default=default) - dct = json.loads(res) - self.assertEqual(dct['ts']['t'], 4) - self.assertEqual(dct['ts']['i'], 13) - - def test_uuid(self): - if not should_test_uuid: - raise SkipTest() - self.round_trip( - {'uuid': uuid.UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479')}) if __name__ == "__main__": unittest.main() diff -Nru pymongo-1.11/test/test_master_slave_connection.py pymongo-1.7/test/test_master_slave_connection.py --- pymongo-1.11/test/test_master_slave_connection.py 2011-04-06 19:16:12.000000000 +0000 +++ pymongo-1.7/test/test_master_slave_connection.py 2010-05-19 14:01:01.000000000 +0000 @@ -24,7 +24,6 @@ from pymongo.errors import ConnectionFailure, InvalidName from pymongo.errors import CollectionInvalid, OperationFailure -from pymongo.errors import AutoReconnect from pymongo.database import 
Database from pymongo.connection import Connection from pymongo.collection import Collection @@ -40,15 +39,17 @@ self.slaves = [] try: self.slaves.append(Connection(os.environ.get("DB_IP2", host), - int(os.environ.get("DB_PORT2", 27018)), - slave_okay=True)) + int(os.environ.get("DB_PORT2", + 27018)), + slave_okay=True)) except ConnectionFailure: pass try: self.slaves.append(Connection(os.environ.get("DB_IP3", host), - int(os.environ.get("DB_PORT3", 27019)), - slave_okay=True)) + int(os.environ.get("DB_PORT3", + 27019)), + slave_okay=True)) except ConnectionFailure: pass @@ -68,90 +69,6 @@ "MasterSlaveConnection(%r, %r)" % (self.master, self.slaves)) - def test_disconnect(self): - class Connection(object): - def __init__(self): - self._disconnects = 0 - - def disconnect(self): - self._disconnects += 1 - - self.connection._MasterSlaveConnection__master = Connection() - self.connection._MasterSlaveConnection__slaves = [Connection(), - Connection()] - - self.connection.disconnect() - self.assertEquals(1, - self.connection._MasterSlaveConnection__master._disconnects) - self.assertEquals(1, - self.connection._MasterSlaveConnection__slaves[0]._disconnects) - self.assertEquals(1, - self.connection._MasterSlaveConnection__slaves[1]._disconnects) - - def test_continue_until_slave_works(self): - class Slave(object): - calls = 0 - - def __init__(self, fail): - self._fail = fail - - def _send_message_with_response(self, *args, **kwargs): - Slave.calls += 1 - if self._fail: - raise AutoReconnect() - return 'sent' - - class NotRandomList(object): - last_idx = -1 - - def __init__(self): - self._items = [Slave(True), Slave(True), - Slave(False), Slave(True)] - - def __len__(self): - return len(self._items) - - def __getitem__(self, idx): - NotRandomList.last_idx = idx - return self._items.pop(0) - - self.connection._MasterSlaveConnection__slaves = NotRandomList() - - response = self.connection._send_message_with_response('message') - self.assertEquals((NotRandomList.last_idx, 
'sent'), response) - self.assertNotEquals(-1, NotRandomList.last_idx) - self.assertEquals(3, Slave.calls) - - def test_raise_autoreconnect_if_all_slaves_fail(self): - class Slave(object): - calls = 0 - - def __init__(self, fail): - self._fail = fail - - def _send_message_with_response(self, *args, **kwargs): - Slave.calls += 1 - if self._fail: - raise AutoReconnect() - return 'sent' - - class NotRandomList(object): - def __init__(self): - self._items = [Slave(True), Slave(True), - Slave(True), Slave(True)] - - def __len__(self): - return len(self._items) - - def __getitem__(self, idx): - return self._items.pop(0) - - self.connection._MasterSlaveConnection__slaves = NotRandomList() - - self.assertRaises(AutoReconnect, - self.connection._send_message_with_response, 'message') - self.assertEquals(4, Slave.calls) - def test_get_db(self): def make_db(base, name): @@ -181,7 +98,7 @@ self.assertRaises(TypeError, self.connection.drop_database, 5) self.assertRaises(TypeError, self.connection.drop_database, None) - self.connection.pymongo_test.test.save({"dummy": u"object"}, safe=True) + self.connection.pymongo_test.test.save({"dummy": u"object"}) dbs = self.connection.database_names() self.assert_("pymongo_test" in dbs) self.connection.drop_database("pymongo_test") @@ -214,7 +131,7 @@ except: count += 1 self.connection.end_request() - self.assertFalse(count) + self.failIf(count) # This was failing because commands were being sent to the slaves def test_create_collection(self): @@ -231,8 +148,7 @@ self.db.test.create_index('username', unique=True) self.db.test.save({'username': 'mike'}, safe=True) - self.assertRaises(OperationFailure, - self.db.test.save, {'username': 'mike'}, safe=True) + self.assertRaises(OperationFailure, self.db.test.save, {'username': 'mike'}, safe=True) # NOTE this test is non-deterministic, but I expect # some failures unless the db is pulling instantaneously... 
@@ -255,14 +171,14 @@ self.db.test.remove({}) self.db.test.insert({"x": 5586}) - time.sleep(11) + time.sleep(7) for _ in range(10): try: if 5586 != self.db.test.find_one()["x"]: count += 1 except: count += 1 - self.assertFalse(count) + self.failIf(count) def test_kill_cursors(self): @@ -282,7 +198,7 @@ for i in range(10000): db.test.insert({"i": i}) - time.sleep(11) # need to sleep to be sure this gets pulled... + time.sleep(6) # need to sleep to be sure this gets pulled... self.assertEqual(before, cursor_count()) diff -Nru pymongo-1.11/test/test_objectid.py pymongo-1.7/test/test_objectid.py --- pymongo-1.11/test/test_objectid.py 2011-04-06 19:19:34.000000000 +0000 +++ pymongo-1.7/test/test_objectid.py 2010-05-19 14:01:01.000000000 +0000 @@ -15,7 +15,6 @@ """Tests for the objectid module.""" import datetime -import pickle import warnings import unittest import sys @@ -24,10 +23,8 @@ from nose.plugins.skip import SkipTest -from bson.errors import InvalidId -from bson.objectid import ObjectId -from bson.tz_util import (FixedOffset, - utc) +from pymongo.objectid import ObjectId +from pymongo.errors import InvalidId def oid(x): @@ -67,14 +64,10 @@ def test_repr_str(self): self.assertEqual(repr(ObjectId("1234567890abcdef12345678")), "ObjectId('1234567890abcdef12345678')") - self.assertEqual(str(ObjectId("1234567890abcdef12345678")), - "1234567890abcdef12345678") - self.assertEqual(str(ObjectId("123456789012")), - "313233343536373839303132") - self.assertEqual(ObjectId("1234567890abcdef12345678").binary, - '\x124Vx\x90\xab\xcd\xef\x124Vx') - self.assertEqual(str(ObjectId('\x124Vx\x90\xab\xcd\xef\x124Vx')), - "1234567890abcdef12345678") + self.assertEqual(str(ObjectId("1234567890abcdef12345678")), "1234567890abcdef12345678") + self.assertEqual(str(ObjectId("123456789012")), "313233343536373839303132") + self.assertEqual(ObjectId("1234567890abcdef12345678").binary, '\x124Vx\x90\xab\xcd\xef\x124Vx') + self.assertEqual(str(ObjectId('\x124Vx\x90\xab\xcd\xef\x124Vx')), 
"1234567890abcdef12345678") def test_cmp(self): a = ObjectId() @@ -114,53 +107,14 @@ d1 = datetime.datetime.utcnow() d2 = ObjectId().generation_time - self.assertEqual(utc, d2.tzinfo) - d2 = d2.replace(tzinfo=None) - self.assert_(d2 - d1 < datetime.timedelta(seconds=2)) + self.assert_(d2 - d1 < datetime.timedelta(seconds = 2)) def test_from_datetime(self): d = datetime.datetime.utcnow() d = d - datetime.timedelta(microseconds=d.microsecond) oid = ObjectId.from_datetime(d) - self.assertEqual(d, oid.generation_time.replace(tzinfo=None)) + self.assertEqual(d, oid.generation_time) self.assertEqual("0" * 16, str(oid)[8:]) - aware = datetime.datetime(1993, 4, 4, 2, - tzinfo=FixedOffset(555, "SomeZone")) - as_utc = (aware - aware.utcoffset()).replace(tzinfo=utc) - oid = ObjectId.from_datetime(aware) - self.assertEqual(as_utc, oid.generation_time) - - def test_pickling(self): - orig = ObjectId() - self.assertEqual(orig, pickle.loads(pickle.dumps(orig))) - - def test_pickle_backwards_compatability(self): - - # This string was generated by pickling an ObjectId in pymongo - # version 1.9 - pickled_with_1_9 = ( - "ccopy_reg\n_reconstructor\np0\n" - "(cbson.objectid\nObjectId\np1\nc__builtin__\n" - "object\np2\nNtp3\nRp4\n" - "(dp5\nS'_ObjectId__id'\np6\n" - "S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np7\nsb.") - - # We also test against a hardcoded "New" pickle format so that we - # make sure we're backward compatible with the current version in - # the future as well. - pickled_with_1_10 = ( - "ccopy_reg\n_reconstructor\np0\n" - "(cbson.objectid\nObjectId\np1\nc__builtin__\n" - "object\np2\nNtp3\nRp4\n" - "S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np5\nb." 
- ) - - oid_1_9 = pickle.loads(pickled_with_1_9) - oid_1_10 = pickle.loads(pickled_with_1_10) - - self.assertEqual(oid_1_9, ObjectId("4d9a66561376c00b88000000")) - self.assertEqual(oid_1_9, oid_1_10) - if __name__ == "__main__": unittest.main() diff -Nru pymongo-1.11/test/test_paired.py pymongo-1.7/test/test_paired.py --- pymongo-1.11/test/test_paired.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/test/test_paired.py 2010-05-19 14:01:01.000000000 +0000 @@ -28,7 +28,7 @@ import warnings sys.path[0:0] = [""] -from pymongo.errors import ConnectionFailure +from pymongo.errors import ConnectionFailure, ConfigurationError from pymongo.connection import Connection skip_tests = True @@ -83,9 +83,9 @@ self.assertEqual(port, connection.port) slave = self.left == (host, port) and self.right or self.left - self.assertRaises(ConnectionFailure, Connection.paired, + self.assertRaises(ConfigurationError, Connection.paired, slave, self.bad) - self.assertRaises(ConnectionFailure, Connection.paired, + self.assertRaises(ConfigurationError, Connection.paired, self.bad, slave) def test_repr(self): @@ -93,7 +93,7 @@ connection = Connection.paired(self.left, self.right) self.assertEqual(repr(connection), - "Connection(['%s:%s', '%s:%s'])" % + "Connection.paired(('%s', %s), ('%s', %s))" % (self.left[0], self.left[1], self.right[0], diff -Nru pymongo-1.11/test/test_pooling.py pymongo-1.7/test/test_pooling.py --- pymongo-1.11/test/test_pooling.py 2011-04-18 17:47:57.000000000 +0000 +++ pymongo-1.7/test/test_pooling.py 2010-06-04 17:58:49.000000000 +0000 @@ -14,23 +14,21 @@ """Test built in connection-pooling.""" +import unittest +import threading import os import random import sys -import threading -import time -import unittest sys.path[0:0] = [""] from nose.plugins.skip import SkipTest -from pymongo.connection import Connection, _Pool +from pymongo.connection import Pool from test_connection import get_connection N = 50 DB = "pymongo-pooling-tests" - class 
MongoThread(threading.Thread): def __init__(self, test_case): @@ -113,18 +111,6 @@ assert len(self.c._Connection__pool.sockets) == 1 -class CreateAndReleaseSocket(threading.Thread): - - def __init__(self, connection): - threading.Thread.__init__(self) - self.c = connection - - def run(self): - self.c.test.test.find_one() - time.sleep(1) - self.c.end_request() - - class TestPooling(unittest.TestCase): def setUp(self): @@ -135,20 +121,6 @@ self.c[DB].unique.insert({"_id": "mike"}) self.c[DB].unique.find_one() - def test_max_pool_size_validation(self): - self.assertRaises(ValueError, Connection, max_pool_size=-1) - self.assertRaises(TypeError, Connection, max_pool_size='foo') - self.assertRaises(TypeError, Connection, - 'mongodb://localhost/?maxPoolSize=-1') - self.assertRaises(TypeError, Connection, - 'mongodb://localhost/?maxPoolSize=foo') - self.assertRaises(TypeError, Connection, - 'mongodb://localhost/?maxPoolSize=5.5') - c = Connection('mongodb://localhost/?maxPoolSize=5') - self.assertEqual(c.max_pool_size, 5) - c = Connection(max_pool_size=100) - self.assertEqual(c.max_pool_size, 100) - def test_no_disconnect(self): run_cases(self, [NoRequest, NonUnique, Unique, SaveAndFind]) @@ -156,19 +128,19 @@ run_cases(self, [SaveAndFind, Disconnect, Unique]) def test_independent_pools(self): - p = _Pool(None, 10) + p = Pool(None) self.assertEqual([], p.sockets) self.c.end_request() self.assertEqual([], p.sockets) # Sensical values aren't really important here - p1 = _Pool(5, 10) + p1 = Pool(5) self.assertEqual(None, p.socket_factory) self.assertEqual(5, p1.socket_factory) def test_dependent_pools(self): c = get_connection() - self.assertEqual(1, len(c._Connection__pool.sockets)) + self.assertEqual(0, len(c._Connection__pool.sockets)) c.test.test.find_one() self.assertEqual(0, len(c._Connection__pool.sockets)) c.end_request() @@ -185,18 +157,18 @@ def test_multiple_connections(self): a = get_connection() b = get_connection() - self.assertEqual(1, 
len(a._Connection__pool.sockets)) - self.assertEqual(1, len(b._Connection__pool.sockets)) + self.assertEqual(0, len(a._Connection__pool.sockets)) + self.assertEqual(0, len(b._Connection__pool.sockets)) a.test.test.find_one() a.end_request() self.assertEqual(1, len(a._Connection__pool.sockets)) - self.assertEqual(1, len(b._Connection__pool.sockets)) + self.assertEqual(0, len(b._Connection__pool.sockets)) a_sock = a._Connection__pool.sockets[0] b.end_request() self.assertEqual(1, len(a._Connection__pool.sockets)) - self.assertEqual(1, len(b._Connection__pool.sockets)) + self.assertEqual(0, len(b._Connection__pool.sockets)) b.test.test.find_one() self.assertEqual(1, len(a._Connection__pool.sockets)) @@ -226,9 +198,8 @@ def loop(pipe): c = get_connection() - self.assertEqual(1, len(c._Connection__pool.sockets)) - c.test.test.find_one() self.assertEqual(0, len(c._Connection__pool.sockets)) + c.test.test.find_one() c.end_request() self.assertEqual(1, len(c._Connection__pool.sockets)) pipe.send(c._Connection__pool.sockets[0].getsockname()) @@ -261,21 +232,6 @@ self.assert_(b_sock != c_sock) self.assertEqual(a_sock, a._Connection__pool.socket()) - def test_max_pool_size(self): - c = get_connection(max_pool_size=4) - - threads = [] - for i in range(40): - t = CreateAndReleaseSocket(c) - t.start() - threads.append(t) - - for t in threads: - t.join() - - # There's a race condition, so be lenient - self.assert_(abs(4 - len(c._Connection__pool.sockets)) < 4) - if __name__ == "__main__": unittest.main() diff -Nru pymongo-1.11/test/test_son_manipulator.py pymongo-1.7/test/test_son_manipulator.py --- pymongo-1.11/test/test_son_manipulator.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/test/test_son_manipulator.py 2010-05-19 14:01:01.000000000 +0000 @@ -19,15 +19,13 @@ import sys sys.path[0:0] = [""] -from bson.objectid import ObjectId -from bson.son import SON +import qcheck +from pymongo.objectid import ObjectId +from pymongo.son import SON +from pymongo.son_manipulator 
import SONManipulator, ObjectIdInjector +from pymongo.son_manipulator import NamespaceInjector, ObjectIdShuffler from pymongo.database import Database -from pymongo.son_manipulator import (NamespaceInjector, - ObjectIdInjector, - ObjectIdShuffler, - SONManipulator) from test_connection import get_connection -import qcheck class TestSONManipulator(unittest.TestCase): diff -Nru pymongo-1.11/test/test_son.py pymongo-1.7/test/test_son.py --- pymongo-1.11/test/test_son.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/test/test_son.py 2010-05-19 14:01:01.000000000 +0000 @@ -18,7 +18,7 @@ import sys sys.path[0:0] = [""] -from bson.son import SON +from pymongo.son import SON class TestSON(unittest.TestCase): diff -Nru pymongo-1.11/test/test_threads.py pymongo-1.7/test/test_threads.py --- pymongo-1.11/test/test_threads.py 2011-04-06 19:22:37.000000000 +0000 +++ pymongo-1.7/test/test_threads.py 2010-06-16 19:05:40.000000000 +0000 @@ -72,8 +72,7 @@ error = True try: - self.collection.update({"test": "unique"}, - {"$set": {"test": "update"}}, safe=True) + self.collection.update({"test": "unique"}, {"$set": {"test": "update"}}, safe=True) error = False except: if not self.expect_exception: diff -Nru pymongo-1.11/test/test_timestamp.py pymongo-1.7/test/test_timestamp.py --- pymongo-1.11/test/test_timestamp.py 2011-04-06 19:23:06.000000000 +0000 +++ pymongo-1.7/test/test_timestamp.py 2010-05-19 14:01:01.000000000 +0000 @@ -19,8 +19,7 @@ import sys sys.path[0:0] = [""] -from bson.timestamp import Timestamp -from bson.tz_util import utc +from pymongo.timestamp import Timestamp class TestTimestamp(unittest.TestCase): @@ -35,7 +34,7 @@ self.assert_(isinstance(t, Timestamp)) def test_datetime(self): - d = datetime.datetime(2010, 5, 5, tzinfo=utc) + d = datetime.datetime(2010, 5, 5) t = Timestamp(d, 0) self.assertEqual(1273017600, t.time) self.assertEqual(d, t.as_datetime()) @@ -51,10 +50,10 @@ self.assert_(Timestamp(0, 0)) def test_equality(self): - t = Timestamp(1, 1) - 
self.assertNotEqual(t, Timestamp(0, 1)) - self.assertNotEqual(t, Timestamp(1, 0)) - self.assertEqual(t, Timestamp(1, 1)) + t = Timestamp(1,1) + self.assertNotEqual(t, Timestamp(0,1)) + self.assertNotEqual(t, Timestamp(1,0)) + self.assertEqual(t, Timestamp(1,1)) def test_repr(self): t = Timestamp(0, 0) diff -Nru pymongo-1.11/test/version.py pymongo-1.7/test/version.py --- pymongo-1.11/test/version.py 2011-04-06 20:05:22.000000000 +0000 +++ pymongo-1.7/test/version.py 2010-05-19 14:01:01.000000000 +0000 @@ -14,7 +14,6 @@ """Some tools for running tests based on MongoDB server version.""" - def _padded(iter, length, padding=0): l = list(iter) if len(l) < length: @@ -22,7 +21,6 @@ l.append(0) return l - def _parse_version_string(version_string): mod = 0 if version_string.endswith("+"): @@ -34,10 +32,6 @@ elif version_string.endswith("-"): version_string = version_string[0:-1] mod = -1 - # Deal with '-rcX' substrings - if version_string.find('-rc') != -1: - version_string = version_string[0:version_string.find('-rc')] - mod = -1 version = [int(part) for part in version_string.split(".")] version = _padded(version, 3) @@ -45,11 +39,9 @@ return tuple(version) - # Note this is probably broken for very old versions of the database... 
def version(connection): return _parse_version_string(connection.server_info()["version"]) - def at_least(connection, min_version): return version(connection) >= tuple(_padded(min_version, 4)) diff -Nru pymongo-1.11/tools/auto_reconnect_test.py pymongo-1.7/tools/auto_reconnect_test.py --- pymongo-1.11/tools/auto_reconnect_test.py 2011-04-06 20:06:03.000000000 +0000 +++ pymongo-1.7/tools/auto_reconnect_test.py 2010-05-19 14:01:01.000000000 +0000 @@ -14,29 +14,26 @@ """Simple script to help test auto-reconnection.""" -import sys import threading import time -sys.path[0:0] = [""] -from pymongo.errors import AutoReconnect +from pymongo.errors import ConnectionFailure from pymongo.connection import Connection db = Connection.paired(("localhost", 27018)).test db.test.remove({}) - class Something(threading.Thread): def run(self): while True: time.sleep(1) try: - id = db.test.save({"x": 1}, safe=True) + id = db.test.save({"x": 1}) assert db.test.find_one(id)["x"] == 1 db.test.remove(id) db.connection.end_request() print "Y" - except AutoReconnect, e: + except ConnectionFailure, e: print e print "N" diff -Nru pymongo-1.11/tools/benchmark.py pymongo-1.7/tools/benchmark.py --- pymongo-1.11/tools/benchmark.py 2011-04-06 20:11:06.000000000 +0000 +++ pymongo-1.7/tools/benchmark.py 2010-05-19 14:01:01.000000000 +0000 @@ -45,40 +45,33 @@ "no_of_js_attached": 10, "no_of_images": 6 }, - "harvested_words": ["10gen", "web", "open", "source", "application", - "paas", "platform-as-a-service", "technology", - "helps", "developers", "focus", "building", - "mongodb", "mongo"] * 20 + "harvested_words": ["10gen","web","open","source","application","paas", + "platform-as-a-service","technology","helps", + "developers","focus","building","mongodb","mongo"] * 20 } - def setup_insert(db, collection, object): db.drop_collection(collection) - def insert(db, collection, object): for i in range(per_trial): to_insert = object.copy() to_insert["x"] = i db[collection].insert(to_insert) - def 
insert_batch(db, collection, object): for i in range(per_trial / batch_size): db[collection].insert([object] * batch_size) - def find_one(db, collection, x): for _ in range(per_trial): db[collection].find_one({"x": x}) - def find(db, collection, x): for _ in range(per_trial): for _ in db[collection].find({"x": x}): pass - def timed(name, function, args=[], setup=None): times = [] for _ in range(trials): @@ -91,19 +84,15 @@ print "%s%d" % (name + (60 - len(name)) * ".", per_trial / best_time) return best_time - def main(): - connection._TIMEOUT = 60 # jack up the timeout + connection._TIMEOUT=60 # jack up the timeout c = connection.Connection() c.drop_database("benchmark") db = c.benchmark - timed("insert (small, no index)", insert, - [db, 'small_none', small], setup_insert) - timed("insert (medium, no index)", insert, - [db, 'medium_none', medium], setup_insert) - timed("insert (large, no index)", insert, - [db, 'large_none', large], setup_insert) + timed("insert (small, no index)", insert, [db, 'small_none', small], setup_insert) + timed("insert (medium, no index)", insert, [db, 'medium_none', medium], setup_insert) + timed("insert (large, no index)", insert, [db, 'large_none', large], setup_insert) db.small_index.create_index("x", ASCENDING) timed("insert (small, indexed)", insert, [db, 'small_index', small]) @@ -112,26 +101,17 @@ db.large_index.create_index("x", ASCENDING) timed("insert (large, indexed)", insert, [db, 'large_index', large]) - timed("batch insert (small, no index)", insert_batch, - [db, 'small_bulk', small], setup_insert) - timed("batch insert (medium, no index)", insert_batch, - [db, 'medium_bulk', medium], setup_insert) - timed("batch insert (large, no index)", insert_batch, - [db, 'large_bulk', large], setup_insert) - - timed("find_one (small, no index)", find_one, - [db, 'small_none', per_trial / 2]) - timed("find_one (medium, no index)", find_one, - [db, 'medium_none', per_trial / 2]) - timed("find_one (large, no index)", find_one, - [db, 
'large_none', per_trial / 2]) - - timed("find_one (small, indexed)", find_one, - [db, 'small_index', per_trial / 2]) - timed("find_one (medium, indexed)", find_one, - [db, 'medium_index', per_trial / 2]) - timed("find_one (large, indexed)", find_one, - [db, 'large_index', per_trial / 2]) + timed("batch insert (small, no index)", insert_batch, [db, 'small_bulk', small], setup_insert) + timed("batch insert (medium, no index)", insert_batch, [db, 'medium_bulk', medium], setup_insert) + timed("batch insert (large, no index)", insert_batch, [db, 'large_bulk', large], setup_insert) + + timed("find_one (small, no index)", find_one, [db, 'small_none', per_trial / 2]) + timed("find_one (medium, no index)", find_one, [db, 'medium_none', per_trial / 2]) + timed("find_one (large, no index)", find_one, [db, 'large_none', per_trial / 2]) + + timed("find_one (small, indexed)", find_one, [db, 'small_index', per_trial / 2]) + timed("find_one (medium, indexed)", find_one, [db, 'medium_index', per_trial / 2]) + timed("find_one (large, indexed)", find_one, [db, 'large_index', per_trial / 2]) timed("find (small, no index)", find, [db, 'small_none', per_trial / 2]) timed("find (medium, no index)", find, [db, 'medium_none', per_trial / 2]) @@ -142,24 +122,18 @@ timed("find (large, indexed)", find, [db, 'large_index', per_trial / 2]) # timed("find range (small, no index)", find, -# [db, 'small_none', -# {"$gt": per_trial / 4, "$lt": 3 * per_trial / 4}]) +# [db, 'small_none', {"$gt": per_trial / 4, "$lt": 3 * per_trial / 4}]) # timed("find range (medium, no index)", find, -# [db, 'medium_none', -# {"$gt": per_trial / 4, "$lt": 3 * per_trial / 4}]) +# [db, 'medium_none', {"$gt": per_trial / 4, "$lt": 3 * per_trial / 4}]) # timed("find range (large, no index)", find, -# [db, 'large_none', -# {"$gt": per_trial / 4, "$lt": 3 * per_trial / 4}]) +# [db, 'large_none', {"$gt": per_trial / 4, "$lt": 3 * per_trial / 4}]) timed("find range (small, indexed)", find, - [db, 'small_index', - {"$gt": 
per_trial / 2, "$lt": per_trial / 2 + batch_size}]) + [db, 'small_index', {"$gt": per_trial / 2, "$lt": per_trial / 2 + batch_size}]) timed("find range (medium, indexed)", find, - [db, 'medium_index', - {"$gt": per_trial / 2, "$lt": per_trial / 2 + batch_size}]) + [db, 'medium_index', {"$gt": per_trial / 2, "$lt": per_trial / 2 + batch_size}]) timed("find range (large, indexed)", find, - [db, 'large_index', - {"$gt": per_trial / 2, "$lt": per_trial / 2 + batch_size}]) + [db, 'large_index', {"$gt": per_trial / 2, "$lt": per_trial / 2 + batch_size}]) if __name__ == "__main__": # cProfile.run("main()") diff -Nru pymongo-1.11/tools/clean.py pymongo-1.7/tools/clean.py --- pymongo-1.11/tools/clean.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/tools/clean.py 2010-05-19 14:01:01.000000000 +0000 @@ -21,25 +21,17 @@ import sys try: - os.remove("pymongo/_cmessage.so") - os.remove("bson/_cbson.so") + os.remove("pymongo/_cbson.so") except: pass try: - os.remove("pymongo/_cmessage.pyd") - os.remove("bson/_cbson.pyd") + os.remove("pymongo/_cbson.pyd") except: pass try: - from pymongo import _cmessage - sys.exit("could still import _cmessage") -except ImportError: - pass - -try: - from bson import _cbson + from pymongo import _cbson sys.exit("could still import _cbson") except ImportError: pass diff -Nru pymongo-1.11/tools/fail_if_no_c.py pymongo-1.7/tools/fail_if_no_c.py --- pymongo-1.11/tools/fail_if_no_c.py 2011-02-18 00:12:36.000000000 +0000 +++ pymongo-1.7/tools/fail_if_no_c.py 2010-05-19 14:01:01.000000000 +0000 @@ -20,8 +20,7 @@ import sys sys.path[0:0] = [""] -import bson import pymongo -if not pymongo.has_c() or not bson.has_c(): - sys.exit("could not load C extensions") +if not pymongo.has_c(): + sys.exit("could not import _cbson")