diff -Nru libcloud-0.5.0/apache_libcloud.egg-info/dependency_links.txt libcloud-0.15.1/apache_libcloud.egg-info/dependency_links.txt --- libcloud-0.5.0/apache_libcloud.egg-info/dependency_links.txt 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/apache_libcloud.egg-info/dependency_links.txt 2014-07-02 21:19:06.000000000 +0000 @@ -0,0 +1 @@ + diff -Nru libcloud-0.5.0/apache_libcloud.egg-info/not-zip-safe libcloud-0.15.1/apache_libcloud.egg-info/not-zip-safe --- libcloud-0.5.0/apache_libcloud.egg-info/not-zip-safe 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/apache_libcloud.egg-info/not-zip-safe 2014-07-02 20:55:10.000000000 +0000 @@ -0,0 +1 @@ + diff -Nru libcloud-0.5.0/apache_libcloud.egg-info/PKG-INFO libcloud-0.15.1/apache_libcloud.egg-info/PKG-INFO --- libcloud-0.5.0/apache_libcloud.egg-info/PKG-INFO 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/apache_libcloud.egg-info/PKG-INFO 2014-07-02 21:19:06.000000000 +0000 @@ -0,0 +1,27 @@ +Metadata-Version: 1.1 +Name: apache-libcloud +Version: 0.15.1 +Summary: A standard Python library that abstracts away differences among multiple cloud provider APIs. 
For more information and documentation, please see http://libcloud.apache.org +Home-page: http://libcloud.apache.org/ +Author: Apache Software Foundation +Author-email: dev@libcloud.apache.org +License: Apache License (2.0) +Description: UNKNOWN +Platform: UNKNOWN +Classifier: Development Status :: 4 - Beta +Classifier: Environment :: Console +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Programming Language :: Python :: 2.5 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.0 +Classifier: Programming Language :: Python :: 3.1 +Classifier: Programming Language :: Python :: 3.2 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: Implementation :: PyPy diff -Nru libcloud-0.5.0/apache_libcloud.egg-info/SOURCES.txt libcloud-0.15.1/apache_libcloud.egg-info/SOURCES.txt --- libcloud-0.5.0/apache_libcloud.egg-info/SOURCES.txt 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/apache_libcloud.egg-info/SOURCES.txt 2014-07-02 21:19:06.000000000 +0000 @@ -0,0 +1,1118 @@ +CHANGES.rst +LICENSE +MANIFEST.in +NOTICE +README.rst +example_compute.py +example_dns.py +example_loadbalancer.py +example_storage.py +setup.cfg +setup.py +tox.ini +apache_libcloud.egg-info/PKG-INFO +apache_libcloud.egg-info/SOURCES.txt +apache_libcloud.egg-info/dependency_links.txt +apache_libcloud.egg-info/not-zip-safe +apache_libcloud.egg-info/top_level.txt +demos/compute_demo.py +demos/gce_demo.py +demos/gce_lb_demo.py +demos/secrets.py-dist +libcloud/__init__.py +libcloud/httplib_ssl.py +libcloud/pricing.py 
+libcloud/security.py +libcloud/common/__init__.py +libcloud/common/abiquo.py +libcloud/common/aws.py +libcloud/common/azure.py +libcloud/common/base.py +libcloud/common/brightbox.py +libcloud/common/cloudsigma.py +libcloud/common/cloudstack.py +libcloud/common/gandi.py +libcloud/common/gogrid.py +libcloud/common/google.py +libcloud/common/hostvirtual.py +libcloud/common/linode.py +libcloud/common/openstack.py +libcloud/common/rackspace.py +libcloud/common/types.py +libcloud/common/xmlrpc.py +libcloud/compute/__init__.py +libcloud/compute/base.py +libcloud/compute/deployment.py +libcloud/compute/providers.py +libcloud/compute/ssh.py +libcloud/compute/types.py +libcloud/compute/drivers/__init__.py +libcloud/compute/drivers/abiquo.py +libcloud/compute/drivers/bluebox.py +libcloud/compute/drivers/brightbox.py +libcloud/compute/drivers/cloudframes.py +libcloud/compute/drivers/cloudsigma.py +libcloud/compute/drivers/cloudstack.py +libcloud/compute/drivers/digitalocean.py +libcloud/compute/drivers/dreamhost.py +libcloud/compute/drivers/dummy.py +libcloud/compute/drivers/ec2.py +libcloud/compute/drivers/ecp.py +libcloud/compute/drivers/elastichosts.py +libcloud/compute/drivers/elasticstack.py +libcloud/compute/drivers/exoscale.py +libcloud/compute/drivers/gandi.py +libcloud/compute/drivers/gce.py +libcloud/compute/drivers/gogrid.py +libcloud/compute/drivers/gridspot.py +libcloud/compute/drivers/hostvirtual.py +libcloud/compute/drivers/hpcloud.py +libcloud/compute/drivers/ibm_sce.py +libcloud/compute/drivers/ikoula.py +libcloud/compute/drivers/joyent.py +libcloud/compute/drivers/kili.py +libcloud/compute/drivers/ktucloud.py +libcloud/compute/drivers/libvirt_driver.py +libcloud/compute/drivers/linode.py +libcloud/compute/drivers/nephoscale.py +libcloud/compute/drivers/ninefold.py +libcloud/compute/drivers/opennebula.py +libcloud/compute/drivers/openstack.py +libcloud/compute/drivers/opsource.py +libcloud/compute/drivers/rackspace.py +libcloud/compute/drivers/rimuhosting.py 
+libcloud/compute/drivers/serverlove.py +libcloud/compute/drivers/skalicloud.py +libcloud/compute/drivers/softlayer.py +libcloud/compute/drivers/vcl.py +libcloud/compute/drivers/vcloud.py +libcloud/compute/drivers/voxel.py +libcloud/compute/drivers/vpsnet.py +libcloud/data/pricing.json +libcloud/dns/__init__.py +libcloud/dns/base.py +libcloud/dns/providers.py +libcloud/dns/types.py +libcloud/dns/drivers/__init__.py +libcloud/dns/drivers/dummy.py +libcloud/dns/drivers/gandi.py +libcloud/dns/drivers/google.py +libcloud/dns/drivers/hostvirtual.py +libcloud/dns/drivers/linode.py +libcloud/dns/drivers/rackspace.py +libcloud/dns/drivers/route53.py +libcloud/dns/drivers/zerigo.py +libcloud/loadbalancer/__init__.py +libcloud/loadbalancer/base.py +libcloud/loadbalancer/providers.py +libcloud/loadbalancer/types.py +libcloud/loadbalancer/drivers/__init__.py +libcloud/loadbalancer/drivers/brightbox.py +libcloud/loadbalancer/drivers/cloudstack.py +libcloud/loadbalancer/drivers/elb.py +libcloud/loadbalancer/drivers/gce.py +libcloud/loadbalancer/drivers/gogrid.py +libcloud/loadbalancer/drivers/ninefold.py +libcloud/loadbalancer/drivers/rackspace.py +libcloud/storage/__init__.py +libcloud/storage/base.py +libcloud/storage/providers.py +libcloud/storage/types.py +libcloud/storage/drivers/__init__.py +libcloud/storage/drivers/atmos.py +libcloud/storage/drivers/azure_blobs.py +libcloud/storage/drivers/cloudfiles.py +libcloud/storage/drivers/dummy.py +libcloud/storage/drivers/google_storage.py +libcloud/storage/drivers/ktucloud.py +libcloud/storage/drivers/local.py +libcloud/storage/drivers/nimbus.py +libcloud/storage/drivers/ninefold.py +libcloud/storage/drivers/s3.py +libcloud/test/__init__.py +libcloud/test/file_fixtures.py +libcloud/test/pricing_test.json +libcloud/test/secrets.py-dist +libcloud/test/test_connection.py +libcloud/test/test_file_fixtures.py +libcloud/test/test_httplib_ssl.py +libcloud/test/test_pricing.py +libcloud/test/test_response_classes.py 
+libcloud/test/test_types.py +libcloud/test/test_utils.py +libcloud/test/common/__init__.py +libcloud/test/common/test_cloudstack.py +libcloud/test/common/test_gandi.py +libcloud/test/common/test_google.py +libcloud/test/common/test_openstack.py +libcloud/test/compute/__init__.py +libcloud/test/compute/test_abiquo.py +libcloud/test/compute/test_base.py +libcloud/test/compute/test_bluebox.py +libcloud/test/compute/test_brightbox.py +libcloud/test/compute/test_cloudframes.py +libcloud/test/compute/test_cloudsigma_v1_0.py +libcloud/test/compute/test_cloudsigma_v2_0.py +libcloud/test/compute/test_cloudstack.py +libcloud/test/compute/test_deployment.py +libcloud/test/compute/test_digitalocean.py +libcloud/test/compute/test_dreamhost.py +libcloud/test/compute/test_ec2.py +libcloud/test/compute/test_ecp.py +libcloud/test/compute/test_elasticstack.py +libcloud/test/compute/test_exoscale.py +libcloud/test/compute/test_gandi.py +libcloud/test/compute/test_gce.py +libcloud/test/compute/test_gogrid.py +libcloud/test/compute/test_gridspot.py +libcloud/test/compute/test_hostvirtual.py +libcloud/test/compute/test_ibm_sce.py +libcloud/test/compute/test_ikoula.py +libcloud/test/compute/test_joyent.py +libcloud/test/compute/test_ktucloud.py +libcloud/test/compute/test_linode.py +libcloud/test/compute/test_nephoscale.py +libcloud/test/compute/test_opennebula.py +libcloud/test/compute/test_openstack.py +libcloud/test/compute/test_opsource.py +libcloud/test/compute/test_rackspace.py +libcloud/test/compute/test_rimuhosting.py +libcloud/test/compute/test_softlayer.py +libcloud/test/compute/test_ssh_client.py +libcloud/test/compute/test_vcl.py +libcloud/test/compute/test_vcloud.py +libcloud/test/compute/test_voxel.py +libcloud/test/compute/test_vpsnet.py +libcloud/test/compute/fixtures/abiquo/dcs.xml +libcloud/test/compute/fixtures/abiquo/ent_1.xml +libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2.xml +libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_template_11.xml 
+libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_templates.xml +libcloud/test/compute/fixtures/abiquo/ent_1_dcreps.xml +libcloud/test/compute/fixtures/abiquo/login.xml +libcloud/test/compute/fixtures/abiquo/not_found_error.xml +libcloud/test/compute/fixtures/abiquo/privilege_errors.html +libcloud/test/compute/fixtures/abiquo/unauthorized_user.html +libcloud/test/compute/fixtures/abiquo/vdc_4.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_needs_sync.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_vm_3_not_allocated.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_vms.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy_task.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy_task_failed.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_allocated.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy_task.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy_task_failed.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deployed.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_nics.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_reset.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_reset_task.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy_task.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy_task_failed.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_creation_ok.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vms.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vms_allocated.xml 
+libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_creation_ok.xml +libcloud/test/compute/fixtures/abiquo/vdc_4_vapps.xml +libcloud/test/compute/fixtures/abiquo/vdcs.xml +libcloud/test/compute/fixtures/bluebox/api_block_products_json.json +libcloud/test/compute/fixtures/bluebox/api_block_templates_json.json +libcloud/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json +libcloud/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json +libcloud/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json +libcloud/test/compute/fixtures/bluebox/api_blocks_json.json +libcloud/test/compute/fixtures/bluebox/api_blocks_json_post.json +libcloud/test/compute/fixtures/brightbox/create_cloud_ip.json +libcloud/test/compute/fixtures/brightbox/create_server.json +libcloud/test/compute/fixtures/brightbox/create_server_gb1_a.json +libcloud/test/compute/fixtures/brightbox/create_server_gb1_b.json +libcloud/test/compute/fixtures/brightbox/list_cloud_ips.json +libcloud/test/compute/fixtures/brightbox/list_images.json +libcloud/test/compute/fixtures/brightbox/list_server_types.json +libcloud/test/compute/fixtures/brightbox/list_servers.json +libcloud/test/compute/fixtures/brightbox/list_zones.json +libcloud/test/compute/fixtures/brightbox/token.json +libcloud/test/compute/fixtures/cloudframes/_cloudspace_find.xml +libcloud/test/compute/fixtures/cloudframes/_lan_find.xml +libcloud/test/compute/fixtures/cloudframes/_machine_createFromTemplate.xml +libcloud/test/compute/fixtures/cloudframes/_machine_delete.xml +libcloud/test/compute/fixtures/cloudframes/_machine_find_physical.xml +libcloud/test/compute/fixtures/cloudframes/_machine_find_templates.xml +libcloud/test/compute/fixtures/cloudframes/_machine_find_virtualdesktop.xml +libcloud/test/compute/fixtures/cloudframes/_machine_find_virtualserver.xml +libcloud/test/compute/fixtures/cloudframes/_machine_listSnapshots.xml 
+libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_01dedf71-0c37-441e-9687-085f8bb116ea.xml +libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_0c4da918-9f88-4049-a09c-8ab69142736a.xml +libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_1dd57d0d-0e23-471d-9f34-b673c7c18bc3.xml +libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_2aea45ee-3ea5-4b4f-88f0-7d4d48bed643.xml +libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_64f325ef-28ac-4907-bd37-572a13178edd.xml +libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_96b2af78-88a0-48a6-a5bd-258e1d00c0b9.xml +libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_9a6b3101-b4ac-4ecb-b114-67d89994ac9b.xml +libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_c52e4a42-72fe-4f34-bb80-c57d237fcbf9.xml +libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_d3c98151-f064-45fc-a90a-23c481723895.xml +libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_dea11e50-1b53-4046-8589-cf52eb7b0d25.xml +libcloud/test/compute/fixtures/cloudframes/_machine_reboot.xml +libcloud/test/compute/fixtures/cloudframes/_machine_rollback.xml +libcloud/test/compute/fixtures/cloudframes/_machine_snapshot.xml +libcloud/test/compute/fixtures/cloudframes/_machine_start.xml +libcloud/test/compute/fixtures/cloudframes/_machine_stop.xml +libcloud/test/compute/fixtures/cloudsigma/drives_clone.txt +libcloud/test/compute/fixtures/cloudsigma/drives_info.txt +libcloud/test/compute/fixtures/cloudsigma/drives_single_info.txt +libcloud/test/compute/fixtures/cloudsigma/drives_standard_info.txt +libcloud/test/compute/fixtures/cloudsigma/resources_ip_create.txt +libcloud/test/compute/fixtures/cloudsigma/resources_ip_list.txt +libcloud/test/compute/fixtures/cloudsigma/servers_create.txt +libcloud/test/compute/fixtures/cloudsigma/servers_info.txt +libcloud/test/compute/fixtures/cloudsigma/servers_set.txt 
+libcloud/test/compute/fixtures/cloudsigma_2_0/balance.json +libcloud/test/compute/fixtures/cloudsigma_2_0/capabilities.json +libcloud/test/compute/fixtures/cloudsigma_2_0/create_subscription.json +libcloud/test/compute/fixtures/cloudsigma_2_0/currentusage.json +libcloud/test/compute/fixtures/cloudsigma_2_0/drives_avail_groups.json +libcloud/test/compute/fixtures/cloudsigma_2_0/drives_clone.json +libcloud/test/compute/fixtures/cloudsigma_2_0/drives_create.json +libcloud/test/compute/fixtures/cloudsigma_2_0/drives_detail.json +libcloud/test/compute/fixtures/cloudsigma_2_0/drives_get.json +libcloud/test/compute/fixtures/cloudsigma_2_0/drives_resize.json +libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_create_no_rules.json +libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_create_with_rules.json +libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_detail.json +libcloud/test/compute/fixtures/cloudsigma_2_0/libdrives.json +libcloud/test/compute/fixtures/cloudsigma_2_0/pricing.json +libcloud/test/compute/fixtures/cloudsigma_2_0/servers_attach_policy.json +libcloud/test/compute/fixtures/cloudsigma_2_0/servers_avail_groups.json +libcloud/test/compute/fixtures/cloudsigma_2_0/servers_clone.json +libcloud/test/compute/fixtures/cloudsigma_2_0/servers_close_vnc.json +libcloud/test/compute/fixtures/cloudsigma_2_0/servers_create.json +libcloud/test/compute/fixtures/cloudsigma_2_0/servers_create_with_vlan.json +libcloud/test/compute/fixtures/cloudsigma_2_0/servers_detail_all_stopped.json +libcloud/test/compute/fixtures/cloudsigma_2_0/servers_detail_mixed_state.json +libcloud/test/compute/fixtures/cloudsigma_2_0/servers_open_vnc.json +libcloud/test/compute/fixtures/cloudsigma_2_0/start_already_started.json +libcloud/test/compute/fixtures/cloudsigma_2_0/start_success.json +libcloud/test/compute/fixtures/cloudsigma_2_0/stop_already_stopped.json +libcloud/test/compute/fixtures/cloudsigma_2_0/stop_success.json 
+libcloud/test/compute/fixtures/cloudsigma_2_0/subscriptions.json +libcloud/test/compute/fixtures/cloudsigma_2_0/tags_create.json +libcloud/test/compute/fixtures/cloudsigma_2_0/tags_create_with_resources.json +libcloud/test/compute/fixtures/cloudsigma_2_0/tags_detail.json +libcloud/test/compute/fixtures/cloudsigma_2_0/tags_get.json +libcloud/test/compute/fixtures/cloudsigma_2_0/tags_update.json +libcloud/test/compute/fixtures/cloudsigma_2_0/unknown_error.json +libcloud/test/compute/fixtures/cloudstack/associateIpAddress_default.json +libcloud/test/compute/fixtures/cloudstack/attachVolume_default.json +libcloud/test/compute/fixtures/cloudstack/authorizeSecurityGroupEgress_default.json +libcloud/test/compute/fixtures/cloudstack/authorizeSecurityGroupIngress_default.json +libcloud/test/compute/fixtures/cloudstack/createNetwork_default.json +libcloud/test/compute/fixtures/cloudstack/createPortForwardingRule_default.json +libcloud/test/compute/fixtures/cloudstack/createSSHKeyPair_default.json +libcloud/test/compute/fixtures/cloudstack/createSecurityGroup_default.json +libcloud/test/compute/fixtures/cloudstack/createTags_default.json +libcloud/test/compute/fixtures/cloudstack/createVolume_default.json +libcloud/test/compute/fixtures/cloudstack/createVolume_withcustomdisksize.json +libcloud/test/compute/fixtures/cloudstack/deleteNetwork_default.json +libcloud/test/compute/fixtures/cloudstack/deletePortForwardingRule_default.json +libcloud/test/compute/fixtures/cloudstack/deleteSSHKeyPair_default.json +libcloud/test/compute/fixtures/cloudstack/deleteSecurityGroup_default.json +libcloud/test/compute/fixtures/cloudstack/deleteTags_default.json +libcloud/test/compute/fixtures/cloudstack/deleteVolume_default.json +libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_default.json +libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail.json +libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail2.json 
+libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deploykeyname.json +libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployproject.json +libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deploysecuritygroup.json +libcloud/test/compute/fixtures/cloudstack/destroyVirtualMachine_default.json +libcloud/test/compute/fixtures/cloudstack/detachVolume_default.json +libcloud/test/compute/fixtures/cloudstack/disassociateIpAddress_default.json +libcloud/test/compute/fixtures/cloudstack/dummy_rsa.pub +libcloud/test/compute/fixtures/cloudstack/listDiskOfferings_default.json +libcloud/test/compute/fixtures/cloudstack/listDiskOfferings_withcustomdisksize.json +libcloud/test/compute/fixtures/cloudstack/listNetworkOfferings_default.json +libcloud/test/compute/fixtures/cloudstack/listNetworks_default.json +libcloud/test/compute/fixtures/cloudstack/listNetworks_deployfail.json +libcloud/test/compute/fixtures/cloudstack/listNetworks_deployfail2.json +libcloud/test/compute/fixtures/cloudstack/listPortForwardingRules_default.json +libcloud/test/compute/fixtures/cloudstack/listProjects_default.json +libcloud/test/compute/fixtures/cloudstack/listPublicIpAddresses_default.json +libcloud/test/compute/fixtures/cloudstack/listResourceLimits_default.json +libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_default.json +libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_get_one.json +libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_get_one_doesnt_exist.json +libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_no_keys.json +libcloud/test/compute/fixtures/cloudstack/listSecurityGroups_default.json +libcloud/test/compute/fixtures/cloudstack/listSecurityGroups_no_groups.json +libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_default.json +libcloud/test/compute/fixtures/cloudstack/listTemplates_default.json +libcloud/test/compute/fixtures/cloudstack/listTemplates_notemplates.json 
+libcloud/test/compute/fixtures/cloudstack/listVirtualMachines_default.json +libcloud/test/compute/fixtures/cloudstack/listVolumes_default.json +libcloud/test/compute/fixtures/cloudstack/listZones_default.json +libcloud/test/compute/fixtures/cloudstack/listZones_deployfail.json +libcloud/test/compute/fixtures/cloudstack/listZones_deployfail2.json +libcloud/test/compute/fixtures/cloudstack/listZones_withcustomdisksize.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11111.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11112.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11113.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11114.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11115.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11116.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11117.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17164.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17165.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17166.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17177.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17188.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17199.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17200.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17201.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17202.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17203.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_attachvolumejob.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_createtagsjob.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_createvolumejob.json 
+libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deleteNetwork.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deletetagsjob.json +libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_detachvolumejob.json +libcloud/test/compute/fixtures/cloudstack/rebootVirtualMachine_default.json +libcloud/test/compute/fixtures/cloudstack/registerSSHKeyPair_default.json +libcloud/test/compute/fixtures/cloudstack/registerSSHKeyPair_error.json +libcloud/test/compute/fixtures/cloudstack/revokeSecurityGroupEgress_default.json +libcloud/test/compute/fixtures/cloudstack/revokeSecurityGroupIngress_default.json +libcloud/test/compute/fixtures/cloudstack/startVirtualMachine_default.json +libcloud/test/compute/fixtures/cloudstack/stopVirtualMachine_default.json +libcloud/test/compute/fixtures/digitalocean/create_node.json +libcloud/test/compute/fixtures/digitalocean/destroy_node.json +libcloud/test/compute/fixtures/digitalocean/error.txt +libcloud/test/compute/fixtures/digitalocean/error_invalid_image.json +libcloud/test/compute/fixtures/digitalocean/ex_create_ssh_key.json +libcloud/test/compute/fixtures/digitalocean/ex_destroy_ssh_key.json +libcloud/test/compute/fixtures/digitalocean/ex_list_ssh_keys.json +libcloud/test/compute/fixtures/digitalocean/ex_rename_node.json +libcloud/test/compute/fixtures/digitalocean/list_images.json +libcloud/test/compute/fixtures/digitalocean/list_locations.json +libcloud/test/compute/fixtures/digitalocean/list_nodes.json +libcloud/test/compute/fixtures/digitalocean/list_nodes_empty.json +libcloud/test/compute/fixtures/digitalocean/list_sizes.json +libcloud/test/compute/fixtures/digitalocean/reboot_node.json +libcloud/test/compute/fixtures/ec2/allocate_address.xml +libcloud/test/compute/fixtures/ec2/allocate_vpc_address.xml +libcloud/test/compute/fixtures/ec2/associate_address.xml +libcloud/test/compute/fixtures/ec2/associate_vpc_address.xml +libcloud/test/compute/fixtures/ec2/attach_internet_gateway.xml 
+libcloud/test/compute/fixtures/ec2/attach_network_interface.xml +libcloud/test/compute/fixtures/ec2/attach_volume.xml +libcloud/test/compute/fixtures/ec2/authorize_security_group_egress.xml +libcloud/test/compute/fixtures/ec2/authorize_security_group_ingress.xml +libcloud/test/compute/fixtures/ec2/copy_image.xml +libcloud/test/compute/fixtures/ec2/create_image.xml +libcloud/test/compute/fixtures/ec2/create_internet_gateway.xml +libcloud/test/compute/fixtures/ec2/create_key_pair.xml +libcloud/test/compute/fixtures/ec2/create_network_interface.xml +libcloud/test/compute/fixtures/ec2/create_security_group.xml +libcloud/test/compute/fixtures/ec2/create_snapshot.xml +libcloud/test/compute/fixtures/ec2/create_subnet.xml +libcloud/test/compute/fixtures/ec2/create_tags.xml +libcloud/test/compute/fixtures/ec2/create_volume.xml +libcloud/test/compute/fixtures/ec2/create_vpc.xml +libcloud/test/compute/fixtures/ec2/delete_internet_gateway.xml +libcloud/test/compute/fixtures/ec2/delete_key_pair.xml +libcloud/test/compute/fixtures/ec2/delete_network_interface.xml +libcloud/test/compute/fixtures/ec2/delete_security_group.xml +libcloud/test/compute/fixtures/ec2/delete_snapshot.xml +libcloud/test/compute/fixtures/ec2/delete_subnet.xml +libcloud/test/compute/fixtures/ec2/delete_tags.xml +libcloud/test/compute/fixtures/ec2/delete_volume.xml +libcloud/test/compute/fixtures/ec2/delete_vpc.xml +libcloud/test/compute/fixtures/ec2/deregister_image.xml +libcloud/test/compute/fixtures/ec2/describe_account_attributes.xml +libcloud/test/compute/fixtures/ec2/describe_addresses.xml +libcloud/test/compute/fixtures/ec2/describe_addresses_all.xml +libcloud/test/compute/fixtures/ec2/describe_addresses_multi.xml +libcloud/test/compute/fixtures/ec2/describe_addresses_single.xml +libcloud/test/compute/fixtures/ec2/describe_availability_zones.xml +libcloud/test/compute/fixtures/ec2/describe_images.xml +libcloud/test/compute/fixtures/ec2/describe_images_ex_imageids.xml 
+libcloud/test/compute/fixtures/ec2/describe_instance_types.xml +libcloud/test/compute/fixtures/ec2/describe_instances.xml +libcloud/test/compute/fixtures/ec2/describe_internet_gateways.xml +libcloud/test/compute/fixtures/ec2/describe_key_pairs.xml +libcloud/test/compute/fixtures/ec2/describe_key_pairs_doesnt_exist.xml +libcloud/test/compute/fixtures/ec2/describe_network_interfaces.xml +libcloud/test/compute/fixtures/ec2/describe_reserved_instances.xml +libcloud/test/compute/fixtures/ec2/describe_security_groups.xml +libcloud/test/compute/fixtures/ec2/describe_snapshots.xml +libcloud/test/compute/fixtures/ec2/describe_subnets.xml +libcloud/test/compute/fixtures/ec2/describe_tags.xml +libcloud/test/compute/fixtures/ec2/describe_volumes.xml +libcloud/test/compute/fixtures/ec2/describe_vpcs.xml +libcloud/test/compute/fixtures/ec2/detach_internet_gateway.xml +libcloud/test/compute/fixtures/ec2/detach_network_interface.xml +libcloud/test/compute/fixtures/ec2/detach_volume.xml +libcloud/test/compute/fixtures/ec2/disassociate_address.xml +libcloud/test/compute/fixtures/ec2/get_console_output.xml +libcloud/test/compute/fixtures/ec2/import_key_pair.xml +libcloud/test/compute/fixtures/ec2/modify_image_attribute.xml +libcloud/test/compute/fixtures/ec2/modify_instance_attribute.xml +libcloud/test/compute/fixtures/ec2/reboot_instances.xml +libcloud/test/compute/fixtures/ec2/register_image.xml +libcloud/test/compute/fixtures/ec2/release_address.xml +libcloud/test/compute/fixtures/ec2/revoke_security_group_egress.xml +libcloud/test/compute/fixtures/ec2/revoke_security_group_ingress.xml +libcloud/test/compute/fixtures/ec2/run_instances.xml +libcloud/test/compute/fixtures/ec2/run_instances_iam_profile.xml +libcloud/test/compute/fixtures/ec2/run_instances_idem.xml +libcloud/test/compute/fixtures/ec2/run_instances_idem_mismatch.xml +libcloud/test/compute/fixtures/ec2/start_instances.xml +libcloud/test/compute/fixtures/ec2/stop_instances.xml 
+libcloud/test/compute/fixtures/ec2/terminate_instances.xml +libcloud/test/compute/fixtures/ecp/htemplate_list.json +libcloud/test/compute/fixtures/ecp/network_list.json +libcloud/test/compute/fixtures/ecp/ptemplate_list.json +libcloud/test/compute/fixtures/ecp/vm_1_action_delete.json +libcloud/test/compute/fixtures/ecp/vm_1_action_start.json +libcloud/test/compute/fixtures/ecp/vm_1_action_stop.json +libcloud/test/compute/fixtures/ecp/vm_1_get.json +libcloud/test/compute/fixtures/ecp/vm_list.json +libcloud/test/compute/fixtures/ecp/vm_put.json +libcloud/test/compute/fixtures/elastichosts/drives_create.json +libcloud/test/compute/fixtures/elastichosts/drives_info.json +libcloud/test/compute/fixtures/elastichosts/servers_create.json +libcloud/test/compute/fixtures/elastichosts/servers_info.json +libcloud/test/compute/fixtures/gandi/account_info.xml +libcloud/test/compute/fixtures/gandi/account_info_rating.xml +libcloud/test/compute/fixtures/gandi/datacenter_list.xml +libcloud/test/compute/fixtures/gandi/disk_attach.xml +libcloud/test/compute/fixtures/gandi/disk_create.xml +libcloud/test/compute/fixtures/gandi/disk_create_from.xml +libcloud/test/compute/fixtures/gandi/disk_delete.xml +libcloud/test/compute/fixtures/gandi/disk_detach.xml +libcloud/test/compute/fixtures/gandi/disk_info.xml +libcloud/test/compute/fixtures/gandi/disk_list.xml +libcloud/test/compute/fixtures/gandi/disk_update.xml +libcloud/test/compute/fixtures/gandi/iface_attach.xml +libcloud/test/compute/fixtures/gandi/iface_detach.xml +libcloud/test/compute/fixtures/gandi/iface_list.xml +libcloud/test/compute/fixtures/gandi/image_list_dc0.xml +libcloud/test/compute/fixtures/gandi/ip_list.xml +libcloud/test/compute/fixtures/gandi/operation_info.xml +libcloud/test/compute/fixtures/gandi/vm_create_from.xml +libcloud/test/compute/fixtures/gandi/vm_delete.xml +libcloud/test/compute/fixtures/gandi/vm_info.xml +libcloud/test/compute/fixtures/gandi/vm_list.xml +libcloud/test/compute/fixtures/gandi/vm_reboot.xml 
+libcloud/test/compute/fixtures/gandi/vm_stop.xml +libcloud/test/compute/fixtures/gce/aggregated_addresses.json +libcloud/test/compute/fixtures/gce/aggregated_disks.json +libcloud/test/compute/fixtures/gce/aggregated_forwardingRules.json +libcloud/test/compute/fixtures/gce/aggregated_instances.json +libcloud/test/compute/fixtures/gce/aggregated_machineTypes.json +libcloud/test/compute/fixtures/gce/aggregated_targetPools.json +libcloud/test/compute/fixtures/gce/generic_disk.json +libcloud/test/compute/fixtures/gce/global_firewalls.json +libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall.json +libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_delete.json +libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_put.json +libcloud/test/compute/fixtures/gce/global_firewalls_post.json +libcloud/test/compute/fixtures/gce/global_httpHealthChecks.json +libcloud/test/compute/fixtures/gce/global_httpHealthChecks_basic-check.json +libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck.json +libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck_delete.json +libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck_put.json +libcloud/test/compute/fixtures/gce/global_httpHealthChecks_libcloud-lb-demo-healthcheck.json +libcloud/test/compute/fixtures/gce/global_httpHealthChecks_post.json +libcloud/test/compute/fixtures/gce/global_images.json +libcloud/test/compute/fixtures/gce/global_images_debian_6_squeeze_v20130926_deprecate.json +libcloud/test/compute/fixtures/gce/global_images_debian_7_wheezy_v20130617_delete.json +libcloud/test/compute/fixtures/gce/global_images_post.json +libcloud/test/compute/fixtures/gce/global_networks.json +libcloud/test/compute/fixtures/gce/global_networks_default.json +libcloud/test/compute/fixtures/gce/global_networks_lcnetwork.json +libcloud/test/compute/fixtures/gce/global_networks_lcnetwork_delete.json 
+libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-europe-network.json +libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-network.json +libcloud/test/compute/fixtures/gce/global_networks_post.json +libcloud/test/compute/fixtures/gce/global_snapshots.json +libcloud/test/compute/fixtures/gce/global_snapshots_lcsnapshot.json +libcloud/test/compute/fixtures/gce/global_snapshots_lcsnapshot_delete.json +libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_delete.json +libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_put.json +libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_post.json +libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_lchealthcheck_delete.json +libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_lchealthcheck_put.json +libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_post.json +libcloud/test/compute/fixtures/gce/operations_operation_global_image_post.json +libcloud/test/compute/fixtures/gce/operations_operation_global_images_debian7_delete.json +libcloud/test/compute/fixtures/gce/operations_operation_global_networks_lcnetwork_delete.json +libcloud/test/compute/fixtures/gce/operations_operation_global_networks_post.json +libcloud/test/compute/fixtures/gce/operations_operation_global_snapshots_lcsnapshot_delete.json +libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_lcaddress_delete.json +libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_post.json +libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_forwardingRules_lcforwardingrule_delete.json +libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_forwardingRules_post.json 
+libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_addHealthCheck_post.json +libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_addInstance_post.json +libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_delete.json +libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post.json +libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_removeInstance_post.json +libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_post.json +libcloud/test/compute/fixtures/gce/operations_operation_zones_europe-west1-a_instances_post.json +libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_lcdisk_createSnapshot_post.json +libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_lcdisk_delete.json +libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_post.json +libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-000_delete.json +libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-001_delete.json +libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_attachDisk_post.json +libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_delete.json +libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_detachDisk_post.json +libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_reset_post.json +libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_setTags_post.json 
+libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_post.json +libcloud/test/compute/fixtures/gce/project.json +libcloud/test/compute/fixtures/gce/projects_debian-cloud_global_images.json +libcloud/test/compute/fixtures/gce/regions.json +libcloud/test/compute/fixtures/gce/regions_us-central1_addresses.json +libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress.json +libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress_delete.json +libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_post.json +libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules.json +libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwardingrule.json +libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwardingrule_delete.json +libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_libcloud-lb-demo-lb.json +libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_post.json +libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools.json +libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool.json +libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_addHealthCheck_post.json +libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_addInstance_post.json +libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_delete.json +libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post.json +libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_removeInstance_post.json +libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_libcloud-lb-demo-lb-tp.json +libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_post.json +libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_www-pool.json 
+libcloud/test/compute/fixtures/gce/zones.json +libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances.json +libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances_post.json +libcloud/test/compute/fixtures/gce/zones_europe-west1-a_machineTypes_n1-standard-1.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_createSnapshot_post.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_delete.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_post.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000_delete.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001_delete.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_attachDisk_post.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_delete.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_detachDisk_post.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_reset_post.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_setTags_post.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_post.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes.json +libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes_n1-standard-1.json 
+libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-000.json +libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-001.json +libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-002.json +libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_setDiskAutoDelete.json +libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_volume_auto_delete.json +libcloud/test/compute/fixtures/gogrid/image_list.json +libcloud/test/compute/fixtures/gogrid/image_save.json +libcloud/test/compute/fixtures/gogrid/ip_list.json +libcloud/test/compute/fixtures/gogrid/ip_list_empty.json +libcloud/test/compute/fixtures/gogrid/lookup_list_ip_datacenter.json +libcloud/test/compute/fixtures/gogrid/password_list.json +libcloud/test/compute/fixtures/gogrid/server_add.json +libcloud/test/compute/fixtures/gogrid/server_delete.json +libcloud/test/compute/fixtures/gogrid/server_edit.json +libcloud/test/compute/fixtures/gogrid/server_list.json +libcloud/test/compute/fixtures/gogrid/server_power.json +libcloud/test/compute/fixtures/gogrid/server_power_fail.json +libcloud/test/compute/fixtures/hostvirtual/create_node.json +libcloud/test/compute/fixtures/hostvirtual/get_node.json +libcloud/test/compute/fixtures/hostvirtual/list_images.json +libcloud/test/compute/fixtures/hostvirtual/list_locations.json +libcloud/test/compute/fixtures/hostvirtual/list_nodes.json +libcloud/test/compute/fixtures/hostvirtual/list_sizes.json +libcloud/test/compute/fixtures/hostvirtual/node_destroy.json +libcloud/test/compute/fixtures/hostvirtual/node_reboot.json +libcloud/test/compute/fixtures/hostvirtual/node_start.json +libcloud/test/compute/fixtures/hostvirtual/node_stop.json +libcloud/test/compute/fixtures/ibm_sce/allocate_address.xml +libcloud/test/compute/fixtures/ibm_sce/attach_volume.xml +libcloud/test/compute/fixtures/ibm_sce/create.xml 
+libcloud/test/compute/fixtures/ibm_sce/create_volume.xml +libcloud/test/compute/fixtures/ibm_sce/delete.xml +libcloud/test/compute/fixtures/ibm_sce/delete_address.xml +libcloud/test/compute/fixtures/ibm_sce/destroy_image.xml +libcloud/test/compute/fixtures/ibm_sce/destroy_volume.xml +libcloud/test/compute/fixtures/ibm_sce/detach_volume.xml +libcloud/test/compute/fixtures/ibm_sce/images.xml +libcloud/test/compute/fixtures/ibm_sce/instances.xml +libcloud/test/compute/fixtures/ibm_sce/instances_deleted.xml +libcloud/test/compute/fixtures/ibm_sce/list_addresses.xml +libcloud/test/compute/fixtures/ibm_sce/list_storage_offerings.xml +libcloud/test/compute/fixtures/ibm_sce/list_volumes.xml +libcloud/test/compute/fixtures/ibm_sce/locations.xml +libcloud/test/compute/fixtures/ibm_sce/reboot_active.xml +libcloud/test/compute/fixtures/ibm_sce/sizes.xml +libcloud/test/compute/fixtures/joyent/my_datasets.json +libcloud/test/compute/fixtures/joyent/my_machines.json +libcloud/test/compute/fixtures/joyent/my_machines_create.json +libcloud/test/compute/fixtures/joyent/my_packages.json +libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_default.json +libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_deployfail.json +libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_deployfail2.json +libcloud/test/compute/fixtures/ktucloud/destroyVirtualMachine_default.json +libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_default.json +libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_nodisk.json +libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_notemplates.json +libcloud/test/compute/fixtures/ktucloud/listPortForwardingRules_default.json +libcloud/test/compute/fixtures/ktucloud/listPublicIpAddresses_default.json +libcloud/test/compute/fixtures/ktucloud/listVirtualMachines_default.json +libcloud/test/compute/fixtures/ktucloud/listZones_default.json 
+libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17164.json +libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17165.json +libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17166.json +libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17177.json +libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_attachvolumejob.json +libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_createvolumejob.json +libcloud/test/compute/fixtures/ktucloud/rebootVirtualMachine_default.json +libcloud/test/compute/fixtures/linode/_avail_datacenters.json +libcloud/test/compute/fixtures/linode/_avail_distributions.json +libcloud/test/compute/fixtures/linode/_avail_kernels.json +libcloud/test/compute/fixtures/linode/_avail_linodeplans.json +libcloud/test/compute/fixtures/linode/_batch.json +libcloud/test/compute/fixtures/linode/_linode_ip_list.json +libcloud/test/compute/fixtures/linode/_linode_list.json +libcloud/test/compute/fixtures/meta/helloworld.txt +libcloud/test/compute/fixtures/misc/dummy_rsa +libcloud/test/compute/fixtures/misc/dummy_rsa.pub +libcloud/test/compute/fixtures/nephoscale/list_images.json +libcloud/test/compute/fixtures/nephoscale/list_keys.json +libcloud/test/compute/fixtures/nephoscale/list_locations.json +libcloud/test/compute/fixtures/nephoscale/list_nodes.json +libcloud/test/compute/fixtures/nephoscale/list_password_keys.json +libcloud/test/compute/fixtures/nephoscale/list_sizes.json +libcloud/test/compute/fixtures/nephoscale/list_ssh_keys.json +libcloud/test/compute/fixtures/nephoscale/success_action.json +libcloud/test/compute/fixtures/opennebula_1_4/compute_15.xml +libcloud/test/compute/fixtures/opennebula_1_4/compute_25.xml +libcloud/test/compute/fixtures/opennebula_1_4/compute_5.xml +libcloud/test/compute/fixtures/opennebula_1_4/computes.xml +libcloud/test/compute/fixtures/opennebula_1_4/disk_15.xml +libcloud/test/compute/fixtures/opennebula_1_4/disk_5.xml 
+libcloud/test/compute/fixtures/opennebula_1_4/network_15.xml +libcloud/test/compute/fixtures/opennebula_1_4/network_5.xml +libcloud/test/compute/fixtures/opennebula_1_4/networks.xml +libcloud/test/compute/fixtures/opennebula_1_4/storage.xml +libcloud/test/compute/fixtures/opennebula_2_0/compute_15.xml +libcloud/test/compute/fixtures/opennebula_2_0/compute_25.xml +libcloud/test/compute/fixtures/opennebula_2_0/compute_5.xml +libcloud/test/compute/fixtures/opennebula_2_0/compute_collection.xml +libcloud/test/compute/fixtures/opennebula_2_0/network_15.xml +libcloud/test/compute/fixtures/opennebula_2_0/network_5.xml +libcloud/test/compute/fixtures/opennebula_2_0/network_collection.xml +libcloud/test/compute/fixtures/opennebula_2_0/storage_15.xml +libcloud/test/compute/fixtures/opennebula_2_0/storage_5.xml +libcloud/test/compute/fixtures/opennebula_2_0/storage_collection.xml +libcloud/test/compute/fixtures/opennebula_3_0/network_15.xml +libcloud/test/compute/fixtures/opennebula_3_0/network_5.xml +libcloud/test/compute/fixtures/opennebula_3_0/network_collection.xml +libcloud/test/compute/fixtures/opennebula_3_2/compute_5.xml +libcloud/test/compute/fixtures/opennebula_3_2/instance_type_collection.xml +libcloud/test/compute/fixtures/opennebula_3_6/compute_15.xml +libcloud/test/compute/fixtures/opennebula_3_6/compute_5.xml +libcloud/test/compute/fixtures/opennebula_3_6/disk_10.xml +libcloud/test/compute/fixtures/opennebula_3_6/disk_15.xml +libcloud/test/compute/fixtures/opennebula_3_6/storage_5.xml +libcloud/test/compute/fixtures/opennebula_3_8/instance_type_collection.xml +libcloud/test/compute/fixtures/opennebula_3_8/instance_type_large.xml +libcloud/test/compute/fixtures/opennebula_3_8/instance_type_medium.xml +libcloud/test/compute/fixtures/opennebula_3_8/instance_type_small.xml +libcloud/test/compute/fixtures/openstack/300_multiple_choices.json +libcloud/test/compute/fixtures/openstack/_v1_1__auth.json 
+libcloud/test/compute/fixtures/openstack/_v1_1__auth_mssing_token.json +libcloud/test/compute/fixtures/openstack/_v1_1__auth_unauthorized.json +libcloud/test/compute/fixtures/openstack/_v2_0__auth.json +libcloud/test/compute/fixtures/openstack/_v2_0__auth_deployment.json +libcloud/test/compute/fixtures/openstack/_v2_0__auth_lon.json +libcloud/test/compute/fixtures/openstack/v1_slug_flavors_detail.xml +libcloud/test/compute/fixtures/openstack/v1_slug_images_detail.xml +libcloud/test/compute/fixtures/openstack/v1_slug_images_post.xml +libcloud/test/compute/fixtures/openstack/v1_slug_limits.xml +libcloud/test/compute/fixtures/openstack/v1_slug_servers.xml +libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail.xml +libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_missing.xml +libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_multiple_nodes.xml +libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_pending.xml +libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_same_uuid.xml +libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_success.xml +libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_empty.xml +libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_metadata.xml +libcloud/test/compute/fixtures/openstack/v1_slug_servers_ips.xml +libcloud/test/compute/fixtures/openstack/v1_slug_servers_metadata.xml +libcloud/test/compute/fixtures/openstack/v1_slug_servers_no_admin_pass.xml +libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_group.xml +libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_groups.xml +libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_groups_detail.xml +libcloud/test/compute/fixtures/openstack_v1.1/README +libcloud/test/compute/fixtures/openstack_v1.1/_flavors_7.json +libcloud/test/compute/fixtures/openstack_v1.1/_flavors_detail.json 
+libcloud/test/compute/fixtures/openstack_v1.1/_floating_ip.json +libcloud/test/compute/fixtures/openstack_v1.1/_floating_ip_pools.json +libcloud/test/compute/fixtures/openstack_v1.1/_floating_ips.json +libcloud/test/compute/fixtures/openstack_v1.1/_images_13.json +libcloud/test/compute/fixtures/openstack_v1.1/_images_4949f9ee_2421_4c81_8b49_13119446008b.json +libcloud/test/compute/fixtures/openstack_v1.1/_images_detail.json +libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs.json +libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_create.json +libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_create_import.json +libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_get_one.json +libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_not_found.json +libcloud/test/compute/fixtures/openstack_v1.1/_os_networks.json +libcloud/test/compute/fixtures/openstack_v1.1/_os_networks_POST.json +libcloud/test/compute/fixtures/openstack_v1.1/_os_quota_sets_aTenantId.json +libcloud/test/compute/fixtures/openstack_v1.1/_os_security_group_rules_create.json +libcloud/test/compute/fixtures/openstack_v1.1/_os_security_groups.json +libcloud/test/compute/fixtures/openstack_v1.1/_os_security_groups_create.json +libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots.json +libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots_create.json +libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots_create_rackspace.json +libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots_rackspace.json +libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes.json +libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d.json +libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_create.json +libcloud/test/compute/fixtures/openstack_v1.1/_servers.json +libcloud/test/compute/fixtures/openstack_v1.1/_servers_12063_metadata_two_keys.json +libcloud/test/compute/fixtures/openstack_v1.1/_servers_12064.json 
+libcloud/test/compute/fixtures/openstack_v1.1/_servers_12064_updated_name_bob.json +libcloud/test/compute/fixtures/openstack_v1.1/_servers_12065_os_volume_attachments.json +libcloud/test/compute/fixtures/openstack_v1.1/_servers_12086_console_output.json +libcloud/test/compute/fixtures/openstack_v1.1/_servers_1c01300f-ef97-4937-8f03-ac676d6234be_os-security-groups.json +libcloud/test/compute/fixtures/openstack_v1.1/_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe.json +libcloud/test/compute/fixtures/openstack_v1.1/_servers_create.json +libcloud/test/compute/fixtures/openstack_v1.1/_servers_create_disk_config.json +libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail.json +libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail_EMPTY.json +libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail_ERROR_STATE.json +libcloud/test/compute/fixtures/openstack_v1.1/_servers_pause.json +libcloud/test/compute/fixtures/openstack_v1.1/_servers_resume.json +libcloud/test/compute/fixtures/openstack_v1.1/_servers_suspend.json +libcloud/test/compute/fixtures/openstack_v1.1/_servers_unpause.json +libcloud/test/compute/fixtures/opsource/_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml +libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml +libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml +libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml +libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml +libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml +libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml +libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml 
+libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml +libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml +libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml +libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml +libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml +libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml +libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml +libcloud/test/compute/fixtures/opsource/oec_0_9_base_image.xml +libcloud/test/compute/fixtures/opsource/oec_0_9_myaccount.xml +libcloud/test/compute/fixtures/rimuhosting/r_distributions.json +libcloud/test/compute/fixtures/rimuhosting/r_orders.json +libcloud/test/compute/fixtures/rimuhosting/r_orders_new_vps.json +libcloud/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps.json +libcloud/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps_running_state.json +libcloud/test/compute/fixtures/rimuhosting/r_pricing_plans.json +libcloud/test/compute/fixtures/softlayer/SoftLayer_Account.xml +libcloud/test/compute/fixtures/softlayer/empty.xml +libcloud/test/compute/fixtures/softlayer/fail.xml +libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests.xml +libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getDatacenters.xml +libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_createObject.xml +libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getCreateObjectOptions.xml +libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getObject.xml 
+libcloud/test/compute/fixtures/terremark/api_v0_8_catalogItem_5.xml +libcloud/test/compute/fixtures/terremark/api_v0_8_login.xml +libcloud/test/compute/fixtures/terremark/api_v0_8_org_240.xml +libcloud/test/compute/fixtures/terremark/api_v0_8_task_10496.xml +libcloud/test/compute/fixtures/terremark/api_v0_8_task_11001.xml +libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031.xml +libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_action_deploy.xml +libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_get.xml +libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_powerOn.xml +libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_poweroff.xml +libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_reset.xml +libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224.xml +libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224_action_instantiateVAppTemplate.xml +libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224_catalog.xml +libcloud/test/compute/fixtures/vcl/XMLRPCaddRequest.xml +libcloud/test/compute/fixtures/vcl/XMLRPCendRequest.xml +libcloud/test/compute/fixtures/vcl/XMLRPCextendRequest.xml +libcloud/test/compute/fixtures/vcl/XMLRPCgetImages.xml +libcloud/test/compute/fixtures/vcl/XMLRPCgetRequestConnectData.xml +libcloud/test/compute/fixtures/vcl/XMLRPCgetRequestIds.xml +libcloud/test/compute/fixtures/vcl/XMLRPCgetRequestStatus.xml +libcloud/test/compute/fixtures/vcl/v3_SoftLayer_Account_getVirtualGuests.xml +libcloud/test/compute/fixtures/vcl/v3_SoftLayer_Location_Datacenter_getDatacenters.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_admin_group_b8202c48_7151_4e61_9a6c_155474c7d413.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_catalogItem_3132e037_759b_4627_9056_ca66466fa607.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_catalog_cddb3cb2_3394_4b14_b831_11fbc4028da4.xml 
+libcloud/test/compute/fixtures/vcloud_1_5/api_network_dca8b667_6c8f_4c3e_be57_7a9425dba4f4.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_org.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_org_96726c78_4ae3_402f_b08b_7a78c6903d2a.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_query_group.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_query_user.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_sessions.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_task_undeploy.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_task_undeploy_error.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_undeployTest.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_controlAccess.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_all.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_powerOn.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_reset.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_access_to_resource_forbidden.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vm_test.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_get_metadata.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_post_metadata.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_cloneVApp.xml 
+libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_instantiateVAppTemplate.xml +libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_brokenVdc.xml +libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml +libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_test_virtualHardwareSection_cpu.xml +libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_test_virtualHardwareSection_disks.xml +libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_test_virtualHardwareSection_memory.xml +libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml +libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_test_virtualHardwareSection_cpu.xml +libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_test_virtualHardwareSection_disks.xml +libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_test_virtualHardwareSection_memory.xml +libcloud/test/compute/fixtures/voxel/create_node.xml +libcloud/test/compute/fixtures/voxel/failure.xml +libcloud/test/compute/fixtures/voxel/images.xml +libcloud/test/compute/fixtures/voxel/locations.xml +libcloud/test/compute/fixtures/voxel/nodes.xml +libcloud/test/compute/fixtures/voxel/success.xml +libcloud/test/compute/fixtures/voxel/unauthorized.xml +libcloud/test/compute/fixtures/vpsnet/_available_clouds_api10json_templates.json +libcloud/test/dns/__init__.py +libcloud/test/dns/test_base.py +libcloud/test/dns/test_gandi.py +libcloud/test/dns/test_google.py +libcloud/test/dns/test_hostvirtual.py +libcloud/test/dns/test_linode.py +libcloud/test/dns/test_rackspace.py +libcloud/test/dns/test_route53.py +libcloud/test/dns/test_zerigo.py +libcloud/test/dns/fixtures/gandi/create_record.xml +libcloud/test/dns/fixtures/gandi/create_zone.xml +libcloud/test/dns/fixtures/gandi/delete_record.xml +libcloud/test/dns/fixtures/gandi/delete_record_doesnotexist.xml 
+libcloud/test/dns/fixtures/gandi/delete_zone.xml +libcloud/test/dns/fixtures/gandi/delete_zone_fail.xml +libcloud/test/dns/fixtures/gandi/get_zone.xml +libcloud/test/dns/fixtures/gandi/list_records.xml +libcloud/test/dns/fixtures/gandi/list_records_empty.xml +libcloud/test/dns/fixtures/gandi/list_zones.xml +libcloud/test/dns/fixtures/gandi/new_version.xml +libcloud/test/dns/fixtures/gandi/zone_doesnt_exist.xml +libcloud/test/dns/fixtures/google/get_zone_does_not_exists.json +libcloud/test/dns/fixtures/google/managed_zones_1.json +libcloud/test/dns/fixtures/google/no_record.json +libcloud/test/dns/fixtures/google/record.json +libcloud/test/dns/fixtures/google/records_list.json +libcloud/test/dns/fixtures/google/zone.json +libcloud/test/dns/fixtures/google/zone_create.json +libcloud/test/dns/fixtures/google/zone_list.json +libcloud/test/dns/fixtures/hostvirtual/get_record.json +libcloud/test/dns/fixtures/hostvirtual/get_zone.json +libcloud/test/dns/fixtures/hostvirtual/list_records.json +libcloud/test/dns/fixtures/hostvirtual/list_zones.json +libcloud/test/dns/fixtures/hostvirtual/zone_does_not_exist.json +libcloud/test/dns/fixtures/linode/create_domain.json +libcloud/test/dns/fixtures/linode/create_domain_validation_error.json +libcloud/test/dns/fixtures/linode/create_resource.json +libcloud/test/dns/fixtures/linode/delete_domain.json +libcloud/test/dns/fixtures/linode/delete_domain_does_not_exist.json +libcloud/test/dns/fixtures/linode/delete_resource.json +libcloud/test/dns/fixtures/linode/delete_resource_does_not_exist.json +libcloud/test/dns/fixtures/linode/domain_list.json +libcloud/test/dns/fixtures/linode/get_record.json +libcloud/test/dns/fixtures/linode/get_record_does_not_exist.json +libcloud/test/dns/fixtures/linode/get_zone.json +libcloud/test/dns/fixtures/linode/get_zone_does_not_exist.json +libcloud/test/dns/fixtures/linode/resource_list.json +libcloud/test/dns/fixtures/linode/resource_list_does_not_exist.json 
+libcloud/test/dns/fixtures/linode/update_domain.json +libcloud/test/dns/fixtures/linode/update_resource.json +libcloud/test/dns/fixtures/rackspace/auth_1_1.json +libcloud/test/dns/fixtures/rackspace/auth_2_0.json +libcloud/test/dns/fixtures/rackspace/create_record_success.json +libcloud/test/dns/fixtures/rackspace/create_zone_success.json +libcloud/test/dns/fixtures/rackspace/create_zone_validation_error.json +libcloud/test/dns/fixtures/rackspace/delete_record_success.json +libcloud/test/dns/fixtures/rackspace/delete_zone_success.json +libcloud/test/dns/fixtures/rackspace/does_not_exist.json +libcloud/test/dns/fixtures/rackspace/get_record_success.json +libcloud/test/dns/fixtures/rackspace/get_zone_success.json +libcloud/test/dns/fixtures/rackspace/list_records_no_results.json +libcloud/test/dns/fixtures/rackspace/list_records_success.json +libcloud/test/dns/fixtures/rackspace/list_zones_no_results.json +libcloud/test/dns/fixtures/rackspace/list_zones_success.json +libcloud/test/dns/fixtures/rackspace/unauthorized.json +libcloud/test/dns/fixtures/rackspace/update_record_success.json +libcloud/test/dns/fixtures/rackspace/update_zone_success.json +libcloud/test/dns/fixtures/route53/create_zone.xml +libcloud/test/dns/fixtures/route53/get_zone.xml +libcloud/test/dns/fixtures/route53/invalid_change_batch.xml +libcloud/test/dns/fixtures/route53/list_records.xml +libcloud/test/dns/fixtures/route53/list_zones.xml +libcloud/test/dns/fixtures/route53/record_does_not_exist.xml +libcloud/test/dns/fixtures/route53/zone_does_not_exist.xml +libcloud/test/dns/fixtures/zerigo/create_record.xml +libcloud/test/dns/fixtures/zerigo/create_zone.xml +libcloud/test/dns/fixtures/zerigo/create_zone_validation_error.xml +libcloud/test/dns/fixtures/zerigo/get_record.xml +libcloud/test/dns/fixtures/zerigo/get_zone.xml +libcloud/test/dns/fixtures/zerigo/list_records.xml +libcloud/test/dns/fixtures/zerigo/list_records_no_results.xml +libcloud/test/dns/fixtures/zerigo/list_zones.xml 
+libcloud/test/dns/fixtures/zerigo/list_zones_no_results.xml +libcloud/test/loadbalancer/__init__.py +libcloud/test/loadbalancer/test_brightbox.py +libcloud/test/loadbalancer/test_cloudstack.py +libcloud/test/loadbalancer/test_elb.py +libcloud/test/loadbalancer/test_gce.py +libcloud/test/loadbalancer/test_gogrid.py +libcloud/test/loadbalancer/test_ninefold.py +libcloud/test/loadbalancer/test_rackspace.py +libcloud/test/loadbalancer/fixtures/brightbox/load_balancers.json +libcloud/test/loadbalancer/fixtures/brightbox/load_balancers_lba_1235f.json +libcloud/test/loadbalancer/fixtures/brightbox/load_balancers_post.json +libcloud/test/loadbalancer/fixtures/brightbox/token.json +libcloud/test/loadbalancer/fixtures/cloudstack/assignToLoadBalancerRule_default.json +libcloud/test/loadbalancer/fixtures/cloudstack/associateIpAddress_default.json +libcloud/test/loadbalancer/fixtures/cloudstack/createLoadBalancerRule_default.json +libcloud/test/loadbalancer/fixtures/cloudstack/deleteLoadBalancerRule_default.json +libcloud/test/loadbalancer/fixtures/cloudstack/disassociateIpAddress_default.json +libcloud/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRuleInstances_default.json +libcloud/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRules_default.json +libcloud/test/loadbalancer/fixtures/cloudstack/listZones_default.json +libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17340.json +libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17341.json +libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17342.json +libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17344.json +libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17346.json +libcloud/test/loadbalancer/fixtures/cloudstack/removeFromLoadBalancerRule_default.json +libcloud/test/loadbalancer/fixtures/elb/create_load_balancer.xml +libcloud/test/loadbalancer/fixtures/elb/create_load_balancer_policy.xml 
+libcloud/test/loadbalancer/fixtures/elb/deregister_instances_from_load_balancer.xml +libcloud/test/loadbalancer/fixtures/elb/describe_load_balancer_policies.xml +libcloud/test/loadbalancer/fixtures/elb/describe_load_balancers.xml +libcloud/test/loadbalancer/fixtures/elb/describe_load_balancers_policy_types.xml +libcloud/test/loadbalancer/fixtures/elb/set_load_balancer_policies_for_backend_server.xml +libcloud/test/loadbalancer/fixtures/elb/set_load_balancer_policies_of_listener.xml +libcloud/test/loadbalancer/fixtures/gogrid/ip_list.json +libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_add.json +libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_edit.json +libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_get.json +libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_list.json +libcloud/test/loadbalancer/fixtures/gogrid/unexpected_error.json +libcloud/test/loadbalancer/fixtures/rackspace/_v2_0__auth.json +libcloud/test/loadbalancer/fixtures/rackspace/auth_2_0.json +libcloud/test/loadbalancer/fixtures/rackspace/error_page_default.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_0_slug_loadbalancers_8290_usage_current.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_accesslist.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_errorpage.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_ex_public_ips.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18941_ex_private_ips.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18945_ex_public_ips.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_3xxx.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_errorpage.json 
+libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes_post.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8291.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8292.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8292_nodes_post.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94692_weighted_round_robin.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94693_weighted_least_connections.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94694_unknown_algorithm.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94695_full_details.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94696_http_health_monitor.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94697_https_health_monitor.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94698_access_list.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94698_with_access_list.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94700_http_health_monitor_no_body_regex.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_algorithms.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_nodeaddress.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_post.json +libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_protocols.json +libcloud/test/storage/__init__.py +libcloud/test/storage/test_atmos.py +libcloud/test/storage/test_azure_blobs.py +libcloud/test/storage/test_base.py +libcloud/test/storage/test_cloudfiles.py +libcloud/test/storage/test_google_storage.py +libcloud/test/storage/test_local.py +libcloud/test/storage/test_s3.py 
+libcloud/test/storage/fixtures/atmos/already_exists.xml +libcloud/test/storage/fixtures/atmos/empty_directory_listing.xml +libcloud/test/storage/fixtures/atmos/list_containers.xml +libcloud/test/storage/fixtures/atmos/not_empty.xml +libcloud/test/storage/fixtures/atmos/not_found.xml +libcloud/test/storage/fixtures/azure_blobs/list_containers_1.xml +libcloud/test/storage/fixtures/azure_blobs/list_containers_2.xml +libcloud/test/storage/fixtures/azure_blobs/list_containers_empty.xml +libcloud/test/storage/fixtures/azure_blobs/list_objects_1.xml +libcloud/test/storage/fixtures/azure_blobs/list_objects_2.xml +libcloud/test/storage/fixtures/azure_blobs/list_objects_empty.xml +libcloud/test/storage/fixtures/cloudfiles/_v2_0__auth.json +libcloud/test/storage/fixtures/cloudfiles/list_container_objects.json +libcloud/test/storage/fixtures/cloudfiles/list_container_objects_empty.json +libcloud/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted1.json +libcloud/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted2.json +libcloud/test/storage/fixtures/cloudfiles/list_containers.json +libcloud/test/storage/fixtures/cloudfiles/list_containers_empty.json +libcloud/test/storage/fixtures/cloudfiles/meta_data.json +libcloud/test/storage/fixtures/google_storage/list_container_objects.xml +libcloud/test/storage/fixtures/google_storage/list_container_objects_empty.xml +libcloud/test/storage/fixtures/google_storage/list_container_objects_not_exhausted1.xml +libcloud/test/storage/fixtures/google_storage/list_container_objects_not_exhausted2.xml +libcloud/test/storage/fixtures/google_storage/list_containers.xml +libcloud/test/storage/fixtures/google_storage/list_containers_empty.xml +libcloud/test/storage/fixtures/s3/complete_multipart.xml +libcloud/test/storage/fixtures/s3/initiate_multipart.xml +libcloud/test/storage/fixtures/s3/list_container_objects.xml +libcloud/test/storage/fixtures/s3/list_container_objects_empty.xml 
+libcloud/test/storage/fixtures/s3/list_container_objects_not_exhausted1.xml +libcloud/test/storage/fixtures/s3/list_container_objects_not_exhausted2.xml +libcloud/test/storage/fixtures/s3/list_containers.xml +libcloud/test/storage/fixtures/s3/list_containers_empty.xml +libcloud/test/storage/fixtures/s3/list_multipart_1.xml +libcloud/test/storage/fixtures/s3/list_multipart_2.xml +libcloud/utils/__init__.py +libcloud/utils/compression.py +libcloud/utils/connection.py +libcloud/utils/dist.py +libcloud/utils/files.py +libcloud/utils/iso8601.py +libcloud/utils/logging.py +libcloud/utils/misc.py +libcloud/utils/networking.py +libcloud/utils/publickey.py +libcloud/utils/py3.py +libcloud/utils/xml.py \ No newline at end of file diff -Nru libcloud-0.5.0/apache_libcloud.egg-info/top_level.txt libcloud-0.15.1/apache_libcloud.egg-info/top_level.txt --- libcloud-0.5.0/apache_libcloud.egg-info/top_level.txt 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/apache_libcloud.egg-info/top_level.txt 2014-07-02 21:19:06.000000000 +0000 @@ -0,0 +1 @@ +libcloud diff -Nru libcloud-0.5.0/CHANGES libcloud-0.15.1/CHANGES --- libcloud-0.5.0/CHANGES 2011-05-21 20:42:39.000000000 +0000 +++ libcloud-0.15.1/CHANGES 1970-01-01 00:00:00.000000000 +0000 @@ -1,231 +0,0 @@ - -*- coding: utf-8 -*- - -Changes with Apache Libcloud 0.5.0 - - *) Existing APIs directly on the libcloud.* module have been - deprecated and will be removed in version 0.6.0. Most methods - were moved to the libcloud.compute.* module. 
- - *) Add new libcloud.loadbalancers API, with initial support for: - - GoGrid Load Balancers - - Rackspace Load Balancers - [Roman Bogorodskiy] - - *) Add new libcloud.storage API, with initial support for: - - Amazon S3 - - Rackspace CloudFiles - [Tomaz Muraus] - - *) Add new libcloud.compute drivers for: - - Bluebox [Christian Paredes] - - Gandi.net [Aymeric Barantal] - - Nimbus [David LaBissoniere] - - OpenStack [Roman Bogorodskiy] - - *) Added "pricing" module and improved pricing handling. - [Tomaz Muraus] - - *) Updates to the GoGrid compute driver: - - Use API version 1.0. - - Remove sandbox flag. - - Add ex_list_ips() to list IP addresses assigned to the account. - - Implement ex_edit_image method which allows changing image attributes - like name, description and make image public or private. - [Roman Bogorodskiy] - - *) Updates to the Amazon EC2 compute driver: - - When creating a Node, use the name argument to set a Tag with the - value. [Tomaz Muraus] - - Add extension method for modifying node attributes and changing the - node size. [Tomaz Muraus] - - Add support for the new Amazon Region (Tokyo). [Tomaz Muraus] - - Added ex_create_tags and ex_delete_tags. [Brandon Rhodes] - - Include node Elastic IP addresses in the node public_ip attribute - for the EC2 nodes. [Tomaz Muraus] - - Use ipAddress and privateIpAddress attribute for the EC 2node public - and private ip. [Tomaz Muraus] - - Add ex_describe_addresses method to the EC2 driver. [Tomaz Muraus] - - *) Updates to the Rackspace CloudServers compute driver: - - Add ex_rebuild() and ex_get_node_details() [Andrew Klochkov] - - Expose URI of a Rackspace node to the node meta data. [Paul Querna] - - *) Minor fixes to get the library and tests working on Python 2.7 and PyPy. - [Tomaz Muraus] - -Changes with Apache Libcloud 0.4.2 (Released January 18, 2011) - - *) Fix EC2 create_node to become backward compatible for - NodeLocation. 
- [Tomaž Muraus] - - *) Update code for compatibility with CPython 2.5 - [Jerry Chen] - - *) Implement ex_edit_node method for GoGrid driver which allows - changing node attributes like amount of RAM or description. - [Roman Bogorodskiy] - - *) Add ex_set_password and ex_set_server_name to Rackspace driver. - [Peter Herndon, Paul Querna] - - *) Add Hard and Soft reboot methods to Rackspace driver. - [Peter Herndon] - - *) EC2 Driver availability zones, via ex_list_availability_zones; - list_locations rewrite to include availablity zones - [Tomaž Muraus] - - *) EC2 Driver Idempotency capability in create_node; LIBCLOUD-69 - [David LaBissoniere] - - *) SSL Certificate Name Verification: - - libcloud.security module - - LibcloudHTTPSConnection, LibcloudHTTPConnection (alias) - - Emits warning when not verifying, or CA certs not found - - *) Append ORD1 to available Rackspace location, but keep in the - same node as DFW1, because it's not readable or writeable from - the API. - [Per suggestion of Grig Gheorghiu] - - *) ex_create_ip_group, ex_list_ip_groups, ex_delete_ip_group, - ex_share_ip, ex_unshare_ip, ex_list_ip_addresses additions - to Rackspace driver - [Andrew Klochkov] - - *) New driver for CloudSigma - [Tomaž Muraus] - - *) New driver for Brightbox Cloud. LIBCLOUD-63 - [Tim Fletcher] - - *) Deployment capability to ElasticHosts - [Tomaž Muraus] - - *) Allow deploy_node to use non-standard SSH username and port - [Tomaž Muraus] - - *) Added Rackspace UK (London) support - [Chmouel Boudjnah] - - *) GoGrid driver: add support for locations, i.e. listing - of locations and creation of a node in specified - location - [Roman Bogorodskiy] - - *) GoGrid and Rackspace drivers: add ex_save_image() extra - call to convert running node to an image - [Roman Bogorodskiy] - - *) GoGrid driver: add support for creating 'sandbox' server - and populate isSandbox flag in node's extra information. - [Roman Bogorodskiy] - - *) Add ImportKeyPair and DescribeKeyPair to EC2. 
LIBCLOUD-62 - [Philip Schwartz] - - *) Update EC2 driver and test fixtures for new API. - [Philip Schwartz] - -Changes with Apache Libcloud 0.4.0 [Released October 6, 2010] - - *) Add create keypair functionality to EC2 Drivers. LIBCLOUD-57 - [Grig Gheorghiu] - - *) Improve handling of GoGrid accounts with limited access - API keys. [Paul Querna] - - *) New Driver for ElasticHosts. LIBCLOUD-45 - [Tomaz Muraus] - - *) Use more consistent name for GoGrid driver and use http - POST method for 'unsafe' operations - [Russell Haering] - - *) Implement password handling and add deployment support - for GoGrid nodes. - [Roman Bogorodskiy] - - *) Fix behavior of GoGrid's create_node to wait for a Node ID. - [Roman Bogorodskiy] - - *) Add ex_create_node_nowait to GoGrid driver if you don't need to - wait for a Node ID when creating a node. - [Roman Bogorodskiy] - - *) Removed libcloud.interfaces module. - [Paul Querna] - - *) Removed dependency on zope.interfaces. - [Paul Querna] - - *) RimuHosting moved API endpoint address. - [Paul Querna] - - *) Fix regression and error in GoGrid driver for parsing node objects. - [Roman Bogorodskiy] - - *) Added more test cases for GoGrid driver. LIBCLOUD-34 - [Roman Bogorodskiy, Jerry Chen] - - *) Fix parsing of Slicehost nodes with multiple Public IP addresses. - [Paul Querna] - - *) Add exit_status to ScriptDeployment. LIBCLOUD-36 - [Paul Querna] - - *) Update prices for several drivers. - [Brad Morgan, Paul Querna] - - *) Update Linode driver to reflect new plan sizes. - [Jed Smith] - - *) Change default of 'location' in Linode create_node. LIBCLOUD-41 - [Jed Smith, Steve Steiner] - - *) Document the Linode driver. - [Jed Smith] - - *) Request a private, LAN IP address at Linode creation. - [Jed Smith] - -Changes with Apache Libcloud 0.3.1 [Released May 11, 2010] - - *) Updates to Apache License blocks to correctly reflect status as an - Apache Project. - - *) Fix NOTICE file to use 2010 copyright date. 
- - *) Improve error messages for when running the test cases without - first setting up a secrets.py - -Changes with Apache Libcloud 0.3.0 [Tagged May 6, 2010, not released] - - *) New Drivers for: - - Dreamhost - - Eucalyptus - - Enomaly ECP - - IBM Developer Cloud - - OpenNebula - - SoftLayer - - *) Added new deployment and bootstrap API. - - *) Improved Voxel driver. - - *) Added support for Amazon EC2 Asia Pacific (Singapore) Region. - - *) Improved test coverage for all drivers. - - *) Add support for multiple security groups in EC2. - - *) Fixed bug in Rackspace and RimuHosting when using multiple threads. - - *) Improved debugging and logging of HTTP requests. - - *) Improved documentation for all classes and methods. - -Changes with Apache Libcloud 0.2.0 [Tagged February 2, 2010] - - *) First public release. diff -Nru libcloud-0.5.0/CHANGES.rst libcloud-0.15.1/CHANGES.rst --- libcloud-0.5.0/CHANGES.rst 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/CHANGES.rst 2014-07-02 20:54:10.000000000 +0000 @@ -0,0 +1,2827 @@ +Changelog +========= + +Changes with Apache Libcloud 0.15.1 +----------------------------------- + +Compute +~~~~~~~ + +- Allow user to limit a list of subnets which are returned by passing + ``subnet_ids`` and ``filters`` argument to the ``ex_list_subnets`` + method in the EC2 driver. + (LIBCLOUD-571, GITHUB-306) + [Lior Goikhburg] + +- Allow user to limit a list of internet gateways which are returned by + passing ``gateway_ids`` and ``filters`` argument to the + ``ex_list_internet_gateways`` method in the EC2 driver. + (LIBCLOUD-572, GITHUB-307) + [Lior Goikhburg] + +- Allow user to filter which nodes are returned by passing ``ex_filters`` + argument to the ``list_nodes`` method in the EC2 driver. 
+ (LIBCLOUD-580, GITHUB-320)
+ [Lior Goikhburg]
+
+- Add network_association_id to ex_list_public_ips and CloudstackAddress object
+ (GITHUB-327)
+ [Roeland Kuipers]
+
+- Allow user to specify admin password by passing ``ex_admin_pass`` argument
+ to the ``create_node`` method in the Openstack driver.
+ (GITHUB-315)
+ [Marcus Devich]
+
+- Fix a possible race condition in deploy_node which would occur if node
+ is online and can be accessed via SSH, but the SSH key we want to use hasn't
+ been installed yet.
+
+ Previously, we would immediately throw if we can connect, but the SSH key
+ hasn't been installed yet.
+ (GITHUB-331)
+ [David Gay]
+
+- Propagate an exception in ``deploy_node`` method if user specified an invalid
+ path to the private key file. Previously this exception was silently swallowed
+ and ignored.
+ [Tomaz Muraus]
+
+DNS
+~~~
+
+- Include a better message in the exception which is thrown when a request
+ in the Rackspace driver ends up in an ``ERROR`` state.
+ [Tomaz Muraus]
+
+Changes with Apache Libcloud 0.15.0
+-----------------------------------
+
+General
+~~~~~~~
+
+- Use lxml library (if available) for parsing XML. This should substantially
+ reduce parsing time and memory usage for large XML responses (e.g. retrieving
+ all the available images in the EC2 driver).
+ [Andrew Mann]
+
+- Use --head flag instead of -X HEAD when logging curl lines for HEAD requests
+ in debug mode.
+
+ Reported by Brian Metzler.
+ (LIBCLOUD-552)
+ [Tomaz Muraus]
+
+- Fix Python 3 compatibility bugs in the following functions:
+
+ * import_key_pair_from_string in the EC2 driver
+ * publickey._to_md5_fingerprint
+ * publickey.get_pubkey_ssh2_fingerprint
+
+ (GITHUB-301)
+ [Csaba Hoch]
+
+- Update CA_CERTS_PATH to also look for CA cert bundle which comes with
+ openssl Homebrew formula on OS X (/usr/local/etc/openssl/cert.pem).
+ (GITHUB-309)
+ [Pedro Romano]
+
+- Update Google drivers to allow simultaneous authorization for all the
+ supported Google Services.
+ (GITHUB-302)
+ [Eric Johnson]
+
+Compute
+~~~~~~~
+
+- Fix create_key_pair method which was not returning private key.
+ (LIBCLOUD-566)
+ [Sebastien Goasguen]
+
+- Map "Stopped" node state in the CloudStack driver to NodeState.STOPPED
+ instead of NodeState.TERMINATED, "Stopping" to NodeState.PENDING instead of
+ NodeState.TERMINATED and "Expunging" to NodeState.PENDING instead of
+ NodeState.TERMINATED.
+ (GITHUB-246)
+ [Chris DeRamus, Tomaz Muraus]
+
+- Add ex_create_tags and ex_delete_tags method to the CloudStack driver.
+ (LIBCLOUD-514, GITHUB-248)
+ [Chris DeRamus]
+
+- Add new G2 instances to the EC2 driver.
+ [Tomaz Muraus]
+
+- Add support for multiple API versions to the Eucalyptus driver and allows
+ user to pass "api_version" argument to the driver constructor.
+ (LIBCLOUD-516, GITHUB-249)
+ [Chris DeRamus]
+
+- Map "Powered Off" state in the vCloud driver from "TERMINATED" to "STOPPED".
+ (GITHUB-251)
+ [Ash Berlin]
+
+- Add ex_rename_node method to the DigitalOcean driver.
+ (GITHUB-252)
+ [Rahul Ranjan]
+
+- Improve error parsing in the DigitalOcean driver.
+
+ Reported by Deni Bertovic.
+ [Tomaz Muraus]
+
+- Add extension methods for the VPC internet gateway management to the EC2
+ driver.
+ (LIBCLOUD-525, GITHUB-255)
+ [Chris DeRamus]
+
+- Add CloudStackProject class to the CloudStack driver and add option to select
+ project and disk offering on node creation.
+ (LIBCLOUD-526, GITHUB-257)
+ [Jim Divine]
+
+- Fix IP address handling in the OpenStack driver.
+ (LIBCLOUD-503, GITHUB-235)
+ [Markos Gogoulos]
+
+- Add new ex_delete_image and ex_deprecate_image method to the GCE driver.
+ (GITHUB-260)
+ [Franck Cuny]
+
+- Add new ex_copy_image method to the GCE driver.
+ (GITHUB-258)
+ [Franck Cuny]
+
+- Add new ex_set_volume_auto_delete method to the GCE driver.
+ (GITHUB-264)
+ [Franck Cuny]
+
+- Add ex_revoke_security_group_ingress method to the CloudStack driver.
+ [Chris DeRamus, Tomaz Muraus] + +- Allow user to pass ex_ebs_optimized argument to the create_node method + in the EC2 driver. + (GITHUB-272) + [zerthimon] + +- Add "deprecated" attribute to the Node object in the Google Compute Engine + driver. + (GITHUB-276) + [Chris / bassdread] + +- Update Softlayer driver to use "fullyQualifiedDomainName" instead of + "hostname" attribute for the node name. + (GITHUB-280) + [RoelVanNyen] + +- Allow user to specify target tags using target_tags attribute when creating + a firewall rule in the GCE driver. + (GITHUB-278) + [zerthimon] + +- Add new standard API for image management and initial implementation for the + EC2 and Rackspace driver. + (GITHUB-277) + [Matt Lehman] + +- Allow user to specify "displayname" attribute when creating a CloudStack node + by passing "ex_displayname" argument to the method. + + Also allow "name" argument to be empty (None). This way CloudStack + automatically uses Node's UUID for the name. + (GITHUB-289) + [Jeff Moody] + +- Deprecate "key" argument in the SSHClient class in favor of new "key_files" + argument. + + Also add a new "key_material" argument. This argument can contain raw string + version of a private key. + + Note 1: "key_files" and "key_material" arguments are mutually exclusive. + Note 2: "key_material" argument is not supported in the ShellOutSSHClient. + +- Use node id attribute instead of the name for the "lconfig" label in the + Linode driver. This way the label is never longer than 48 characters. + (GITHUB-287) + [earthgecko] + +- Add a new driver for Outscale SAS and Outscale INC cloud + (http://www.outscale.com). + (GITHUB-285, GITHUB-293, LIBCLOUD-536, LIBCLOUD-553) + [Benoit Canet] + +- Add new driver for HP Public Cloud (Helion) available via Provider.HPCLOUD + constant. + [Tomaz Muraus] + +- Allow user to specify availability zone when creating an OpenStack node by + passing "ex_availability_zone" argument to the create_node method. 
+ Note: This will only work if the OpenStack installation is running
+ availability zones extension.
+ (GITHUB-295, LIBCLOUD-555)
+ [syndicut]
+
+- Allow user to pass filters to ex_list_networks method in the EC2 driver.
+ (GITHUB-294)
+ [zerthimon]
+
+- Allow user to retrieve container images using ex_get_image method in the
+ Google Compute Engine driver.
+ (GITHUB-299, LIBCLOUD-562)
+ [Magnus Andersson]
+
+- Add new driver for Kili public cloud (http://kili.io/)
+ [Tomaz Muraus]
+
+- Add "timeout" argument to the ParamikoSSHClient.run method. If this argument
+ is specified and the command passed to run method doesn't finish in the
+ defined timeout, `SSHCommandTimeoutError` is thrown and the connection to the
+ remote server is closed.
+
+ Note #1: If a timeout happens, this functionality doesn't guarantee that the
+ underlying command will be stopped / killed. The way it works it simply
+ closes a connection to the remote server.
+ [Tomaz Muraus]
+
+ Note #2: "timeout" argument is only available in the Paramiko SSH client.
+
+- Make "cidrs_ips" argument in the ex_authorize_security_group_egress method in
+ the EC2 driver mandatory.
+ (GITHUB-301)
+ [Csaba Hoch]
+
+- Add extension methods for managing floating IPs (ex_get_floating_ip,
+ ex_create_floating_ip, ex_delete_floating_ip) to the Openstack 1.1 driver.
+ (GITHUB-301)
+ [Csaba Hoch]
+
+- Fix bug in RimuHosting driver which caused driver not to work when the
+ provider returned compressed (gzip'ed) response.
+ (LIBCLOUD-569, GITHUB-303)
+ [amastracci]
+
+- Fix issue with overwriting the server memory values in the RimuHosting
+ driver.
+ (GITHUB-308)
+ [Dustin Oberloh]
+
+- Add ex_all_tenants argument to the list_nodes method in the OpenStack driver.
+ (GITHUB-312)
+ [LIBCLOUD-575, Zak Estrada]
+
+- Add support for network management for advanced zones
+ (ex_list_network_offerings, ex_create_network, ex_delete_network) in the
+ CloudStack driver.
+ (GITHUB-316)
+ [Roeland Kuipers]
+
+- Add extension methods for routes and route table management to the EC2
+ driver (ex_list_route_tables, ex_create_route_table, ex_delete_route_table,
+ ex_associate_route_table, ex_dissociate_route_table,
+ ex_replace_route_table_association, ex_create_route, ex_delete_route,
+ ex_replace_route)
+ (LIBCLOUD-574, GITHUB-313)
+ [Lior Goikhburg]
+
+- Fix ex_list_snapshots for HP Helion OpenStack based driver.
+ [Tomaz Muraus]
+
+- Allow user to specify volume type and number of IOPS when creating a new
+ volume in the EC2 driver by passing ``ex_volume_type`` and ``ex_iops``
+ argument to the ``create_volume`` method.
+ [Tomaz Muraus]
+
+- Fix ex_unpause_node method in the OpenStack driver.
+ (GITHUB-317)
+ [Pablo Orduña]
+
+- Allow user to launch EC2 node in a specific VPC subnet by passing
+ ``ex_subnet`` argument to the create_node method.
+ (GITHUB-318)
+ [Lior Goikhburg]
+
+Storage
+~~~~~~~
+
+- Fix container name encoding in the iterate_container_objects and
+ get_container_cdn_url method in the CloudFiles driver. Previously, those
+ methods would throw an exception if user passed in a container name which
+ contained a whitespace.
+
+ Reported by Brian Metzler.
+ (LIBCLOUD-552)
+ [Tomaz Muraus]
+
+- Fix a bug in the OpenStack Swift driver which prevented the driver from
+ working with installations where region names in the service catalog weren't
+ upper case.
+ (LIBCLOUD-576, GITHUB-311)
+ [Zak Estrada]
+
+Load Balancer
+~~~~~~~~~~~~~
+
+- Add extension methods for policy management to the ELB driver.
+ (LIBCLOUD-522, GITHUB-253)
+ [Rahul Ranjan]
+
+DNS
+~~~
+
+- Fix update_record method in the Route53 driver so it works correctly for
+ records with multiple values.
+ [Tomaz Muraus]
+
+- Add ex_create_multi_value_record method to the Route53 driver which allows
+ user to create a record with multiple values with a single call.
+ [Tomaz Muraus]
+
+- Add new driver for Google DNS.
+ (GITHUB-269) + [Franck Cuny] + +Changes with Apache Libcloud 0.14.1 +----------------------------------- + +Compute +~~~~~~~ + +- Add new m3.medium and m3.large instance information to the EC2 driver. + [Tomaz Muraus] + +- Add a new driver for CloudSigma API v2.0. + [Tomaz Muraus] + +- Add "volume_id" attribute to the Node "extra" dictionary in the EC2 driver. + Also fix the value of the "device" extra attribute in the StorageVolume + object. (LIBCLOUD-501) + [Oleg Suharev] + +- Add the following extension methods to the OpenStack driver: ex_pause_node, + ex_unpause_node, ex_suspend_node, ex_resume_node. + (LIBCLOUD-505, GITHUB-238) + [Chris DeRamus] + +- Add ex_limits method to the CloudStack driver. + (LIBCLOUD-507, GITHUB-240) + [Chris DeRamus] + +- Add "extra" dictionary to the CloudStackNode object and include more + attributes in the "extra" dictionary of the network and volume object. + (LIBCLOUD-506, GITHUB-239) + [Chris DeRamus] + +- Add ex_register_image method to the EC2 driver. + (LIBCLOUD-508, GITHUB-241) + [Chris DeRamus] + +- Add methods for managing volume snapshots to the OpenStack driver. + (LIBCLOUD-512, GITHUB-245) + [Chris DeRamus] + +Load Balancer +~~~~~~~~~~~~~ + +- Fix a bug in the ex_targetpool_add_node and ex_targetpool_remove_node method + in the GCE driver. + [Rick Wright] + +Storage +~~~~~~~ + +- Allow user to use an internal endpoint in the CloudFiles driver by passing + "use_internal_url" argument to the driver constructor. + (GITHUB-229, GITHUB-231) + [John Obelenus] + +DNS +~~~ + +- Add PTR to the supported record types in the Rackspace driver. + [Tomaz Muraus] + +- Fix Zerigo driver to set Record.name attribute for records which refer + to the bare domain to "None" instead of an empty string. + [Tomaz Muraus] + +- For consistency with other drivers, update Rackspace driver to set + Record.name attribute for the records which refer to the bare domain + to "None" instead of setting them to FQDN. 
+ [Tomaz Muraus]
+
+- Update Rackspace driver to support paginating through zones and records.
+ (GITHUB-230)
+ [Roy Wellington]
+
+- Update Route53 driver so it supports handling records with multiple values
+ (e.g. MX).
+ (LIBCLOUD-504, GITHUB-237)
+ [Chris DeRamus]
+
+- Update Route53 driver to better handle SRV records.
+ [Tomaz Muraus]
+
+- Update Route53 driver, make sure "ttl" attribute in the Record extra
+ dictionary is always an int.
+ [Tomaz Muraus]
+
+Changes with Apache Libcloud 0.14.0
+-----------------------------------
+
+General
+~~~~~~~
+
+- Update API endpoints which are used in the HostVirtual drivers.
+ (LIBCLOUD-489)
+ [Dinesh Bhoopathy]
+
+- Add support for Amazon security token to the Amazon drivers.
+ (LIBCLOUD-498, GITHUB-223)
+ [Noah Kantrowitz]
+
+Compute
+~~~~~~~
+
+- Remove Slicehost driver.
+
+ SliceHost API has been shut down in 2012 so it makes no sense to keep
+ this driver.
+ [Tomaz Muraus]
+
+- Modify drivers for public cloud providers which use HTTP Basic
+ authentication to not allow insecure connections (secure constructor
+ kwarg being set to False) by default.
+
+ This way credentials can't accidentally be sent in plain text over the
+ wire.
+
+ Affected drivers: Bluebox, Joyent, NephoScale, OpSource, VPSNet
+ [Tomaz Muraus]
+
+- Remove "public_ip" and "private_ip" property which has been deprecated in
+ 0.7.0 from the Node object.
+ [Tomaz Muraus]
+
+- Move "is_private_ip" and "is_valid_ip_address" function from
+ libcloud.compute.base into libcloud.utils.networking module.
+ [Tomaz Muraus]
+
+- Allow user to pass "url" argument to the CloudStack driver constructor.
+ This argument can be provided instead of "host" and "path" arguments and
+ can contain a full URL to the API endpoint. (LIBCLOUD-430)
+ [Tomaz Muraus]
+
+- Allow user to pass None as a "location" argument to the create_node
+ method.
(LIBCLOUD-431) + [Tomaz Muraus] + +- Refactor CloudStack Connection class so it looks more like other + connection classes and user can specify which attributes to send as part + of query parameters in the GET request and which inside the body of a POST + request. + [Tomaz Muraus, Philipp Strube] + +- Add a new driver for Exoscale (https://www.exoscale.ch/) provider. + [Tomaz Muraus] + +- Fix a bug in Abiquo driver which caused the driver to fail if the endpoint + URL didn't start with "/api". (LIBCLOUD-447) + + Reported by Igor Ajdisek. + [Tomaz Muraus] + +- Modify CloudStack driver to correctly throw InvalidCredsError exception if + invalid credentials are provided. + [Tomaz Muraus] + +- Don't throw an exception if a node object is missing an "image" attribute + in the list nodes / get node response. + + This could happen if node is in an error state. (LIBCLOUD-455) + [Dustin Spicuzza, Tomaz Muraus] + +- Update CloudStack driver to better handle errors and throw ProviderError + instead of a generic Exception. + [Tomaz Muraus] + +- Modify ex_list_networks methods in CloudStack driver to not thrown if there + are no networks available. + [Tomaz Muraus] + +- Bump API version used in the EC2 driver from 2010-08-21 to 2013-10-15. + (LIBCLOUD-454) + [Tomaz Muraus] + +- Add ex_get_limits method for retrieving account resource limits to the + EC2 driver. + [Tomaz Muraus] + +- Update us-west-1 region in the EC2 driver to include c3 instance types. + Also include pricing information. + [Tomaz Muraus] + +- For consistency, rename "ex_add_ip_forwarding_rule" method to + "ex_create_ip_forwarding_rule". + (GITHUB-196) + [Oleg Suharev] + +- Add support for new "i2" instance types to Amazon EC2 driver. Also + update pricing file. (LIBCLOUD-465) + [Chris DeRamus] + +- Allow user to specify VPC id when creating a security group in the EC2 + driver by passing "vpc_id" argument to ex_create_security_group method. 
+ (LIBCLOUD-463, GITHUB-201) + [Chris DeRamus] + +- Add extension methods for managing security group rules + (ex_authorize_security_group_ingress, ex_authorize_security_group_egress, + ex_revoke_security_group_ingress, ex_revoke_security_group_egress) to the + EC2 driver. (LIBCLOUD-466, GITHUB-202) + [Chris DeRamus] + +- Add extension methods for deleting security groups + (ex_delete_security_group, ex_delete_security_group_by_id, + ex_delete_security_group_by_name) to the EC2 driver. + (LIBCLOUD-464, GITHUB-199) + [Chris DeRamus] + +- Add extension method for listing reserved instances + (ex_list_reserved_nodes) to the EC2 driver. (LIBCLOUD-469, GITHUB-205) + [Chris DeRamus] + +- Add extension methods for VPC management (ex_list_networks, + ex_create_network, ex_delete_network) to the EC2 driver. + (LIBCLOUD-467, GITHUB-203) + [Chris DeRamus] + +- Add extension methods for VPC subnet management (ex_list_subnets, + ex_create_subnet, ex_delete_subnet) to the EC2 driver. + (LIBCLOUD-468, GITHUB-207) + [Chris DeRamus] + +- Add ex_get_console_output extension method to the EC2 driver. + (LIBCLOUD-471, GITHUB-209) + [Chris DeRamus] + +- Include additional provider-specific attributes in the 'extra' dictionary + of the StorageVolume class in the EC2 driver. (LIBCLOUD-473, GITHUB-210) + [Chris DeRamus] + +- Change attribute name in the 'extra' dictionary of EC2 and CloudStack + Node object from "keyname" to "key_name". (LIBCLOUD-475) + [Oleg Suharev] + +- Fix a deployment issue which would some times cause a process to hang if + the executed deployment script printed a lot of output to stdout or stderr. + [Tomaz Muraus] + +- Add additional attributes to the "extra" dictionary of the VolumeSnapshot + object in the EC2 driver. + + Also modify create_volume_snapshot method to correctly handle "name" + argument. Previous, "name" argument was used as a snapshot description, + now it's used as a Tag with a key "Name". 
(LIBCLOUD-480, GITHUB-214) + [Chris DeRamus] + +- Store additional attributes (iops, tags, block_device_mapping) in the + "extra" dictionary of the NodeImage object in the EC2 driver. + + Also fix ex_image_ids filtering in the list_images method. + (LIBCLOUD-481, GITHUB-215) + [Chris DeRamus] + +- Add extension methods for network interface management + (ex_list_network_interfaces, ex_create_network_interface, + ex_attach_network_interface_to_node, ex_detach_network_interface, + ex_delete_network_interface) to the EC2 driver. (LIBCLOUD-474) + [Chris DeRamus] + +- Update Google Compute Engine driver to use and work with API v1. + (LIBCLOUD-450) + [Rick Wright] + +- Modify ParamikoSSHClient so that "password" and "key" arguments are not + mutually exclusive and both can be provided. (LIBCLOUD-461, GITHUB-194) + [Markos Gogoulos] + +- Add extension methods for the Elastic IP management to the EC2 driver. + Also modify "ex_allocate_address" and "ex_release_address" method to + take "domain" argument so it also works with VPC. + (LIBCLOUD-470, GITHUB-208, GITHUB-220) + [Chris DeRamus] + +- Add additional provider specific attributes to the "extra" dictionary of + the Node object in the EC2 driver. (LIBCLOUD-493, GITHUB-221) + [Chris DeRamus] + +- Add ex_copy_image and ex_create_image_from_node method to the EC2 driver. + (LIBCLOUD-494, GITHUB-222) + [Chris DeRamus] + +Storage +~~~~~~~ + +- Allow user to specify 'Content-Disposition' header in the CloudFiles + driver by passing 'content_disposition' key in the extra dictionary of + the upload object methods. (LIBCLOUD-430) + [Michael Farrell] + +- Fix CloudFiles driver so it references a correct service catalog entry for + the CDN endpoint. + + This was broken in the 0.14.0-beta3 release when we migrated all the + Rackspace drivers to use auth 2.0 by default. (GITHUB-186) + [John Obelenus] + +- Update storage drivers to default to "application/octet-stream" + Content-Type if none is provided and none can be guessed. 
+ (LIBCLOUD-433) + [Michael Farrell] + +- Fix a bug so you can now upload 0 bytes sized objects using multipart + upload in the S3 driver. (LIBCLOUD-490) + + Reported by Noah Kantrowitz. + [Tomaz Muraus] + +- Update OpenStack Swift driver constructor so it accepts "region", + "ex_force_service_type" and "ex_force_service_name" argument. + [Tomaz Muraus] + +- Deprecate "CLOUDFILES_SWIFT" provider constant in favor of new + "OPENSTACK_SWIFT" one. + [Tomaz Muraus] + +- Add support for setting an ACL when uploading and object. + (LIBCLOUD-497, GITHUB-223) + [Noah Kantrowitz] + +- Modify get_container method to use a more efficient "HEAD" + approach instead of calling list_containers + doing late + filterting. + (LIBCLOUD-498, GITHUB-223) + [Noah Kantrowitz] + +DNS +~~~ + +- Implement iterate_* methods in the Route53 driver and makes it work + correctly if there are more results which can fit on a single page. + Previously, only first 100 results were returned. (LIBCLOUD-434) + [Chris Clarke] + +- Update HostVirtual driver constructor to only take "key" and other valid + arguments. Previously it also took "secret" argument which it silently + ignored. (LIBCLOUD-483) + + Reported by Andrew Udvare. + [Tomaz Muraus] + +- Fix list_records method in the HostVirtual driver. + (LIBCLOUD-484, GITHUB-218) + + Reported by Andrew Udvare. + [Dinesh Bhoopathy] + +Changes with Apache Libcloud 0.14.0-beta3 +----------------------------------------- + +General +~~~~~~~ + +- If the file exists, read pricing data from ~/.libcloud/pricing.json + by default. If the file doesn't exist, fall back to the old behavior + and use pricing data which is bundled with the release. + [Tomaz Muraus] + +- Add libcloud.pricing.download_pricing_file function for downloading and + updating the pricing file. + [Tomaz Muraus] + +- Fix libcloud.utils.py3.urlquote so it works with unicode strings under + Python 2. 
(LIBCLOUD-429) + [Michael Farrell] + +Compute +~~~~~~~ + +- Refactor Rackspace driver classes and make them easier to use. Now there + are two Rackspace provider constants - Provider.RACKSPACE which + represents new next-gen OpenStack servers and + Provider.RACKSPACE_FIRST_GEN which represents old first-gen cloud + servers. + + Note: This change is backward incompatible. For more information on those + changes and how to update your code, please visit "Upgrade Notes" + documentation page - http://s.apache.org/lc0140un + [Tomaz Muraus] + +- Deprecate the following EC2 provider constants: EC2_US_EAST, + EC2_EU, EC2_EU_WEST, EC2_AP_SOUTHEAST, EC2_AP_NORTHEAST, + EC2_US_WEST_OREGON, EC2_SA_EAST, EC2_SA_EAST and replace it with a new + EC2 constant. + Driver referenced by this new constant now takes a "region" argument which + dictates to which region to connect. + + Note: Deprecated constants will continue to work until the next major + release. For more information on those changes and how to update your + code, please visit "Upgrade Notes" documentation page - + http://s.apache.org/lc0140un + [Tomaz Muraus] + +- Add support for volume related functions to OpenNebula driver. + (LIBCLOUD-354) + [Emanuele Rocca] + +- Add methods for managing storage volumes to the OpenStack driver. + (LIBCLOUD-353) + [Bernard Kerckenaere] + +- Add new driver for Google Compute Engine (LIBCLOUD-266, LIBCLOUD-386) + [Rick Wright] + +- Fix create_node "features" metadata and update affected drivers. + (LIBCLOUD-367) + [John Carr] + +- Update EC2 driver to accept the auth kwarg (it will accept NodeAuthSSH + objects and automatically import a public key that is not already + uploaded to the EC2 keyring). (Follow on from LIBCLOUD-367). + [John Carr] + +- Unify extension argument names for assigning a node to security groups + in EC2 and OpenStack driver. + Argument in the EC2 driver has been renamed from ex_securitygroup to + ex_security_groups. 
For backward compatibility reasons, old argument + will continue to work until the next major release. (LIBCLOUD-375) + [Tomaz Muraus] + +- Add ex_import_keypair_from_string and ex_import_keypair method to the + CloudStack driver. (LIBCLOUD-380) + [Sebastien Goasguen] + +- Add support for managing floating IP addresses to the OpenStack driver. + (LIBCLOUD-382) + [Ivan Kusalic] + +- Add extension methods for handling port forwarding to the CloudStack + driver, rename CloudStackForwardingRule class to + CloudStackIPForwardingRule. (LIBCLOUD-348, LIBCLOUD-381) + [sebastien goasguen] + +- Hook up deploy_node functionality in the CloudStack driver and unify + extension arguments for handling security groups. (LIBCLOUD-388) + [sebastien goasguen] + +- Allow user to pass "args" argument to the ScriptDeployment and + ScriptFileDeployment class. This argument tells which command line + arguments get passed to the ScriptDeployment script. (LIBCLOUD-394) + + Note: This change is backward incompatible. For more information on how + this affects your code and how to update it, visit "Upgrade Notes" + documentation page - http://s.apache.org/lc0140un + [Tomaz Muraus] + +- Allow user to specify IAM profile to use when creating an EC2 node. + (LIBCLOUD-403) + [Xavier Barbosa] + +- Add support for keypair management to the OpenStack driver. + (LIBCLOUD-392) + [L. Schaub] + +- Allow user to specify disk partitioning mode using ex_disk_config argument + in the OpenStack based drivers. (LIBCLOUD-402) + [Brian Curtin] + +- Add new driver for NephoScale provider (http://nephoscale.com/). + (LIBCLOUD-404) + [Markos Gogoulos] + +- Update network related extension methods so they work correctly with + both, OpenStack and Rackspace driver. (LIBCLOUD-368) + [Tomaz Muraus] + +- Add tests for networking functionality in the OpenStack and Rackspace + driver. + [Tomaz Muraus] + +- Allow user to pass all supported extension arguments to ex_rebuild_server + method in the OpenStack driver. 
(LIBCLOUD-408) + [Dave King] + +- Add pricing information for Rackspace Cloud Sydney region. + [Tomaz Muraus] + +- Update EC2 instance type map and pricing data. High Storage instances are + now also available in Sydney and Singapore region. + [Tomaz Muraus] + +- Add new methods for managing storage volumes and snapshots to the EC2 + driver (list_volumes, list_snapshots, destroy_volume_snapshot, + create_volume_snapshot) (LIBCLOUD-409) + [Oleg Suharev] + +- Add the following new extension methods to EC2 driver: ex_destroy_image, + ex_modify_instance_attributes, ex_delete_keypair. (LIBCLOUD-409) + [Oleg Suharev] + +- Allow user to specify a port range when creating a port forwarding rule. + (LIBCLOUD-409) + [Oleg Suharev] + +- Align Joyent driver with other drivers and deprecate "location" argument + in the driver constructor in favor of "region" argument. + + Note: Deprecated argument will continue to work until the next major + release. + [Tomaz Muraus] + +- Deprecate the following ElasticHosts provider constants: ELASTICHOSTS_UK1, + ELASTICHOSTS_UK2, ELASTICHOSTS_US1, ELASTICHOSTS_US2, ELASTICHOSTS_US3, + ELASTICHOSTS_CA1, ELASTICHOSTS_AU1, ELASTICHOSTS_CN1 and replace it with a + new ELASTICHOSTS constant. + Driver referenced by this new constant now takes a "region" argument which + dictates to which region to connect. + + Note: Deprecated constants will continue to work until the next major + release. For more information on those changes and how to update your + code, please visit "Upgrade Notes" documentation page - + http://s.apache.org/lc0140un (LIBCLOUD-383) + [Michael Bennett, Tomaz Muraus] + +- Add log statements to our ParamikoSSHClient wrapper. This should make + debugging deployment issues easier. (LIBCLOUD-414) + [Tomaz Muraus] + +- Add new "NodeState.STOPPED" node state. Update HostVirual and EC2 driver to + also recognize this new state. (LIBCLOUD-296) + [Jayy Vis] + +- Add new Hong Kong endpoint to Rackspace driver. 
+ [Brian Curtin] + +- Fix ex_delete_keypair method in the EC2 driver. (LIBCLOUD-415) + [Oleg Suharev] + +- Add the following new extension methods for elastic IP management to the + EC2 driver: ex_allocate_address, ex_disassociate_address, + ex_release_address. (LIBCLOUD-417) + [Patrick Armstrong] + +- For consistency and accuracy, rename "ex_associate_addresses" method in the + EC2 driver to "ex_associate_address_with_node". + + Note: Old method will continue to work until the next major release. + [Tomaz Muraus] + +- Add new driver for CloudFrames (http://www.cloudfounders.com/CloudFrames) + provider. (LIBCLOUD-358) + [Bernard Kerckenaere] + +- Update default kernel versions which are used when creating a Linode + server. + + Old default kernel versions: + + - x86 - 2.6.18.8-x86_64-linode1 + - x86_64 - 2.6.39.1-linode34 + + New default kernel versions: + + - x86 - 3.9.3-x86-linode52 + - x86_64 - 3.9.3-x86_64-linode33 + + (LIBCLOUD-424) + [Tomaz Muraus, Jon Chen] + +- Disable cache busting functionality in the OpenStack and Rackspace next-gen + driver and enable it only for Rackspace first-gen driver. + [Tomaz Muraus] + +- Update Google Compute Engine driver to v1beta16. + [Rick Wright] + +- Modify auth_url variable in the OpenStack drivers so it works more like + users would expect it to. + + Previously path specified in the auth_url was ignored and only protocol, + hostname and port were used. Now user can provide a full url for the + auth_url variable and the path provided in the url is also used. + [DaeMyung Kang, Tomaz Muraus] + +- Allow user to associate arbitrary key/value pairs with a node by passing + "ex_metadata" argument (dictionary) to create_node method in the EC2 + driver. + Those values are associated with a node using tags functionality. + (LIBCLOUD-395) + [Ivan Kusalic] + +- Add "ex_get_metadata" method to EC2 and OpenStack driver. This method reads + metadata dictionary from the Node object. 
(LIBCLOUD-395) + [Ivan Kusalic] + +- Multiple improvements in the Softlayer driver: + - Map "INITIATING" node state to NodeState.PENDING + - If node is launching remap "halted" state to "pending" + - Add more node sizes + - Add ex_stop_node and ex_start_node method + - Update tests response fixtures + + (LIBCLOUD-416) + [Markos Gogoulos] + +- Modify list_sizes method in the KT UCloud driver to work, even if the item + doesn't have 'diskofferingid' attribute. (LIBCLOUD-435) + [DaeMyung Kang] + +- Add new c3 instance types to the EC2 driver. + [Tomaz Muraus] + +- Fix an issue with the ex_list_keypairs and ex_list_security_groups method + in the CloudStack driver which caused an exception to be thrown if the API + returned no keypairs / security groups. + (LIBCLOUD-438) + [Carlos Reategui, Tomaz Muraus] + +- Fix a bug in the OpenStack based drivers with not correctly checking if the + auth token has expired before re-using it. (LIBCLOUD-428) + + Reported by Michael Farrell. + [Tomaz Muraus, Michael Farrell] + +Storage +~~~~~~~ + +- Deprecate CLOUDFILES_US and CLOUDFILES_UK provider constant and replace + it with a new CLOUDFILES constant. + Driver referenced by this new constant takes a "region" keyword argument + which can be one of 'ord', 'dfw', 'iad', 'syd', 'lon'. + + Note: Deprecated constants will continue to work until the next major + release. + For more information on this change, please visit "Upgrade Notes" + documentation section - http://s.apache.org/lc0140un + [Tomaz Muraus] + +- Allow users to filter objects starting with a prefix by passing ex_prefix + argument to the list_container_objects method in the S3, Google Storage + and CloudFiles driver. (LIBCLOUD-369) + [Stefan Friesel] + +- Fix an issue with mutating connectionCls.host attribute in the Azure + driver. This bug prevented user from having multiple Azure drivers with + different keys instantiated at the same time. 
(LIBCLOUD-399) + [Olivier Grisel] + +- Add a new driver for KT UCloud based on the OpenStack Swift driver. + (LIBCLOUD-431). + [DaeMyung Kang] + +Load Balancer +~~~~~~~~~~~~~ + +- Deprecate RACKSPACE_US and RACKSPACE_UK provider constant and replace it + with a new RACKSPACE constant. + Driver referenced by this new constant takes a "region" keyword argument + which can be one of the following: 'ord', 'dfw', 'iad', 'syd', 'lon'. + + Note: Deprecated constants will continue to work until the next major + release. + For more information on this change, please visit "Upgrade Notes" + documentation section - http://s.apache.org/lc0140un + [Tomaz Muraus] + +- Add new driver for Google Compute Engine (LIBCLOUD-386) + [Rick Wright] + +- Add new Hong Kong endpoint to Rackspace driver. + [Brian Curtin] + +DNS +~~~ + +- Deprecate RACKSPACE_US and RACKSPACE_UK provider constant and replace it + with a new RACKSPACE constant. + Driver referenced by this new constant takes a "region" keyword argument + which can be one of the following: 'us', 'uk'. + + Note: Deprecated constants will continue to work until the next major + release. + For more information on this change, please visit "Upgrade Notes" + documentation section - http://s.apache.org/lc0140un + [Tomaz Muraus] + +- Use string instead of integer for RecordType ENUM value. + + Note: If you directly use an integer instead of RecordType ENUM class you + need to update your code to use the RecordType ENUM otherwise the code + won't work. For more information on how to do that, see "Upgrade Notes" + documentation section - http://s.apache.org/lc0140un + [Tomaz Muraus] + +- Add "export_zone_to_bind_format" and export_zone_to_bind_zone_file method + which allows users to export Libcloud Zone to BIND zone format. + (LIBCLOUD-398) + [Tomaz Muraus] + +- Update issue with inexistent zone / record handling in the get_zone and + get_record method in the Linode driver. Those issues were related to + changes in the Linode API. 
(LIBCLOUD-425) + [Jon Chen] + +Changes with Apache Libcloud 0.13.3 +----------------------------------- + +Compute +~~~~~~~ + +- Send "scrub_data" query parameter when destroying a DigitalOcean node. + This will cause disk to be scrubbed (overwritten with 0's) when destroying + a node. (LIBCLOUD-487) + + Note: This fixes a security issue with a potential leak of data contained + on the destroyed node which only affects users of the DigitalOcean driver. + (CVE-2013-6480) + [Tomaz Muraus] + +Changes with Apache Libcloud 0.13.2 +----------------------------------- + +General +~~~~~~~ + +- Don't sent Content-Length: 0 header with POST and PUT request if "raw" + mode is used. This fixes a regression which could cause broken behavior + in some storage driver when uploading a file from disk. + (LIBCLOUD-396) + [Ivan Kusalic] + +Compute +~~~~~~~ + +- Added Ubuntu Linux 12.04 image to ElasticHost driver image list. + (LIBCLOUD-364) + [Bob Thompson] + +- Update ElasticHosts driver to store drive UUID in the node 'extra' field. + (LIBCLOUD-357) + [Bob Thompson] + +Storage +~~~~~~~ + +- Store last_modified timestamp in the Object extra dictionary in the S3 + driver. (LIBCLOUD-373) + [Stefan Friesel] + +Load Balancer +~~~~~~~~~~~~~ + +- Expose CloudStack driver directly through the Provider.CLOUDSTACK + constant. + [Tomaz Muraus] + +DNS +~~~ + +- Modify Zerigo driver to include record TTL in the record 'extra' attribute + if a record has a TTL set. + [Tomaz Muraus] + +- Modify values in the Record 'extra' dictionary attribute in the Zerigo DNS + driver to be set to None instead of an empty string ('') if a value for + the provided key is not set. + [Tomaz Muraus] + +Changes with Apache Libcloud 0.13.1 +----------------------------------- + +General +~~~~~~~ + +- Fix a regression introduced in 0.13.0 and make sure to include + Content-Length 0 with PUT and POST requests. 
(LIBCLOUD-362, LIBCLOUD-390) + [Tomaz Muraus] + +Compute +~~~~~~~ + +- Fix a bug in the ElasticHosts driver and check for right HTTP status + code when determining drive imaging success. (LIBCLOUD-363) + [Bob Thompson] + +- Update Opsource driver to include node public ip address (if available). + (LIBCLOUD-384) + [Michael Bennett] + +Storage +~~~~~~~ + +- Fix a regression with calling encode_container_name instead of + encode_object_name on object name in get_object method. + Reported by Ben Meng (LIBCLOUD-366) + [Tomaz Muraus] + +- Ensure that AWS S3 multipart upload works for small iterators. + (LIBCLOUD-378) + [Mahendra M] + +Changes with Apache Libcloud 0.13.0 +----------------------------------- + +General +~~~~~~~ + +- Add homebrew curl-ca-bundle path to CA_CERTS_PATH. This will make Libcloud + use homebrew curl ca bundle file (if available) for server certificate + validation. (LIBCLOUD-324) + [Robert Chiniquy] + +- Modify OpenStackAuthConnection and change auth_token_expires attribute to + be a datetime object instead of a string. + [Tomaz Muraus] + +- Modify OpenStackAuthConnection to support re-using of the existing auth + token if it's still valid instead of re-authenticating on every + authenticate() call. + [Tomaz Muraus] + +- Modify base Connection class to not send Content-Length header if body is + not provided. + [Tomaz Muraus] + +- Add the new error class ProviderError and modify InvalidCredsError to + inherit from it. (LIBCLOUD-331) + [Jayy Vis] + +Misc +---- + +- Add unittest2 library dependency for tests and update some tests to use + it. + [Tomaz Muraus] + +Compute +~~~~~~~ + +- Fix destroy_node method in the experimental libvirt driver. + [Aymen Fitati] + +- Add ex_start_node method to the Joyent driver. (LIBCLOUD-319) + [rszabo50] + +- Fix Python 3 compatibility issue in the ScriptFileDeployment class. + (LIBCLOUD-321) + [Arfrever Frehtes Taifersar Arahesis] + +- Add ex_set_metadata_entry and ex_get_metadata method to the VCloud driver. 
+ (LIBCLOUD-318) + [Michel Samia] + +- Various improvements and bug-fixes in the VCloud driver. (LIBCLOUD-323) + [Michel Samia] + +- Various bug fixes and improvements in the HostVirtual driver. + (LIBCLOUD-249) + [Dinesh Bhoopathy] + +- Modify list_sizes method in the OpenStack driver to include + OpenStackNodeSize object which includes 'vcpus' attribute which holds + a number of virtual CPUs for this size. (LIBCLOUD-325) + [Carlo] + +- For consistency rename "ex_describe_keypairs" method in the EC2 driver to + "ex_describe_keypair". + [Tomaz Muraus] + +- Modify "ex_describe_keypair" method to return key fingerprint in the + return value. (LIBCLOUD-326) + [Andre Merzky, Tomaz Muraus] + +- Populate private_ips attribute in the CloudStack drive when returning + a Node object from the create_node method. (LIBCLOUD-329) + [Sebastien Goasguen, Tomaz Muraus] + +- Allow user to pass extra arguments via "extra_args" argument which are + then passed to the "deployVirtualMachine" call in the CloudStack driver + create_node method. (LIBCLOUD-330) + [Sebastien Goasguen, Tomaz Muraus] + +- Update Gandi driver to handle new billing model. (LIBCLOUD-317) + [Aymeric Barantal] + +- Fix a bug in the Linode driver and remove extra newline which is added + when generating a random root password in create_node. (LIBCLOUD-334) + [Juan Carlos Moreno] + +- Add extension methods for managing keypairs to the CloudStack driver. + (LIBCLOUD-333) + [sebastien goasguen] + +- Add extension methods for managing security groups to the CloudStack + driver. (LIBCLOUD-332) + [sebastien goasguen] + +- Add extension methods for starting and stoping the node to the + CloudStack driver. (LIBCLOUD-338) + [sebastien goasguen] + +- Fix old _wait_until_running method. (LIBCLOUD-339) + [Bob Thompson] + +- Allow user to override default async task completion timeout by + specifying ex_clone_timeout argument. (LIBCLOUD-340) + [Michal Galet] + +- Fix a bug in the GoGrid driver get_uuid method. 
(LIBCLOUD-341) + [Bob Thompson] + +- Fix a bug with deploy_node not respecting 'timeout' kwarg. + [Kevin Carter] + +- Modify create_node method in CloudStack driver to return an instance of + CloudStackNode and add a new "expunging" node state. (LIBCLOUD-345) + [sebastien goasguen] + +- Update API endpoint hostnames in the ElasticHost driver and use hostnames + which return a valid SSL certificate. (LIBCLOUD-346) + [Bob Thompson] + +- Add ex_list_networks method and missing tests for list_templates to the + CloudStack driver. (LIBCLOUD-349) + [Philipp Strube] + +- Correctly throw InvalidCredsError if user passes invalid credentials to + the DigitalOcean driver. + [Tomaz Muraus] + +Storage +~~~~~~~ + +- Fix an issue with double encoding the container name in the CloudFiles + driver upload_object method. + Also properly encode container and object name used in the HTTP request + in the get_container and get_object method. (LIBCLOUD-328) + [Tomaz Muraus] + +Load Balancer +~~~~~~~~~~~~~ + +- Add ex_list_current_usage method to the Rackspace driver. + +Changes with Apache Libcloud 0.12.4 +----------------------------------- + +Compute +~~~~~~~ + +- Fix a regression in Softlayer driver caused by the xmlrpclib changes. + (LIBCLOUD-310) + [Jason Johnson] + +- Allow user to pass alternate ssh usernames to deploy_node + (ssh_alternate_usernames kwarg) which are used for authentication if the + default one doesn't work. (LIBCLOUD-309) + [Chris Psaltis, Tomaz Muraus] + +- Fix a bug in EC2 list_locations method - 'name' attribute didn't contain a + the right value. + [Tomaz Muraus] + +- Add new ScriptFileDeployment deployment class which reads deploy script + from a file. + [Rudolf J Streif] + +- Add support for API version 5.1 to the vCloud driver and accept any value + which is a multiple of four for ex_vm_memory kwarg in create_node method. 
+ (LIBCLOUD-314) + [Trevor Powell] + +Storage +~~~~~~~ + +- Fix a regression with removed ex_force_service_region constructor kwarg in + the CloudFiles driver. (LIBCLOUD-260) + +Changes with Apache Libcloud 0.12.3 +----------------------------------- + +General +~~~~~~~ + +- Fix Python 3.x related regressions. (LIBCLOUD-245) + Reported by Arfrever Frehtes Taifersar Arahesis. + [Tomaz Muraus] + +- Fix a regression introduced with recent xmlrpiclib changes which broke all + the Gandi.net drivers. (LIBCLOUD-288) + + Reported by Hutson Betts. + [Tomaz Muraus] + +- Improve deploy code to work correctly if the ssh user doesn't have access + to the /root directory. + + Previously the ScriptDeployment script was stored in /root folder by + default. Now it's stored in users home directory under filename + ~/libcloud_deploymeny_.sh. (LIBCLOUD-302) + + Reported by rotem on #libcloud. + [Tomaz Muraus] + +Compute +~~~~~~~ + +- Improve public and private IP address handling in OpenStack 1.1 driver. + Assume every IP address which doesn't have a label "public" or "internet" + is private. (LIBCLOUD-297) + [Grischa Meyer, Tomaz Muraus] + +- Add new driver for DigitalOcean provider - https://www.digitalocean.com/. + (LIBCLOUD-304) + [Tomaz Muraus] + +- Fix a regression in ParamikoSSHClient.run method which caused this methid + to only work as expected if you passed an absolute or a relative path to + the script to it. (LIBCLOUD-278) + [Tomaz Muraus] + +DNS +~~~ + +- Allow user to specify 'priority' extra argument when creating a MX or SRV + record. + [Brian Jinwright, Tomaz Muraus] + +Changes with Apache Libcloud 0.12.1 +----------------------------------- + +General +~~~~~~~ + +- Deprecate LazyList method of iteration over large paginated collections + and use a new, more efficient generator based approach which doesn't + require the iterator to be pre-exhausted and buffering all of the values + in memory. 
+ + Existing list_* methods which previously used LazyList class are + preserving the old behavior and new iterate_* methods which use a new + generator based approach have been added. (LIBCLOUD-254) + [Mahendra M] + +- Replace old ENUM style provider constants and replace them with a string + version. + This change allows users to dynamically register new drivers using a new + set_driver method. (LIBCLOUD-255) + [Mahendra M] + +- Allow user to explicitly specify which CA file is used for verifying + the server certificate by setting 'SSL_CERT_FILE' environment variable. + + Note: When this variable is specified, the specified path is the only + CA file which is used to verifying the server certificate. (LIBCLOUD-283) + [Tomaz Muraus, Erinn Looney-Triggs] + +- Add a common module (libcloud.common.xmlrpc) for handling XML-RPC + requests using Libcloud http layer. + + Also refactor existing drivers which use xmlrpclib directly (VCL, Gandi, + Softlayer) to use this module. + + This change allows drivers to support LIBCLOUD_DEBUG and SSL certificate + validation functionality. Previously they have bypassed Libcloud http + layer so this functionality was not available. (LIBCLOUD-288) + [John Carr] + +Compute +~~~~~~~ + +- Fix string interpolation bug in __repr__ methods in the IBM SCE driver. + (LIBCLOUD-242) + [Tomaz Muraus] + +- Fix test failures which happened in Python 3.3 due to: + - hash randomization + - changes in xml.etree module + - changes in xmlrpc module + (LIBCLOUD-245) + [Tomaz Muraus] + +- Improvements and additions in vCloud driver: + - Expose generic query method (ex_query) + - Provide functionality to get and set control access for vApps. This way + created vApps can be shared between users/groups or everyone. 
+ + (LIBCLOUD-251) + [Michal Galet] + +- Update EC2 pricing data to reflect new, lower prices - + http://aws.typepad.com/aws/2012/10/new-ec2-second-generation-standard-instances-and-price-reductions-1.html + [Tomaz Muraus] + +- Update EC2 instance size to reflect new m3 instance types. Also refactor + the code to make it easier to maintain. + [Tomaz Muraus] + +- Add a new driver for HostVirtual (http://www.vr.org) provider. + (LIBCLOUD-249) + [Dinesh Bhoopathy] + +- Fix a bug where a numeric instead of a string value was used for the + content-length header in VCloud driver. (LIBCLOUD-256) + [Brian DeGeeter, Tomaz Muraus] + +- Add a new driver for new Asia Pacific (Sydney) EC2 region. + [Tomaz Muraus] + +- Add support for managing security groups to the OpenStack driver. This + patch adds the following extension methods: + - ex_list_security_groups, ex_get_node_security_groups methods + - ex_create_security_group, ex_delete_security_group + - ex_create_security_group_rule, ex_delete_security_group_rule + (LIBCLOUD-253) + [L. Schaub] + +- Modify ElasticStack driver class to pass 'vnc auto' instead of + 'vnc:ip auto' argument to the API when creating a server. + It looks like 'vnc:ip' has been replaced with 'vnc'. + [Rick Copeland, Tomaz Muraus] + +- Add new EC2 instance type - High Storage Eight Extra Large Instance + (hs1.8xlarge). + [Tomaz Muraus] + +- Map 'shutting-down' node state in EC2 driver to UNKNOWN. Previously + it was mapped to TERMINATED. (LIBCLOUD-280) + + Note: This change is backward incompatible which means you need to update + your code if you rely on the old behavior. + [Tomaz Muraus, Marcin Kuzminski] + +- Change _wait_until_running method so it supports waiting on multiple nodes + and make it public (wait_until_running). (LIBCLOUD-274) + [Nick Bailey] + +- Add new EC2 instance type - High Memory Cluster Eight Extra Large. + (cr1.8xlarge). + [Tomaz Muraus] + +- Add new driver for Abiquo provider - http://www.abiquo.com (LIBCLOUD-250). 
+ [Jaume Devesa] + +- Allow user to pass 'ex_blockdevicemappings' kwarg to the EC2 driver + 'create_node' method. (LIBCLOUD-282) + [Joe Miller, Tomaz Muraus] + +- Improve error handling in the Brightbox driver. + [Tomaz Muraus] + +- Fix the ScriptDeployment step to work correctly if user provides a + relative path for the script argument. (LIBCLOUD-278) + [Jaume Devesa] + +- Fix Softlayer driver and make sure all the code is up to date and works + with the latest version of the actual Softlayer deployment (v3). + (LIBCLOUD-287) + [Kevin McDonald] + +- Update EC2 driver, m3 instance types are now available in all the regions + except Brazil. + + Also update pricing to reflect new (lower) prices. + [Tomaz Muraus] + +- Minor improvements in the HostVirtual driver and add new ex_get_node and + ex_build_node extension method. (LIBCLOUD-249) + [Dinesh Bhoopathy] + +- Add ex_destroy_image method to IBM SCE driver. (LIBCLOUD-291) + [Perry Zou] + +- Add the following new regions to the ElasticHosts driver: sjc-c, syd-v, + hkg-e. (LIBCLOUD-293) + [Tomaz Muraus] + +- Fix create_node in OpenStack driver to work correctly if 'adminPass' + attribute is not present in the response. + [Gavin McCance, Tomaz Muraus] + +- Allow users to filter images returned by the list_images method in the EC2 + driver by providing ex_image_ids argument. (LIBCLOUD-294) + [Chris Psaltis, Joseph Hall] + +- Add support for OpenNebula 3.8. (LIBCLOUD-295) + [Guillaume ZITTA] + +- Add missing 'deletd' -> terminated mapping to OpenStack driver. + (LIBCLOUD-276) + [Jayy Vis] + +- Fix create_node in OpenStack driver to work correctly if 'adminPass' + attribute is not present in the response. (LIBCLOUD-292) + [Gavin McCance, Tomaz Muraus] + +Storage +~~~~~~~ + +- Add a new local storage driver. 
+ (LIBCLOUD-252, LIBCLOUD-258, LIBCLOUD-265, LIBCLOUD-273) + [Mahendra M] + +- Fix a bug which caused the connection to not be closed when using Python + 2.6 and calling get_object on an object which doesn't exist in the S3 + driver. (LIBCLOUD-257) + [John Carr] + +- Add a new generator based method for listing / iterating over the + containers (iterate_containers). (LIBCLOUD-261) + [Mahendra M] + +- Add ex_purge_object_from_cdn method to the CloudFiles driver. + (LIBCLOUD-267) + [Tomaz Muraus] + +- Support for multipart uploads and other improvements in the S3 driver + so it can more easily be re-used with other implementations (e.g. Google + Storage, etc.). + + Also default to a multipart upload when using upload_object_via_stream. + This methods is more efficient compared to old approach because it only + requires buffering a single multipart chunk (5 MB) in memory. + (LIBCLOUD-269) + [Mahendra M] + +- Add new driver for Windows Azure Storage with support for block and page + blobs. (LIBCLOUD-80) + [Mahendra M] + +DNS +~~~ + +- Update 'if type' checks in the update_record methods to behave correctly + if users passes in RecordType.A with a value of 0 - if type is not None. + (LIBCLOUD-247) + [Tomaz Muraus] + +- New driver for HostVirtual provider (www.vr.org). (LIBCLOUD-249) + [Dinesh Bhoopathy] + +- Finish Amazon Route53 driver. (LIBCLOUD-132) + [John Carr] + +- Add new driver for Gandi provider (https://www.gandi.net). (LIBCLOUD-281) + [John Carr] + +Load-Balancer +~~~~~~~~~~~~~ + +- Add new driver for AWS Elastic Load Balancing service. (LIBCLOUD-169) + [John Carr] + +Changes with Apache Libcloud 0.11.4 +----------------------------------- + +General +~~~~~~~ + +- Fix some of tests failures which happened in Python 3.3 due to randomized + dictionary ordering. (LIBCLOUD-245) + [Tomaz Muraus] + +Compute +~~~~~~~ + +- Fix a bug where a numeric instead of a string value was used for the + content-length header in VCloud driver. 
(LIBCLOUD-256) + [Brian DeGeeter, Tomaz Muraus] + +Storage +~~~~~~~ + +- Don't ignore ex_force_service_region argument in the CloudFiles driver. + (LIBCLOUD-260) + [Dan Di Spaltro] + +- Fix a bug which caused the connection to not be closed when using Python + 2.6 and calling get_object on an object which doesn't exist in the S3 + driver. (LIBCLOUD-257) + [John Carr] + +DNS +~~~ + +- Update 'if type' checks in the update_record methods to behave correctly + if users passes in RecordType.A with a value of 0 - if type is not None. + (LIBCLOUD-247) + [Tomaz Muraus] + +Changes with Apache Libcloud 0.11.3 +----------------------------------- + +Storage +~~~~~~~ + +- Include 'last_modified' and 'content_type' attribute in the Object + 'extra' dictionary when retrieving object using get_object in the S3 + driver. Also modify 'meta_data' dictionary to include all the headers + prefixed with 'x-amz-meta-'. + [Tomaz Muraus] + +Changes with Apache Libcloud 0.11.2 +----------------------------------- + +General +~~~~~~~ + +- Fix a bug with the Libcloud SSL verification code. Code was too strict and + didn't allow "-" character in the sub-domain when using a wildcard + certificate. + + Note: This is NOT a security vulnerability. (LIBCLOUD-244) + [Tomaz Muraus] + +Compute +~~~~~~~ + +- Add new Rackspace Nova driver for Chicago (ORD) location (LIBCLOUD-234) + [Brian McDaniel] + +- Add capacity information to Vdc objects and implement power operations. + (LIBCLOUD-239) + [Michal Galet] + +- Allow user to pass 'timeout' argument to the 'deploy_node' method. + [Tomaz Muraus] + +- Add ex_list_security_groups, ex_authorize_security_group and + ex_describe_all_keypairs methods to the EC2 driver. (LIBCLOUD-241, + LIBCLOUD-243) + [Nick Bailey] + +- Add new methods for managing storage volumes and other extenstion methods + to the IBM SCE driver. 
(LIBCLOUD-242) + [Sengor Kusturica] + +Storage +~~~~~~~ + +- Add the following new methods to the CloudFiles driver: + ex_set_account_metadata_temp_url_key, ex_get_object_temp_url. (GITHUB-72) + [Shawn Smith] + +Load-balancer +~~~~~~~~~~~~~ + +- Add 'balancer' attribute to the Member instance. This attribute refers to + the LoadBalancer instance this member belongs to. + [Tomaz Muraus] + +Changes with Apache Libcloud 0.11.1 +----------------------------------- + +General +~~~~~~~ + +- Fix hostname validation in the SSL verification code (CVE-2012-3446). + + Reported by researchers from the University of Texas at Austin (Martin + Georgiev, Suman Jana and Vitaly Shmatikov). + +Changes with Apache Libcloud 0.11.0 +----------------------------------- + +Compute +~~~~~~~ + +- Add a timeout of 10 seconds to OpenStackAuthConnection class. + (LIBCLOUD-199) + [Chris Gilmer] + +- Add time.sleep(wait_period) to _ssh_client_connect to prevent busy loops + when we immediately can't connect to a server. (LIBCLOUD-197) + [Jay Doane] + +- Fix a bug with Python 3 support in the following drivers + - IBM SCE, + - CloudStack + - CloudSigma + - OpenNebula + - VpsNet + - EC2 + - ElasticStack + - vCloud + - OpSource + - Slicehost + - Joyent + (LIBCLOUD-204) + [Sengor Kusturica, Hutson Betts, Tomaz Muraus] + +- Make CloudStack driver more robust and make it work if list_images() call + returns no images. (LIBCLOUD-202) + [Gabriel Reid] + +- Add force_ipv4 argument to _wait_until_running and default it to True. + This will make Libcloud ignore IPv6 addresses when using deploy_node. + (LIBCLOUD-200) + [Jay Doane, Tomaz Muraus] + +- Include error text if a CloudStack async job returns an error code. + (LIBCLOUD-207) + [Gabriel Reid] + +- Add extenstion methods for block storage volume management to the + CloudStack driver. (LIBCLOUD-208) + [Gabriel Reid] + +- New driver for KT UCloud (http://home.ucloud.olleh.com/main.kt) based on + the CloudStack driver. 
+
+ [DaeMyung Kang]
+
+- Add a standard API and methods for managing storage volumes to the
+ EC2 and CloudStack drivers. Base API consists of the following methods:
+ create_volume, destroy_volume, attach_volume, detach_volume.
+ (LIBCLOUD-213)
+ [Gabriel Reid]
+
+- Change ex_describe_tags, ex_create_tags and ex_delete_tags methods
+ signature in the EC2 driver. Argument is now called resource (previously
+ it was called node). These methods work with both Node and StorageVolume
+ objects. (LIBCLOUD-213)
+ [Gabriel Reid, Tomaz Muraus]
+
+- Add Rackspace Nova London driver.
+ [Chris Gilmer]
+
+- Fix a bug - If user doesn't pass in 'network_id' argument to the
+ create_node method in the CloudStack driver, don't explicitly define it.
+ (LIBCLOUD-219)
+ [Bruno Mahé, Tomaz Muraus]
+
+- Modify EC2 driver to also return cc2.8xlarge cluster compute instance in
+ the eu-west-1 region.
+ [Tomaz Muraus]
+
+- Add 'auth_user_variable' to the OpenStackAuthConnection class.
+ [Mark Everett]
+
+- Fix a bug with repeated URLs in some requests in the vCloud driver.
+ (LIBCLOUD-222)
+ [Michal Galet]
+
+- New Gridspot driver with basic list and destroy functionality.
+ (LIBCLOUD-223)
+ [Amir Elaguizy]
+
+- Add methods for managing storage volumes to the Gandi driver.
+ (LIBCLOUD-225)
+ [Aymeric Barantal]
+
+DNS
+~~~
+
+- Add support for GEO RecordType to Zerigo driver. (LIBCLOUD-203)
+ [Gary Wilson]
+
+- Fix a bug with Python 3 support in the following drivers (LIBCLOUD-204)
+ - Zerigo
+ [Tomaz Muraus]
+
+- Add support for URL RecordType to Zerigo driver. (LIBCLOUD-209)
+ [Bojan Mihelac]
+
+- Properly handle record creation when user doesn't provide a record name
+ and wants to create a record for the actual domain.
+ Reported by Matt Perry (LIBCLOUD-224) + [Tomaz Muraus] + +Storage +~~~~~~~ + +- Fix a bug with Python 3 support in the following drivers + - Atmos + - Google Storage + - Amazon S3 + (LIBCLOUD-204) + [Tomaz Muraus] + +- Fix a bug in the CloudFiles driver which prevented it to work with + accounts which use a non ORD endpoint. (LIBCLOUD-205) + [Geoff Greer] + +- Fix a bug in the enable_container_cdn method. (LIBCLOUD-206) + [Geoff Greer] + +- Allow user to specify container CDN TTL when calling container.enable_cd() + using ex_ttl keyword argument in the CloudFiles driver. + [Tomaz Muraus] + +- Add ex_enable_static_website and ex_set_error_page method to the + CloudFiles driver. + [Tomaz Muraus] + +- Propagate kwargs passed to container.download_object() to + driver.download_object(). (LIBCLOUD-227) + [Benno Rice] + +- Fix a bug with not escaping container and object name in the Atmos driver. + [Russell Keith-Magee, Benno Rice] + +- Fix upload_object_via_stream method in the Atmos driver. (LIBCLOUD-228) + [Benno Rice] + +- Fix a bug with uploading zero-sized files in the OpenStack Swift / + CloudFiles driver. + [Tomaz Muraus] + +- Fix a bug with content_type and encoding of object and path names in + the Atmos driver. + [Russell Keith-Magee] + +Other +~~~~~ + +- Unify docstrings formatting in the compute drivers. (LIBCLOUD-229) + [Ilgiz Islamgulov] + +Changes with Apache Libcloud 0.10.1 +----------------------------------- + +General +~~~~~~~ + +- Add timeout attribute to base 'Connection' class and pass it to the + connection class constructor if Python version is not 2.5. + [Chris Gilmer] + +Compute +~~~~~~~ + +- Update IBM SBC driver so it works with IBM Smart Cloud Enterprise. + (LIBCLOUD-195) + [Sengor Kusturica] + +- Add ex_register_iso method to the CloudStack driver. (LIBCLOUD-196) + [Daemian Mack] + +- Allow user to specify which IP to use when calling deploy_node. + (defaults to 'public_ips'). 
Previously it only worked with public IP, now + user can pass 'private_ips' as an argument and SSH client will try to + connect to the node first private IP address. + [Jay Doane] + +- Fix CloudSigmaLvsNodeDriver connectionCls bug. + [Jerry Chen] + +- Add 'ex_keyname' argument to the create_node method in the OpenStack + driver. (LIBCLOUD-177) + [Jay Doane] + +- Fix a problem in deploy_node - make it work with providers which + don't instantly return created node in the list_node response. + Also add __str__ and __repr__ method to DeploymentError so the + error message is more useful. (LIBCLOUD-176) + [Jouke Waleson, Tomaz Muraus] + +- Add 'ssh_key' feature to Brigthbox driver. This way it works with + deploy_node. (LIBCLOUD-179) + [Neil Wilson] + +- Add Joyent compute driver. + [Tomaz Muraus] + +- Store auth token expire times on the connection class in the attribute + called 'auth_token_expires'. (LIBCLOUD-178) + [Chris Gilmer, Brad Morgan] + +- Add new driver for VCL cloud + (http://www.educause.edu/blog/hes8/CloudComputingandtheVirtualCom/167931) + (LIBCLOUD-180) + [Jason Gionta, Tomaz Muraus] + +- Improve and add new features to Brightbox driver + - Update fixtures to represent actual api output + - Update compute tests to 100% coverage + - Add userdata and server group extensions to create_node + - Add ipv6 support to public ip list + - Improve in line documentation + - Add lots of api output information to Node and Image + 'extra' attributes + - Allow variable API versions (api_version argument) + - Allow reverse dns updates for cloud ip extensions + + (LIBCLOUD-184) + [Neil Wilson, Tomaz Muraus] + +- Add ex_userdata argument to the OpenStack 1.1 driver. (LIBCLOUD-185) + [Jay Doane] + +- Modify Vmware vCloud driver and implement new features + for the vCloud version 1.5. (LIBCLOUD-183) + [Michal Galet, Sengor Kusturica] + +- Allow user to pass mode argument to SSHClient.put method and default it to + 'w'. 
(LIBCLOUD-188) + [Jay Doane] + +- Modify SSHKeyDeployment step to use append mode so it doesn't overwrite + existing entries in .ssh/authorized_keys. (LIBCLOUD-187) + [Jay Doane] + +- Modify ParamikoSSHClient to connect to the SSH agent and automatically + look for private keys in ~/.ssh if the 'auth' and 'ssh_key' argument + is not specified when calling deploy_node. (LIBCLOUD-182) + [Tomaz Muraus] + +- Add ex_rescue and ex_unrescue method to OpenStack 1.1 driver. + (LIBCLOUD-193) + [Shawn Smith] + +- Include 'password' in the node extra dictionary when calling deploy_node + if the password auth is used. + [Juan Carlos Moreno] + +- Add FileDeployment class to libcloud.compute.deployment module. This can + be used as a replacement for ex_files argument if the provider supports + deployment functionality. (LIBCLOUD-190) + [Jay Doane] + +Storage +~~~~~~~ + +- Large object upload support for CloudFiles driver +- Add CLOUDFILES_SWIFT driver to connect to OpenStack Swift + [Dmitry Russkikh, Roman Bogorodskiy] + +Load-balancer +~~~~~~~~~~~~~ + +- Don't include 'body_regex' attribute in the Rackspace driver body if + body_regex is None or empty string. (LIBCLOUD-186) + [Bill Woodward] + +- Don't split Load balancer IP addresses into public and private list. + Include all the addresses in the 'virtualIps' variable in the extra + dictionary (Rackspace driver). (LIBCLOUD-191) + [Adam Pickeral] + +Changes with Apache Libcloud 0.9.1 +---------------------------------- + +General +~~~~~~~ + +- Make parsing of the Auth API responses in the OpenStack drivers more + flexible and extensible. + + Now, every connection class that inherits from the openstack base + connection must implement get_endpoint(), who's job is to return the + correct endpoint out of the service catalog. + + Note: The openstack.py base driver no longer works by default with + Rackspace nova. The default endpoint parsed from the service catalog + is the default compute endpoint for devstack. 
(LIBCLOUD-151)
+ [Brad Morgan]
+
+- Allow user to pass ex_tenant_name keyword argument to the OpenStack node
+ driver class. This scopes all the endpoints returned by the Auth API
+ endpoint to the provided tenant. (LIBCLOUD-172)
+ [James E. Blair]
+
+- Allow user to specify OpenStack service catalog parameters (service type,
+ name and region). This way base OpenStack driver can be used with
+ different providers without needing to subclass. (LIBCLOUD-173)
+ [James E. Blair]
+
+- Fix a bug with handling compressed responses in the Linode driver.
+ (LIBCLOUD-158)
+ [Ben Agricola]
+
+Compute
+~~~~~~~
+
+- Add new RackspaceNovaBeta and RackspaceNovaDfw driver based on the
+ OpenStack. (LIBCLOUD-151)
+ [Brad Morgan]
+
+- Include 'created' and 'updated' attribute in the OpenStack 1.1 driver.
+ (LIBCLOUD-155)
+ [Chris Gilmer]
+
+- Include 'minRam' and 'minDisk' attribute in the OpenStack 1.1 driver
+ Node extra dictionary. (LIBCLOUD-163)
+ [Chris Gilmer]
+
+- Allow users to use a list of tuples for the query string parameters inside
+ the OpenStack connection classes. This way the same key can be specified
+ multiple times (LIBCLOUD-153)
+ [Dave King]
+
+- Allow user to pass 'max_tries' keyword argument to deploy_node method.
+ [Tomaz Muraus]
+
+- Include original exception error message when re-throwing an exception
+ inside _run_deployment_script method.
+ [Tomaz Muraus]
+
+- Add support for ElasticHosts new United States (Los Angeles) and Canada
+ (Toronto) locations. (GITHUB-53)
+ [Jaime Irurzun]
+
+- Add serverId attribute to the NodeImage object extra dictionary in the
+ OpenStack driver.
+ [Mark Everett]
+
+- Add new EC2 instance type - m1.medium.
+ [Tomaz Muraus]
+
+- Allow user to re-use auth tokens and pass 'ex_force_auth_token' keyword
+ argument to the OpenStack driver constructor. (LIBCLOUD-164)
+ [Dave King]
+
+- Add new experimental libvirt driver.
+
+ [Tomaz Muraus]
+
+- Properly handle OpenStack providers which return public IP addresses under
+ the 'internet' key in the addresses dictionary.
+ [Tomaz Muraus]
+
+- Update create_node in Linode driver and make it return a Node object
+ instead of a list. Reported by Jouke Waleson. (LIBCLOUD-175)
+ [Tomaz Muraus]
+
+Storage
+~~~~~~~
+
+- Don't lowercase special header names in the Amazon S3 storage driver.
+ (LIBCLOUD-149)
+ [Tomaz Muraus]
+
+Load-balancer
+~~~~~~~~~~~~~
+
+- Allow user to specify a condition and weight when adding a member in
+ the Rackspace driver.
+ [Adam Pickeral]
+
+- Add an extension method (ex_balancer_attach_members) for attaching
+ multiple members to a load balancer in the Rackspace driver.
+ (LIBCLOUD-152)
+ [Adam Pickeral]
+
+- Add ex_create_balancer method to the Rackspace driver and allow user to
+ pass 'vip' argument to it. (LIBCLOUD-166)
+ [Adam Pickeral]
+
+- Update Rackspace driver to support Auth 2.0. (LIBCLOUD-165)
+ [Dave King]
+
+- Add new ex_create_balancer_access_rule and
+ ex_create_balancer_access_rule_no_poll method to the Rackspace driver.
+ (LIBCLOUD-170)
+ [Dave King]
+
+DNS
+~~~
+
+- Update Rackspace driver to support Auth 2.0. (LIBCLOUD-165)
+ [Dave King]
+
+Changes with Apache Libcloud 0.8.0
+----------------------------------
+
+General
+~~~~~~~
+
+- Add 'request_kwargs' argument to the get_poll_request_kwargs method.
+ This argument contains kwargs which were previously used to initiate the
+ poll request.
+ [Mark Everett]
+
+- Add support for handling compressed responses (deflate, gzip). Also send
+ "Accept-Encoding" "gzip,deflate" header with all the requests.
+ [Tomaz Muraus]
+
+- Fix debug module (LIBCLOUD_DEBUG env variable) so it works with Python 3
+ [Tomaz Muraus]
+
+Compute
+~~~~~~~
+
+- Added support for retrieving OpenNebula v3.2 instance types, OpenNebula
+ v3.0 network Public attribute support, and additional code coverage
+ tests.
+ [Hutson Betts] + +- Add implementation for ex_save_image method to the OpenStack 1.1 driver. + [Shawn Smith] + +- Add support for Amazon new South America (Sao Paulo) location. + [Tomaz Muraus] + +- Fix a bug in OpenStack driver when 2.0_apikey or 2.0_password + 'auth_version' is used. + [Tomaz Muraus] + +- Current OpenNebula OCCI implementation does not support a proper + restart method. Rather it suspends and resumes. Therefore, restart_node + has been removed from the OpenNebula driver. + [Hutson Betts] + +- Enable ex_delete_image method in the OpenStack 1.1 driver. + [Shawn Smith] + +- Return NodeImage instance in OpenStack 1.1 driver ex_save_image method + (LIBCLOUD-138) + [Shawn Smith] + +- Enable reboot_node method in the OpenNebula 3.2 driver. + [Hutson Betts] + +- Fix a public_ips Node variable assignment in the Gandi.net driver. + [Aymeric Barantal] + +- Updated the list of node states for OpenNebula drivers. (LIBCLOUD-148) + [Hutson Betts] + +Storage +~~~~~~~ + +- Propagate extra keyword arguments passed to the Rackspace driver + connection class. + [Dave King] + +Load-balancer +~~~~~~~~~~~~~ + +- Add 'extra' attribute to the LoadBalancer object and retrieve all the + virtual IP addresses in the Rackspace driver. + [Dave King] + +- Add list_supported_algorithms() method to the base LoadBalancer class. + This method returns a list of supported algorithms by the provider. + [Dave King] + +- Update Rackspace driver: + - Add two new supported algorithms: WEIGHTED_ROUND_ROBIN, + WEIGHTED_LEAST_CONNECTIONS + - Add ex_list_algorithm_names method + - Add ex_get_balancer_error_page method + - Add ex_balancer_access_list method + - Populate LoadBalancer extra dictionary with more attributes + - Add support for health monitors and connection throttling + - Add more balancer states + - ex_list_protocols_with_default_ports + + [Dave King] + +- Propagate extra keyword arguments passed to the Rackspace driver + connection class. 
+
+ [Dave King]
+
+- Add 'extra' attribute to the Member object and populate it in
+ the Rackspace driver.
+ [Mark Everett]
+
+- Adds status to the Member object and conditions an 'enum'
+ (Rackspace driver).
+ [Mark Everett]
+
+- Add update_balancer method to the base LoadBalancer class.
+ [Mark Everett]
+
+- Add update_balancer method to the Rackspace driver.
+ [Mark Everett]
+
+- Add created and updated attribute to the LoadBalancer extra dictionary in
+ the Rackspace driver.
+ [Mark Everett]
+
+- Fix protocol name mapping in the Rackspace driver.
+ [Bill Woodward]
+
+Changes with Apache Libcloud 0.7.1
+----------------------------------
+
+General
+~~~~~~~
+
+ - Fix a minor bug in debug mode (LIBCLOUD_DEBUG=/dev/stderr) which has been
+   introduced when adding Python 3 compatibility layer.
+   [Paul Querna]
+
+ - Update OpenStack Auth API endpoint paths.
+   [Paul Querna]
+
+Changes with Apache Libcloud 0.7.0
+----------------------------------
+
+General
+~~~~~~~
+
+- Add support for Python 3.x.
+ [Tomaz Muraus]
+
+- Remove old deprecated paths.
+ [Tomaz Muraus]
+
+Compute
+~~~~~~~
+
+- Update CloudSigma Zurich API endpoint address.
+ [Tomaz Muraus]
+
+- Add new US Las Vegas endpoint to CloudSigma driver (types.CLOUDSIGMA_US)
+ [Tomaz Muraus]
+
+- Allow user to specify drive type (hdd, ssd) when creating a
+ CloudSigma server.
+
+ Note 'ssd' drive_type doesn't work with the API yet.
+ [Tomaz Muraus]
+
+- Update OpenStack 1.1 driver to comply with the API specs. Need to make
+ another call to retrieve node name and ip addresses when creating a node,
+ because the first call only returns an id and the password. (GITHUB-40)
+ [Dave King]
+
+- Add ex_node_ids argument to the EC2 driver list_nodes method.
+ (GITHUB-39)
+ [Suvish Vt]
+
+- If OpenStack Auth 2.0 API is used, also parse out tenant id and
+ name and save it on the connection class (conn.tenant['id'],
+ conn.tenant['name']).
+ [Tomaz Muraus] + +- Add new "Cluster Compute Eight Extra Large" size to the Amazon EC2 + driver. + [Tomaz Muraus] + +- Add the following extension methods to the EC2 compute driver: + ex_describe_all_addresses, ex_associate_addresses, ex_start_node, + ex_stop_node. + [Suvish Vt] + +- Change public_ip and private_ip attribute on the Node object to the + public_ips and private_ips since both of the objects are always a list. + + Note: For backward compatibility you can still access public_ip and + private_ip attributes, but this will be removed in the next release. + [Tomaz Muraus] + +- Fix an inconsistency in IBM SBC driver and make sure public_ips and + private_ips attributes are a list. + [Tomaz Muraus] + +- Fix an inconsistency in OpSource driver and make sure public_ips is an + empty list ([]), not 'unknown' + [Tomaz Muraus] + +- Updated support for OpenNebula.org v1.4, v2.x, and v3.x APIs and included + additional compute tests validating functionality. (LIBCLOUD-121) + [Hutson Betts] + +Load-balancer +~~~~~~~~~~~~~ + +- Add ex_member_address argument to the Rackspace driver list_balancers + method. If this argument is provided, only loadbalancers which have a + member with the provided IP address attached are returned. + [Tomaz Muraus] + +Changes with Apache Libcloud 0.6.2 +---------------------------------- + +General +~~~~~~~ + +- Fix a bug in PollingConnection class - actually use and don't ignore + the poll_interval + [Tomaz Muraus] + +Compute +~~~~~~~ + +- Add support for Auth 2.0 API (keystone) to the OpenStack Auth + connection class. + [Brad Morgan] + +- Add list_locations method to the OpenStack driver and fix some + inconsistencies in the OpenStack driver extension method signatures. + [Brad Morgan] + +- Update Amazon EC2 driver and pricing data to support a new region - + US West 2 (Oregon) + [Tomaz Muraus] + +- Expose 'CLOUDSTACK' provider. This driver can be used with an + arbitrary CloudStack installation. 
+
+ [Tomaz Muraus]
+
+Storage
+~~~~~~~
+
+- Update Amazon S3 driver to support a new region - US West 2 (Oregon)
+ [Tomaz Muraus]
+
+DNS
+~~~
+
+- Increase the default poll interval in the Rackspace driver to 2.5
+ seconds.
+ [Tomaz Muraus]
+
+- Fix a bug in Rackspace Cloud DNS driver and make sure to throw an
+ exception if an unexpected status code is returned. Reported by
+ "jeblair".
+ [Tomaz Muraus]
+
+Changes with Apache Libcloud 0.6.1
+----------------------------------
+
+General
+~~~~~~~
+
+- Modify ParamikoSSHClient.connect so it supports authentication using a
+ key file. (LIBCLOUD-116)
+ [Jay Doane]
+
+- User must now explicitly specify a path when using LIBCLOUD_DEBUG
+ environment variable. (LIBCLOUD-95)
+ [daveb, Tomaz Muraus]
+
+- Add new XmlResponse and JsonResponse base class and modify all the
+ driver-specific response classes to inherit from one of those two
+ classes where applicable.
+ [Caio Romão]
+
+- Add new 'PollingConnection' class. This class can work with 'async'
+ APIs. It sends an initial request and then periodically polls the API
+ until the job has completed or a timeout has been reached.
+ [Tomaz Muraus]
+
+Compute
+~~~~~~~
+
+- Add 24GB size to the GoGrid driver
+ [Roman Bogorodskiy]
+
+- Fix API endpoint URL in the Softlayer driver
+ [Tomaz Muraus]
+
+- Add support for OpenNebula 3.0 API (LIBCLOUD-120)
+ [Hutson Betts]
+
+- Add more attributes to the extra dictionary in the EC2 driver.
+ (GITHUB-31)
+ [Juan Carlos Moreno]
+
+- Fix IP address assignment in the EC2 driver. Don't include "None" in the
+ public_ip and private_ip Node list attribute.
+ [Tomaz Muraus]
+
+- Make deploy_node functionality more robust and don't start deployment if
+ node public_ip attribute is an empty list.
+ [Tomaz Muraus]
+
+- Support SSH key authentication when using deploy_node.
+
+ [Russell Haering, Tomaz Muraus]
+
+- Enable deploy_node functionality in the EC2 driver using SSH key
+ authentication
+ [Russell Haering, Tomaz Muraus]
+
+- Enable paramiko library debug log level if LIBCLOUD_DEBUG is used and
+ paramiko is installed.
+ [Tomaz Muraus]
+
+- Fix the request signature generation in the base EC2 compute driver.
+ If the endpoint is using a non-standard port (Eucalyptus based
+ installations), append it to the hostname used to generate the
+ signature.
+ [Simon Delamare]
+
+- Add new "unavailable" state to the BrightboxNodeDriver class.
+ [Tim Fletcher]
+
+- Increase a PollingConnection timeout in the CloudStack connection
+ and fix the context dictionary creation in the _async_request method.
+ [Oleg Suharev]
+
+- Fix networks retrieval in the CloudStack driver create_node method.
+ Also only pass 'networkids' field to the API if there are any networks
+ available.
+ [Oleg Suharev, Tomaz Muraus]
+
+- Fix list_nodes in the CloudStack driver. Private IPs aren't always
+ available.
+ [Tomaz Muraus]
+
+Load-balancer
+~~~~~~~~~~~~~
+
+- Add a missing argument to the method call inside
+ LoadBalancer.attach_compute_node and Driver.balancer_attach_compute_node.
+ [Tim Fletcher, Tomaz Muraus]
+
+- Add missing destroy() method to the LoadBalancer class.
+ [Tomaz Muraus]
+
+DNS
+~~~
+
+- New drivers for Rackspace Cloud DNS (US and UK region)
+ [Tomaz Muraus]
+
+- Add list_record_types() method. This method returns a list of record
+ types supported by the provider.
+ [Tomaz Muraus]
+
+Changes with Apache Libcloud 0.6.0-beta1
+----------------------------------------
+
+General
+~~~~~~~
+
+- All the driver classes now inherit from the BaseDriver class
+ [Tomaz Muraus]
+
+- Prefer simplejson (if available) over json module. (LIBCLOUD-112)
+ [Geoff Greer]
+
+- Update compute demo and change the syntax of test credentials stored in
+ test/secrets.py-dist.
(LIBCLOUD-111) + [Mike Nerone] + +- Enable SSL certificate verification by default and throw an exception + if CA certificate files cannot be found. This can be overridden by + setting libcloud.security.VERIFY_SSL_CERT_STRICT to False. + [Tomaz Muraus] + +Compute +~~~~~~~ + +- Support for 1.1 API and many other improvements in the OpenStack driver ; + LIBCLOUD-83 + [Mike Nerone, Paul Querna, Brad Morgan, Tomaz Muraus] + +- Add some extra methods to the Gandi.net driver (LIBCLOUD-115) + [Aymeric Barantal] + +- Add ex_delete_image method to the Rackspace driver. (GITHUB-27) + [David Busby] + +- Linode driver now supports new 'Japan' location + [Jed Smith] + +- Rackspace driver now inherits from the OpenStack one instead of doing + it vice versa. (LIBCLOUD-110) + [Mike Nerone] + +- Properly populate NodeImage "details" dictionary in the Rackspace + compute driver. (LIBCLOUD-107) + [Lucy Mendel] + +- Fix a bug in Eucalyptus driver ex_describe_addresses method. + [Tomaz Muraus] + +- Add the following new extenstion methods to the Rackspace driver: + ex_resize, ex_confirm_resize, ex_revert_resize. + [Tomaz Muraus] + +- Also allow user to pass in Node object to some ex\_ methods in + the Rackspace compute driver. + [Tomaz Muraus] + +- Throw an exception in deploy_node if paramiko library is not + available + [Tomaz Muraus] + +- Fix chmod argument value which is passed to the sftpclient.put + method; GITHUB-17 + [John Carr] + +- New driver for Ninefold.com. (LIBCLOUD-98) + [Benno Rice] + +Storage +~~~~~~~ + +- New driver for Google Storage based on the v1.0 / legacy API + [Tomaz Muraus] + +- New driver for Ninefold.com. (GITHUB-19) + [Benno Rice] + +- Fix a bug in uploading an object with some versions of Python 2.7 + where httplib library doesn't automatically call str() on the + header values. 
+ [Tomaz Muraus] + +- Allow users to upload (create) 0-bytes large (empty) objects + [Tomaz Muraus] + +Load-balancer +~~~~~~~~~~~~~ + +- New driver for Rackspace UK location + [Tomaz Muraus] + +- New driver for Ninefold.com. (LIBCLOUD-98) + [Benno Rice] + +DNS +~~~ + +- Drivers for Linode DNS and Zerigo DNS + [Tomaz Muraus] + +- Brand new DNS API! + [Tomaz Muraus] + +Changes with Apache Libcloud 0.5.2 +---------------------------------- + +Compute +~~~~~~~ + +- New driver for serverlove.com and skalicloud.com + [Tomaz Muraus] + +- Fix node name and tag handling in the Amazon EC2 driver + [Wiktor Kolodziej] + +- Fix pricing and response handling in the OpenStack driver + [Andrey Zhuchkov] + +- Fix deploy_node() method and make it more robust + [Tomaz Muraus] + +- Users can now pass file like objects to ScriptDeployment and + SSHKeyDeployment constructor. + [Tomaz Muraus] + +- Include node tags when calling list_nodes() in the Amazon EC2 + driver + [Trevor Pounds] + +- Properly handle response errors in the Rackspace driver and + only throw InvalidCredsError if the returned status code is 401 + [Brad Morgan] + +- Fix the create_node method in the Nimbus driver and make the + "ex_create_tag" method a no-op, because Nimbus doesn't support creating + tags. + [Tomaz Muraus] + +Storage +~~~~~~~ + +- Fix handling of the containers with a lot of objects. Now a LazyList + object is returned when user calls list_container_objects() method + and this object transparently handles pagination. + [Danny Clark, Wiktor Kolodziej] + +Changes with Apache Libcloud 0.5.0 +---------------------------------- + +- Existing APIs directly on the libcloud.* module have been + deprecated and will be removed in version 0.6.0. Most methods + were moved to the libcloud.compute.* module. 
+
+- Add new libcloud.loadbalancers API, with initial support for:
+ - GoGrid Load Balancers
+ - Rackspace Load Balancers
+
+ [Roman Bogorodskiy]
+
+- Add new libcloud.storage API, with initial support for:
+ - Amazon S3
+ - Rackspace CloudFiles
+
+ [Tomaz Muraus]
+
+- Add new libcloud.compute drivers for:
+ - Bluebox [Christian Paredes]
+ - Gandi.net [Aymeric Barantal]
+ - Nimbus [David LaBissoniere]
+ - OpenStack [Roman Bogorodskiy]
+ - Opsource.net [Joe Miller]
+
+- Added "pricing" module and improved pricing handling.
+ [Tomaz Muraus]
+
+- Updates to the GoGrid compute driver:
+ - Use API version 1.0.
+ - Remove sandbox flag.
+ - Add ex_list_ips() to list IP addresses assigned to the account.
+ - Implement ex_edit_image method which allows changing image attributes
+ like name, description and make image public or private.
+
+ [Roman Bogorodskiy]
+
+- Updates to the Amazon EC2 compute driver:
+ - When creating a Node, use the name argument to set a Tag with the
+ value. [Tomaz Muraus]
+ - Add extension method for modifying node attributes and changing the
+ node size. [Tomaz Muraus]
+ - Add support for the new Amazon Region (Tokyo). [Tomaz Muraus]
+ - Added ex_create_tags and ex_delete_tags. [Brandon Rhodes]
+ - Include node Elastic IP addresses in the node public_ip attribute
+ for the EC2 nodes. [Tomaz Muraus]
+ - Use ipAddress and privateIpAddress attribute for the EC2 node public
+ and private ip. [Tomaz Muraus]
+ - Add ex_describe_addresses method to the EC2 driver. [Tomaz Muraus]
+
+- Updates to the Rackspace CloudServers compute driver:
+ - Add ex_rebuild() and ex_get_node_details() [Andrew Klochkov]
+ - Expose URI of a Rackspace node to the node meta data. [Paul Querna]
+
+- Minor fixes to get the library and tests working on Python 2.7 and PyPy.
+ [Tomaz Muraus] + +Changes with Apache Libcloud 0.4.2 (Released January 18, 2011) +-------------------------------------------------------------- + +- Fix EC2 create_node to become backward compatible for + NodeLocation. + [Tomaz Muraus] + +- Update code for compatibility with CPython 2.5 + [Jerry Chen] + +- Implement ex_edit_node method for GoGrid driver which allows + changing node attributes like amount of RAM or description. + [Roman Bogorodskiy] + +- Add ex_set_password and ex_set_server_name to Rackspace driver. + [Peter Herndon, Paul Querna] + +- Add Hard and Soft reboot methods to Rackspace driver. + [Peter Herndon] + +- EC2 Driver availability zones, via ex_list_availability_zones; + list_locations rewrite to include availability zones + [Tomaz Muraus] + +- EC2 Driver Idempotency capability in create_node; LIBCLOUD-69 + [David LaBissoniere] + +- SSL Certificate Name Verification: + - libcloud.security module + - LibcloudHTTPSConnection, LibcloudHTTPConnection (alias) + - Emits warning when not verifying, or CA certs not found + +- Append ORD1 to available Rackspace location, but keep in the + same node as DFW1, because it's not readable or writeable from + the API. + [Per suggestion of Grig Gheorghiu] + +- ex_create_ip_group, ex_list_ip_groups, ex_delete_ip_group, + ex_share_ip, ex_unshare_ip, ex_list_ip_addresses additions + to Rackspace driver + [Andrew Klochkov] + +- New driver for CloudSigma. + [Tomaz Muraus] + +- New driver for Brightbox Cloud. (LIBCLOUD-63) + [Tim Fletcher] + +- Deployment capability to ElasticHosts + [Tomaz Muraus] + +- Allow deploy_node to use non-standard SSH username and port + [Tomaz Muraus] + +- Added Rackspace UK (London) support + [Chmouel Boudjnah] + +- GoGrid driver: add support for locations, i.e. 
listing + of locations and creation of a node in specified + location + [Roman Bogorodskiy] + +- GoGrid and Rackspace drivers: add ex_save_image() extra + call to convert running node to an image + [Roman Bogorodskiy] + +- GoGrid driver: add support for creating 'sandbox' server + and populate isSandbox flag in node's extra information. + [Roman Bogorodskiy] + +- Add ImportKeyPair and DescribeKeyPair to EC2. (LIBCLOUD-62) + [Philip Schwartz] + +- Update EC2 driver and test fixtures for new API. + [Philip Schwartz] + +Changes with Apache Libcloud 0.4.0 [Released October 6, 2010] +------------------------------------------------------------- + +- Add create keypair functionality to EC2 Drivers. (LIBCLOUD-57) + [Grig Gheorghiu] + +- Improve handling of GoGrid accounts with limited access + API keys. + [Paul Querna] + +- New Driver for ElasticHosts. (LIBCLOUD-45) + [Tomaz Muraus] + +- Use more consistent name for GoGrid driver and use http + POST method for 'unsafe' operations + [Russell Haering] + +- Implement password handling and add deployment support + for GoGrid nodes. + [Roman Bogorodskiy] + +- Fix behavior of GoGrid's create_node to wait for a Node ID. + [Roman Bogorodskiy] + +- Add ex_create_node_nowait to GoGrid driver if you don't need to + wait for a Node ID when creating a node. + [Roman Bogorodskiy] + +- Removed libcloud.interfaces module. + [Paul Querna] + +- Removed dependency on zope.interfaces. + [Paul Querna] + +- RimuHosting moved API endpoint address. + [Paul Querna] + +- Fix regression and error in GoGrid driver for parsing node objects. + [Roman Bogorodskiy] + +- Added more test cases for GoGrid driver. (LIBCLOUD-34) + [Roman Bogorodskiy, Jerry Chen] + +- Fix parsing of Slicehost nodes with multiple Public IP addresses. + [Paul Querna] + +- Add exit_status to ScriptDeployment. (LIBCLOUD-36) + [Paul Querna] + +- Update prices for several drivers. + [Brad Morgan, Paul Querna] + +- Update Linode driver to reflect new plan sizes. 
+ [Jed Smith] + +- Change default of 'location' in Linode create_node. (LIBCLOUD-41) + [Jed Smith, Steve Steiner] + +- Document the Linode driver. + [Jed Smith] + +- Request a private, LAN IP address at Linode creation. + [Jed Smith] + +Changes with Apache Libcloud 0.3.1 [Released May 11, 2010] +---------------------------------------------------------- + +- Updates to Apache License blocks to correctly reflect status as an + Apache Project. + +- Fix NOTICE file to use 2010 copyright date. + +- Improve error messages for when running the test cases without + first setting up a secrets.py + +Changes with Apache Libcloud 0.3.0 [Tagged May 6, 2010, not released] +--------------------------------------------------------------------- + +- New Drivers for: + - Dreamhost + - Eucalyptus + - Enomaly ECP + - IBM Developer Cloud + - OpenNebula + - SoftLayer + +- Added new deployment and bootstrap API. + +- Improved Voxel driver. + +- Added support for Amazon EC2 Asia Pacific (Singapore) Region. + +- Improved test coverage for all drivers. + +- Add support for multiple security groups in EC2. + +- Fixed bug in Rackspace and RimuHosting when using multiple threads. + +- Improved debugging and logging of HTTP requests. + +- Improved documentation for all classes and methods. + +Changes with Apache Libcloud 0.2.0 [Tagged February 2, 2010] +------------------------------------------------------------ + +- First public release. diff -Nru libcloud-0.5.0/debian/changelog libcloud-0.15.1/debian/changelog --- libcloud-0.5.0/debian/changelog 2012-08-19 14:24:52.000000000 +0000 +++ libcloud-0.15.1/debian/changelog 2016-02-15 23:02:50.000000000 +0000 @@ -1,13 +1,47 @@ -libcloud (0.5.0-1.1) unstable; urgency=low +libcloud (0.15.1-1~trusty10) trusty; urgency=medium - * Non-maintainer upload. - * [SECURITY] Fix "CVE-2012-3446: MITM vulnerability in TLS/SSL certificates - verification": add patch - 0001-Fix-hostname-validation-in-the-SSL-verification-code.patch - taken from upstream git. 
- (Closes: #683927) + * backport to trusty - -- gregor herrmann Sun, 19 Aug 2012 16:24:16 +0200 + -- Hans-Christoph Steiner Tue, 16 Feb 2016 00:02:50 +0100 + +libcloud (0.15.1-1) unstable; urgency=medium + + * New upstream release. + * Build Python3 package (Closes: #745962). Thanks to + Daniel Lintott for the initial patch! + * Correctly install upstream changelog. + * Add lintian overrides for privacy-breach-generic as they are + being triggered by dummy data used in the testsuite. + * Install example files. + * Add autopackage tests. + + -- Andrew Starr-Bochicchio Sun, 06 Jul 2014 21:59:57 -0400 + +libcloud (0.14.1-1) unstable; urgency=low + + * new upstream release + + -- Hans-Christoph Steiner Thu, 17 Apr 2014 11:41:46 -0400 + +libcloud (0.13.3-1) unstable; urgency=low + + [ Jakub Wilk ] + * Add Vcs-* fields. + + [ Julien Cristau ] + * New upstream release + * Bump X-Python-Version to 2.6+, 2.5 didn't have an ssl module. + + [ Jakub Wilk ] + * Use canonical URIs for Vcs-* fields. + + [ Hans-Christoph Steiner ] + * Non-maintainer upload (closes: #679881) + * Newest upstream release where the tests pass (tests are broken in + 0.14.0 and 0.14.1). + * Bumped Standards-Version to 3.9.5. No changes needed. 
+ + -- Hans-Christoph Steiner Wed, 16 Apr 2014 22:09:23 -0400 libcloud (0.5.0-1) unstable; urgency=low diff -Nru libcloud-0.5.0/debian/clean libcloud-0.15.1/debian/clean --- libcloud-0.5.0/debian/clean 2011-06-14 11:00:58.000000000 +0000 +++ libcloud-0.15.1/debian/clean 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -test/secrets.py -debian/stamp-makefile-check diff -Nru libcloud-0.5.0/debian/control libcloud-0.15.1/debian/control --- libcloud-0.5.0/debian/control 2011-06-09 10:04:46.000000000 +0000 +++ libcloud-0.15.1/debian/control 2014-07-07 02:03:09.000000000 +0000 @@ -2,22 +2,92 @@ Section: python Priority: optional Maintainer: Debian Python Modules Team -Uploaders: Soren Hansen -Build-Depends: debhelper (>= 7.0.50~), python-all (>= 2.6.6-3~), python-simplejson -Standards-Version: 3.9.2 -X-Python-Version: >= 2.5 -Homepage: http://libcloud.org/ +Uploaders: Soren Hansen , + Hans-Christoph Steiner , + Andrew Starr-Bochicchio +Build-Depends: debhelper (>= 7.0.50~), + openssh-client, + python-all (>= 2.6.6-3~), + python-crypto (>= 2.6), + python-simplejson, + python-setuptools, + python-mock (>= 0.8.0), + python-epydoc, + python-pydoctor, + python3-all, + python3-crypto (>= 2.6), + python3-simplejson, + python3-setuptools, + python3-mock (>= 0.8.0) +Standards-Version: 3.9.5 +X-Python-Version: >= 2.6 +X-Python3-Version: >= 3.0 +XS-Testsuite: autopkgtest +Homepage: https://libcloud.apache.org/ +Vcs-Svn: svn://anonscm.debian.org/python-modules/packages/libcloud/trunk/ +Vcs-Browser: http://anonscm.debian.org/viewvc/python-modules/packages/libcloud/trunk/ Package: python-libcloud Architecture: all -Depends: ${misc:Depends}, ${python:Depends}, python-simplejson +Depends: ${misc:Depends}, + ${python:Depends}, + python-crypto (>= 2.6), + python-lockfile, + python-simplejson Description: unified Python interface into the cloud - libcloud is a pure Python client library for interacting with many of - the popular cloud server providers. 
It was created to make it easy for - developers to build products that work between any of the services that - it supports. + libcloud is a pure Python client library for interacting with many of the + popular cloud server providers using a unified API. It was created to make it + easy for developers to build products that work between any of the services + that it supports. + . + * Avoid vendor lock-in + * Use the same API to talk to many different providers + * More than 30 supported providers total + * Four main APIs: Compute, Storage, Load Balancers, DNS + * Supports Python 2.5, Python 2.6, Python 2.7, PyPy and Python 3 + . + Resource you can manage with Libcloud are divided in the following categories: + . + * Cloud Servers and Block Storage - services such as Amazon EC2 and + Rackspace Cloud Servers (libcloud.compute.*) + * Cloud Object Storage and CDN - services such as Amazon S3 and + Rackspace CloudFiles (libcloud.storage.*) + * Load Balancers as a Service, LBaaS (libcloud.loadbalancer.*) + * DNS as a Service, DNSaaS (libcloud.dns.*) . libcloud was originally created by the folks over at Cloudkick, but has since grown into an independent free software project licensed under the Apache License (2.0). +Package: python3-libcloud +Architecture: all +Depends: ${misc:Depends}, + ${python3:Depends}, + python3-crypto (>= 2.6), + python3-simplejson +Description: unified Python interface into the cloud (Python3 version) + libcloud is a pure Python client library for interacting with many of the + popular cloud server providers using a unified API. It was created to make it + easy for developers to build products that work between any of the services + that it supports. + . + * Avoid vendor lock-in + * Use the same API to talk to many different providers + * More than 30 supported providers total + * Four main APIs: Compute, Storage, Load Balancers, DNS + * Supports Python 2.5, Python 2.6, Python 2.7, PyPy and Python 3 + . 
+ Resource you can manage with Libcloud are divided in the following categories: + . + * Cloud Servers and Block Storage - services such as Amazon EC2 and + Rackspace Cloud Servers (libcloud.compute.*) + * Cloud Object Storage and CDN - services such as Amazon S3 and + Rackspace CloudFiles (libcloud.storage.*) + * Load Balancers as a Service, LBaaS (libcloud.loadbalancer.*) + * DNS as a Service, DNSaaS (libcloud.dns.*) + . + libcloud was originally created by the folks over at Cloudkick, but has + since grown into an independent free software project licensed under + the Apache License (2.0). + . + This is the Python 3 version of the package. diff -Nru libcloud-0.5.0/debian/copyright libcloud-0.15.1/debian/copyright --- libcloud-0.5.0/debian/copyright 2011-06-08 18:36:15.000000000 +0000 +++ libcloud-0.15.1/debian/copyright 2014-07-06 23:55:51.000000000 +0000 @@ -1,32 +1,28 @@ -This package was debianized by Soren Hansen on -Sat, 07 Nov 2009 22:40:06 +0100. - -{test/test_,libcloud/drivers/}rimuhosting.py and drivers/ibm_sbc.py are -Copyright 2009 RedRata Ltd. - -{test/test_,libcloud/drivers/}opennebula.py are Copyright 2002-2009 -Distributed Systems Architecture Group, Universidad Complutense de -Madrid (dsa-research.org) - -Everything else is Copyright 2010 The Apache Software Foundation. - -License: - Licensed to the Apache Software Foundation (ASF) under one or more - contributor license agreements. See the NOTICE file distributed with - this work for additional information regarding copyright ownership. - The ASF licenses this file to You under the Apache License, Version 2.0 - (the "License"); you may not use this file except in compliance with - the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - -For the full license, see '/usr/share/common-licenses/Apache-2.0'. - -The Debian packaging is Copyright 2009-2010 Soren Hansen, and covered by the -Apache 2.0 license. +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: apache-libcloud +Upstream-Contact: dev@libcloud.apache.org +Source: https://git-wip-us.apache.org/repos/asf/libcloud.git + +Comment: + This package was debianized by Soren Hansen on + Sat, 07 Nov 2009 22:40:06 +0100. + +Files: test/test_rimuhosting.py libcloud/drivers/rimuhosting.py drivers/ibm_sbc.py +Copyright: 2009, RedRata Ltd. +License: Apache-2.0 + +Files: test_opennebula.py libcloud/drivers/opennebula.py +Copyright: 2002-2009, Distributed Systems Architecture Group, Universidad Complutense de Madrid (dsa-research.org) +License: Apache-2.0 + +Files: debian/* +Copyright: 2009-2010, Soren Hansen +License: Apache-2.0 + +Files: * +Copyright: 2009-2014, The Apache Software Foundation. +License: Apache-2.0 + +License: Apache-2.0 + On Debian systems the full text of the Apache License can be found + in the `/usr/share/common-licenses/Apache-2.0' file. 
diff -Nru libcloud-0.5.0/debian/docs libcloud-0.15.1/debian/docs --- libcloud-0.5.0/debian/docs 2011-06-09 09:27:15.000000000 +0000 +++ libcloud-0.15.1/debian/docs 2014-07-07 01:32:25.000000000 +0000 @@ -1 +1 @@ -README +README.rst diff -Nru libcloud-0.5.0/debian/examples libcloud-0.15.1/debian/examples --- libcloud-0.5.0/debian/examples 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/debian/examples 2014-07-07 01:42:44.000000000 +0000 @@ -0,0 +1,2 @@ +example_* +demos/* diff -Nru libcloud-0.5.0/debian/patches/0001-Fix-hostname-validation-in-the-SSL-verification-code.patch libcloud-0.15.1/debian/patches/0001-Fix-hostname-validation-in-the-SSL-verification-code.patch --- libcloud-0.5.0/debian/patches/0001-Fix-hostname-validation-in-the-SSL-verification-code.patch 2012-08-19 14:24:07.000000000 +0000 +++ libcloud-0.15.1/debian/patches/0001-Fix-hostname-validation-in-the-SSL-verification-code.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,86 +0,0 @@ -Bug-Debian: http://bugs.debian.org/683927 -Reviewed-by: gregor herrmann -Last-Update: 2012-08-19 -Origin: https://github.com/apache/libcloud/commit/f2af5502dae3ac63e656dd1b7d5f29cc82ded401 -Comment: This is f2af550 from upstream git, minus the changes in the Changes - file and the version change in __init__.py, plus adjustments for offsets - -From f2af5502dae3ac63e656dd1b7d5f29cc82ded401 Mon Sep 17 00:00:00 2001 -From: Tomaž Muraus -Date: Thu, 2 Aug 2012 00:39:09 +0000 -Subject: [PATCH] Fix hostname validation in the SSL verification code - (CVE-2012-3446). Reported by researchers from the - University of Texas at Austin (Martin Georgiev, Suman Jana - and Vitaly Shmatikov). For more info, see - http://libcloud.apache.org/security.html. - ---- a/libcloud/httplib_ssl.py -+++ b/libcloud/httplib_ssl.py -@@ -115,13 +115,8 @@ - # replace * with alphanumeric and dash - # replace . with literal . - valid_patterns = [ -- re.compile( -- pattern.replace( -- r".", r"\." 
-- ).replace( -- r"*", r"[0-9A-Za-z]+" -- ) -- ) -+ re.compile('^' + pattern.replace(r".", r"\.") \ -+ .replace(r"*", r"[0-9A-Za-z]+") + '$') - for pattern - in (set(common_name) | set(alt_names)) - ] ---- a/test/test_httplib_ssl.py -+++ b/test/test_httplib_ssl.py -@@ -44,16 +44,49 @@ - 'subjectAltName': ((('DNS', 'foo.alt.name')), - (('DNS', 'foo.alt.name.1')))} - -+ cert3 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', -+ 'subject': ((('countryName', 'US'),), -+ (('stateOrProvinceName', 'Delaware'),), -+ (('localityName', 'Wilmington'),), -+ (('organizationName', 'Python Software Foundation'),), -+ (('organizationalUnitName', 'SSL'),), -+ (('commonName', 'python.org'),))} -+ - self.assertFalse(self.httplib_object._verify_hostname( - hostname='invalid', cert=cert1)) -+ self.assertFalse(self.httplib_object._verify_hostname( -+ hostname='machine.python.org', cert=cert1)) -+ self.assertFalse(self.httplib_object._verify_hostname( -+ hostname='foomachine.python.org', cert=cert1)) -+ self.assertFalse(self.httplib_object._verify_hostname( -+ hostname='somesomemachine.python.org', cert=cert1)) -+ self.assertFalse(self.httplib_object._verify_hostname( -+ hostname='somemachine.python.orga', cert=cert1)) -+ self.assertFalse(self.httplib_object._verify_hostname( -+ hostname='somemachine.python.org.org', cert=cert1)) - self.assertTrue(self.httplib_object._verify_hostname( - hostname='somemachine.python.org', cert=cert1)) - - self.assertFalse(self.httplib_object._verify_hostname( - hostname='invalid', cert=cert2)) -+ self.assertFalse(self.httplib_object._verify_hostname( -+ hostname='afoo.alt.name.1', cert=cert2)) -+ self.assertFalse(self.httplib_object._verify_hostname( -+ hostname='a.foo.alt.name.1', cert=cert2)) -+ self.assertFalse(self.httplib_object._verify_hostname( -+ hostname='foo.alt.name.1.2', cert=cert2)) -+ self.assertFalse(self.httplib_object._verify_hostname( -+ hostname='afoo.alt.name.1.2', cert=cert2)) - self.assertTrue(self.httplib_object._verify_hostname( - 
hostname='foo.alt.name.1', cert=cert2)) - -+ self.assertTrue(self.httplib_object._verify_hostname( -+ hostname='python.org', cert=cert3)) -+ self.assertFalse(self.httplib_object._verify_hostname( -+ hostname='opython.org', cert=cert3)) -+ self.assertFalse(self.httplib_object._verify_hostname( -+ hostname='ython.org', cert=cert3)) -+ - def test_get_subject_alt_names(self): - cert1 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', - 'subject': ((('countryName', 'US'),), diff -Nru libcloud-0.5.0/debian/patches/series libcloud-0.15.1/debian/patches/series --- libcloud-0.5.0/debian/patches/series 2012-08-19 14:09:56.000000000 +0000 +++ libcloud-0.15.1/debian/patches/series 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -0001-Fix-hostname-validation-in-the-SSL-verification-code.patch diff -Nru libcloud-0.5.0/debian/python3-libcloud.lintian-overrides libcloud-0.15.1/debian/python3-libcloud.lintian-overrides --- libcloud-0.5.0/debian/python3-libcloud.lintian-overrides 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/debian/python3-libcloud.lintian-overrides 2014-07-07 01:39:04.000000000 +0000 @@ -0,0 +1,2 @@ +# This is dummy data used in the testsuite. +privacy-breach-generic diff -Nru libcloud-0.5.0/debian/python-libcloud.lintian-overrides libcloud-0.15.1/debian/python-libcloud.lintian-overrides --- libcloud-0.5.0/debian/python-libcloud.lintian-overrides 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/debian/python-libcloud.lintian-overrides 2014-07-07 01:39:00.000000000 +0000 @@ -0,0 +1,2 @@ +# This is dummy data used in the testsuite. 
+privacy-breach-generic diff -Nru libcloud-0.5.0/debian/rules libcloud-0.15.1/debian/rules --- libcloud-0.5.0/debian/rules 2011-06-09 10:43:21.000000000 +0000 +++ libcloud-0.15.1/debian/rules 2014-07-07 01:30:40.000000000 +0000 @@ -1,12 +1,21 @@ #!/usr/bin/make -f +export PYBUILD_NAME=libcloud + %: - dh $@ --with python2 + dh $@ --with python2,python3 --buildsystem=pybuild ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS))) override_dh_auto_test: - dh_auto_test - ln -s secrets.py-dist test/secrets.py + ln -s secrets.py-dist libcloud/test/secrets.py for pyversion in $(shell pyversions -vr); do python$$pyversion setup.py test; done - touch $@ + for py3version in $(shell py3versions -vr); do python$$py3version setup.py test; done + +override_dh_clean: + dh_clean + rm -f libcloud/test/secrets.py endif + +override_dh_auto_install: + dh_installchangelogs CHANGES.rst + dh_auto_install diff -Nru libcloud-0.5.0/debian/tests/control libcloud-0.15.1/debian/tests/control --- libcloud-0.5.0/debian/tests/control 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/debian/tests/control 2014-07-07 01:53:31.000000000 +0000 @@ -0,0 +1,3 @@ +Tests: upstream +Depends: @, python-mock, python3-mock, python-setuptools, python3-setuptools + diff -Nru libcloud-0.5.0/debian/tests/upstream libcloud-0.15.1/debian/tests/upstream --- libcloud-0.5.0/debian/tests/upstream 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/debian/tests/upstream 2014-07-07 01:58:33.000000000 +0000 @@ -0,0 +1,16 @@ +#!/bin/sh + +set -e + +PYTHON2S=`pyversions -d` +PYTHON3S=`py3versions -d` + +cp libcloud/test/secrets.py-dist libcloud/test/secrets.py + +for PY in $PYTHON2S; do + $PY setup.py test 2>&1; +done + +for PY in $PYTHON3S; do + $PY setup.py test 2>&1; +done diff -Nru libcloud-0.5.0/debian/upstream/signing-key.asc libcloud-0.15.1/debian/upstream/signing-key.asc --- libcloud-0.5.0/debian/upstream/signing-key.asc 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/debian/upstream/signing-key.asc 
2014-07-06 23:55:51.000000000 +0000 @@ -0,0 +1,851 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.4.11 (GNU/Linux) +Comment: GPG for Android - https://guardianproject.info/code/gnupg/ + +mQINBE0wy/cBEACsU3YgpGiJO2E/Te4VOnyesI5FNXmbBeRSZnEQLLODbFTjnLqI +lFDustUyQVUkpP/U2daejcoNaQqj1OGi3o1Namhm3Is+FlhHmxc5BWjgcoOcs4tj +wDsJDa8R48obCa5rBBQiCkT5xv2bsJGtB1/3LK5inv7xw/d0dV3Ph9rxHGv/izUs +oBhePuaFQFl3XCMSP1jtT7nyZDk0CLkkcLig94mFM9zY5AV5qirehkbw/0vCKKgk +T13lx9ejVMy15AhO+La/v67lNPX+suJOqRdKTku0N5l0irM25fC30QCLulMeKtC6 +YoalsGm5g6j355HiCk92knOhH/hExdd4CDcz7kA9DVgnE57ljJuhazO3/oC/xrsU +g9e+CQcoO5rlTj02NVY9fX0+mYBC4CRPd1t1l+wFyM3HGr5TGm/OcMn6cKGll29X +C/jeB5sxuJMZVEc2q/bZLr0ekvqwwCUEZLgJ3Xa67o6eysvd0h3+2tQj4X2nksem +nCRmHRDGfJi99z/hA5rs0DDxtg5vS2/PQGvfLhrA+fzaUNlrtAPLdjwzYuA756CN +NrfrI1m1CGwciURPc2S9wGZhpN2maxZPuObM8MHKt9FVg0rUIYOrTDrKtrV4Z8qK +m/GxhSbut+bpGnk3FNp61LJgGTwpMc7CV0vWdNkRZSQPsu/tpM51Nid9UQARAQAB +tB1Ub21heiBNdXJhdXMgPHRvbWF6QHRvbWF6Lm1lPohGBBARAgAGBQJNzq5UAAoJ +EOXTAnP1nSXw+YUAoLgI0qW9qUAAc64f/SRqP+crdpP7AKDT3McJLFVnoLjjY61O +qItWyXX1g4hGBBARAgAGBQJN0Rf7AAoJEAsDrm5OJFF8NlEAn3oKFJ5AYeJZm8zD +BVxr2YfCuOaLAKDtq4DYU7xFcYKMtFy7omiEtbEI94hGBBARAgAGBQJRLZiQAAoJ +EJGTmI/nDSNXeUgAoIDNBs/MKwsYxKC6VPUttkPEewJFAJ4y5WOczh70zE/WhJPW +TNmxw6yAcIheBBARCAAGBQJQpdVdAAoJEPCp59zTnkUuJF0A/0zC7e/lHhhLBBJi +zYKNP4Cj4o/LpM2rF30V/G8I3gGMAQCF2F7kQUxAQlP2JWgzzMFjUOm7wawKgm6z +7fFJgch474kBHAQQAQIABgUCUKWq7gAKCRDEgbL/FhIoBuX+B/9fzgrxCm9tH3Pq +pII02mk9PgI+b2JUEx+FrWVnarK40CN3aX/k3UECJhu+XdZjqGnM0kYywB3z8cnk +XSCpv55jyVQElsmgwc99upX5cRWelqwqzolYwmD+a2hpgsgrAVFjkeyyJlV1USfE +fOqVd8HLjjNE0WuRay8Dm3ym9RsTsaz+QQNY5LN368i2yj018SxBhFdcNyU2hFMl +Dke/ub5FXhl713ti7JOYXYxlxsafMuFErtYXOxGloj+xjJPW1EODB/IMhruM6hm7 +yhTpRy8qdM8+hQzsJKmchSuuW76+Pkcr+PCOx71quPwowG5he3h7cSvau+qVLd+d +F/bEZKyBiQEcBBABAgAGBQJQpayLAAoJENB1GfrQvJrDVpIH/1lm1AvtCgfWY2xe +Y//C14SPko8tKZA5Meu8o12xkzZkojkpqyyUsxTUij1hUu6sT/yLIbzEd8EPIfdj +87eYiar1bVsQKhKlnY0n65KMbE6U5pFKKpGz+vXj4LWgBQCpxlJdRS2nux2iGjkN 
+pr6672v5gs9BS6VPkiQroiSX2S1KMEAczkdh5F/fSOvIVpy2BFK+w40dVyB3DJKI +DGjFfXdMbWKMFMV+YNUYNl+f12p8Azl9HDMw6z663XE/kfhIoZvwakZPAMGsQGB2 +iG9YbapmpRB1l9/ScLKz6nli4jT6NhSr9hqHvbfO/KyOS4JOUx7/HIxutfvSVr+s +0G0gAk+JARwEEAECAAYFAlEunEwACgkQHCOyNmSmoLr/Vgf/f1sD+Eoz7bKj7q2d +zmz8fEAxiFQ++b7JoAws+Q6gDSWWRYH6lEeQDsQGpfLryV4R9EH7QdJYrTztPHXC +LeD7aZXMmoq1pZF7HZmJgiKxxwaeHDnY7jN2gMp5tLLz7AV72JVozWP9Ni3qRfZ5 +AWMsTzcABSTuk+RQvo9faGD029GlWavlLXBV01ymRlo+bbB9EP4tutIUXQFHspm5 +CUt0dchPOSzg6R6FAIXvnsB9YJa2OlXtpFXRxFssBHb25DoL+oYHmbS9o7URMMpw +azYRfm5qKqWE2TBCWoo33HI01F/2GEU4ylrHDPpSMxkP9K5ZjTeyJ2hSQiorjXMp +UJtUDIkBIgQTAQIADAUCULlACwWDB4YfgAAKCRDxgK8Iv22caYvzCACOY5jAJ0Un +hiJNrJDBLIh/RSJ6jnGvKu7eTD4L53ypV67u3dRmBzfF1n8Iu2JAV1tZGVohzlQn +6wJt22/dpQlin5syn4UVMhref4m6lscaYGpCBR/nDN8AOzA9PDTd6VfF7O9+ONVD +s60IpFCuJAcGSQcumnS/hbXZiX4WJHBz+Mo27Z6WepVQqA8Mlf3pYoCJ94vLrqEr +X9Rb8AjxEk6+pVzwS2I4U4P61+W+81zt0wduX8jdFv+j5G+AR51kssG0DO8r1LUL +xwdMOVH/DzW8yoP8gJXGklCdCVyda+Wnqa7/D2CJVtfqKFue8VA1572U+Qj30zvX +eMAe71YLpVMbiQIcBBABAgAGBQJN0pvBAAoJEBDAHFovYFnnnHQP/04/xcW5Plou +rrsT6aHuH8GRQdaIBhLMtQuJYwXv6v2GCBXJl/Sy8TWfVRQs2YwMhinQnr5Iu9Ei +bhdoQKepNmdN2qstY7UucS25tVdKHuH8EAyYtD/lI9rNYgdXZT2Y6Q8MBveU6vXf +LRTYa6Qwln1CvTypbRhsbtlUR2O/TCQ99sH5xtJiNkaZigr882jJdf/pnfx/0Gms +y9sAcQL5ICP6UJsH+kdFOvZz2MKFU9OkDqd6zaHSF46SBKbyOxjpJAd/N3H9VY0j +bEPxh8UjtTHKzPZTxI6kOuHXJpYFGG0pWwFa67V9jLNJRcONJUuwMcR1mDe/0S0K +7rgk90vYXui8cAWHJ/nLLvJh2BeTjMQSZgHYJAYQ6L1aIkzAEQCULKqNw3FCFRTF +lK/MUHtrkIVSBN53UdVDxEzE55UnaCTjjM6MuF23OvtTBlMfHumPLHaIlIc86Fvr +jXbRuQUAXLhpdGQN0+e50MQcANh9exkU12WPNxPwDsDoRUxtqm1+VWb6UPTyqTkg +CltuNXj5sLboZd6K0/QKvK9ACoBf3QevwCTHr+k0fOQbzbuYcUMztHK5Xvr4x2ET +fS9C6hBtAlFAQa/s4s0Q6yomzdzCIyVgCo5naHTuhoYbwLtJKoofyLohJVDSGi63 +7mEeEtias+enHje8aoeVhqdEuffz7/LAiQIcBBABAgAGBQJN/o3BAAoJEFVrv/A/ +CgS2/oYQAKVbgDYIgcWAVSkwJX26humW15J7ok4AteD7xkuI/CqwMfkEqjNkPH2N +mhw2Xil9c5q0/JY8KqV0zD2si6jiNPpY75NTJcbnHhX3Q8xWN3MWBu1iYmnOATZv +Z2TOSQHMUwi2z73jx5oTM6Qlmv9qmOILrBzYj2oXKSM1rZAxsGfEuS/08JZUtuYf 
+guB+LU04SvsgrYjWFpYjqunszGGPU4SDAVIs0CqRFqPnPA7x4yWId5dU3N8bMqHX +BPvl7mbrjMGyFRZZZY+/4umHbKay91zqtCnRSlD6g3KTxf7bbJZz948aCbYinwa5 +AsMOO7/PSOT8SSGjccYDZIpL8Ab2gT5PBw95t7jRrLKZzmPJCRSLp6X7su5QX557 +duiAkijeB8XDdF7+dTZVnHLH7HeBIw1tAPBfPPVHEjS4AEsnjsspZffvanqITfth +i7ZrWsbQ4lplzbCPg61MHFk3J8VtoufioLBJVwMgifaxAwEHk367AGfiNZCD77jE +DeNJ/f1+i56w2Khjjz39gZ486MFvuzQpskBosgdwWgi5NPOsYNpqNRE1dMYz5erH +IYDtmwsc3+6Bdey9f7DVRkSsz3YrVHcGTwKYNPApA0i914zo2qBEgGkr52vMgZjC +uA8Z1JRs2EGJR0EiT5ibm3VqlFzHSYuyOsF3qEErtVwo4yNeJR7jiQIcBBABAgAG +BQJOHNkpAAoJEBY3Fz6KGiVIiDIP/351vmc+DjAtCx6y5cAdyoVaHbayyXSE8Y4s +rOUmBwGWE4m06soG+SaBv7hfBGeanJFbpLFKTK8j936hWAoPGJ/3PwVbOt/H9Qhn +bJsJhY1NJSPCrRJf34Luen0xI/EnJ0PsXLTY1YrVux0qAspXKGtpeIufIxnbccM/ +9K506920eZdFLYO6gUTWIzfW9KGLgIyp8WkdXKwB5bWTOClVsay92cwGxn1OIcpe +IK20JSdxCbwA9Y2y8lDh+SfU6JtI8FeV5vT55oV8th8rlHWJ+FHOFmQWZyxFUuF3 +SRkzHk1CSRa+88RAzIH4odg5iPRqo3o5kTbKbSFVSn6qwz2droAsOmhZAlMIYzvE +Zsvf2Dhd0RjGLPeY+zJD/ku5m7G0sWH1Sq6l7yjszp0dL3O2z87wjG5dXdIK/Av7 +jKdyRcoRSeFUqwuwbL4vwfwSNefeXZYj7vxqDFpK2YhMbiqRm2LT2n45yP4KJ9G1 +PS2hdVuwmDsqZXAhcMfmD0zsD+GigUznBd5FDiSpn2p2KLvSiGiLLFQh2ZHPxWF5 +NsZYP0t7EktupP0/XDOjh62ARBYtmOyJWp9M70RXlH0o5WD5shEgne5wTm/6Kz9Q +pb8LQ27htSPmJde6UO+NFt1kOQEMK75j6IIa6e3jQXeGto1Al1smMJPLBxjEHBVS +FCtNOJTLiQIcBBABAgAGBQJQpam6AAoJEDQP10kiCQO5gaIP/0ToDMLC4DIQYPMg +DcrScbtIwU7icd8w3nLh/ARVAXpHV8Da/1L0Dv1utdo+e6tPmxHfocEfQAU9GuHV +RHwQfaQs4GCZftgY+zwX5Nb7qWchhyOrWmRYsqQZU91+b/jYgqU4njt8mloQJh4N +cI9fnHuYifSAOZ8APqzblddRMd9/SW437hDc9pn0oN+HcVoieRFTiSZLp8iNe8ir +fUtcATeET6f6F8vi2r5DpsyKi52cGwx8krHdxbfXanec0cVVMBhrHuil/QXr46Ad +X3lDDWCg5jljSdL+zAx2ZIdClrrpskjiIrSjFRUquFFtJiVLtR3qBpgHU1260Hpe +U8wE48GsxvvX7Q8oTPNRm9UVKlMpSLgrFhj47d67z6enRs7yZBCeshAgS59cnDoR +izVwIcQ7pIgPvujdKlT2Q6KZjun5BD3vfTJSapweS/F+mnUItY1wRlV6XWAAD9AI +8tklfpmekvmDb1ztbBrt4dUzDj2xdgl9IxOpMIqIdWb7AqFl2VgRAqQp/nkE1aO4 +K2b88Mts1L7BTjdrolDxxFQ7B6zp8HHOPb4NwyWHPYfFd4qPlSCj3m8acI9nXZqh +fsjwJTouh+7Y8k4wQmNcClzGnKlwO1nzZa+5ZSZZ2WFztC3nM5OKSCKKfKP1QHtq 
+AXr87YAcUFOi0I/ZywAW784P5U7OiQIcBBABAgAGBQJQpcyQAAoJEASYUj0W8TSA +LNAP/2RMbprOQO4JSHKPWALiaV2Kc0yLBB7873QRl0XIevWp37RlxHhqH8CLzncI +pFma3ykgELLEsD+Cssq0rFbAL2SJ6NwWSRHauZcOT7JWaHUKPll8xwPaoa9hX7rg +ax6eIlYAmGbapFNBqYpoN7a8aNkM1OUg18xa7dwiePiz3DPPQD96VIMgzRGwaOUu +OL6Kd+2XhzjoJF0bLhmeurBGygHJlLYCJYIqZ6/IX8+fU7ProzGaV+W0aXfVojgR +q4Zrc6PDHh3w4us2fuJdrNmddrnhPKDR00dTqD5oIHxnDscSGi5L3n+t8Ck0yUg7 +IYgvxKszVtcE4RdrFcIVI+9GTrh33+/TCVjQTqOvZ1MAj8J/G9A1Tnjf7YxRmL06 +k9RgFxGiwbzot/zKNctn4F25Gw60sCXyD9JLyrGN/2TzIUXiARhVWPt3+YArkF+u +pbMXFdwRhDEXHdL0I+ei52ISfyRAnTeL8ZteKLhyTVvbBmTF9pfWhexTqSuj29Dn +7rgMtmJIn1yMyaBSTwXGPpdCTpiofItGwQ1Rrb7AWN0zXBIE2I2hGY9pOTW9h9kl +x3iD/1fRISq2YbjAPPZjnhKAb150jh9/Zt9nSvyBzJv3pYkwYt7dkccyVKR4GIr0 +Mj1zH77UBH25vJcz6GpYUmWDCc3B6lkGcHYuHK1Gn55S/Uq6iQIcBBABAgAGBQJR +LZinAAoJEExw8Ghv5Q8crTQQANnS5CB2sUYLyiH6+V2BYFNq4AG4vB1eFirOtGEs +DH+HXyuvCaWj5t6xmjCTdFvAFd8GqsDXpC8HHh1lkEafq0+i8N5wkePHyasjClwh +8GFwoQQUT7J1SDoOCcIz8+74lBXZBdHn1r1D4hS2BgkD9ohv0gcA0yp+7M9xLhxc +mPaNgI0BWtsI9imrKrDI8Kkki5NUwEqyV9CxrXD7cZnqvypSiaJQbknojv3aCgXj +nbZmBvp1M6miD+0QgXqeKup0IEekbSvtuGg+/izgAnzfESLQIDY6xGP2k2Ptqob0 ++Ih77IjkKg6N3fXYxerVqqaiaeVj+Cvv2qpFw1O09/pDqD12Ve3K8n56COsMvsL6 +YckJEQb2+ZX0ttBg63jnLP7Acu6P0R1UMsj3iB9M7r4dyP7U6VkmnnzZ27y4fYg9 +o2I8m4pbuQGLTNbmI84/FiLDbLAvpcFSD/zCd+Gvz7WwhBjI9P/aOuY/wsA09CCr +MEF/bhHG8219edmAeA+mxGGqdh3dgQwAhS25ahepoXDN8RY7XAMxXu+Q36YqMcjw +p/MANe0fXFPo96boaaRaIKGRBgTiLS/ut/ChuwtJkYqvD8Q9Dm5WM8Qe1v7lPzzZ +m6EuwSw5FmU2LUM9dRIHOh38PAajSX/Uw9i7rmovfgOoLW+I9e4kEhrOJxIFoifM +M3gZiQIcBBABAgAGBQJRLZixAAoJECsRil+hXzC5piUP/jJvLTRa7f6ZijzrH8y/ +LbCCpJ4jtQq80D8VBs3DL4jRligOhSoOx7NXV3HNoP2AYC3WEdcsAbmBBb31YFbz ++BqI1wihbdK6ZO/r13Kdfsu3Dd9N4jF6YUGQneWsB3IHtcz7Gh0k4GlQgdDBYROZ +tUGMpHbFxNbU5qnEoRQWzOFx+cXvEvWpjtYxy3XoI7LPzxproLNSfLx2I7MhlIZT +3+iX4N7YltLPRwkcZYXgdDD/cGRapavQU8vxlZvzVcWzW42YgtDaCgYjMbOQtpR2 +UQAWra87yYakblI8l+El/uV+k8wdASG5CglOCSTiVKvQP/AJ01yNMIjeis2c3Awy +pFhiVPFT11aBRaawDcQcXJPTyYyFsJ/Weew/DzEwFnTErA5uNBwwnvu3knVwt3dh 
+bMXuS4uzNG4yQKHgRNJaiuB6RSugMjGlOODXYhNL8fuexdsR8xVBCf7q5YAzvBi2 +CwmicncVTwPxny7qkmXrBEQchoxt0EgF9t9AsNCzuLgqc45WLH4L1/843q0M8omT +2C0eyfPqcT8dZ0wX2UPucU2VYskTj90oXftiGrcSECL1nuCDUrDhNPvbSjc0G60A +3XMlzskbswLHe8EcT3WKYFkM+Db9i4ciTmwzOA4HYCRu3W6Rz7w649BhUr3VDcyH +75dJHFihCP9N5I3gUEKzMQlGiQIcBBABAgAGBQJRLa5BAAoJEKunMz7JOD1D5lIQ +ANt5NfV7mkAFFVYZ2F6W+YGERFEzKpUmxXuWFerUgAPWF3KQcgEo54ve9OGAxgDG +rVhxHytSjsmPusj49Eg7BoZi4UB+Aq4JEDenrBO2kRY/DIXCiDgKLub5TUoL0Cp8 +PioGSsXN0MA6s/htuzkWN06SjrcJZk97F1G4ru4bPK4Zia4Ov8mdHgHeqP0pUViy +0glsRoIi5rV6LRVKGfGy9P9OOAan6TZ2gjOeLR6oyeZynP0vVdC5H34PaoMiao1x +3qyv/lKQCTCw9e0eYXsCVl9JclbHuIrM0raQECAxBc9Qif8K4/7rQoKyEvC3AsCB +LIy8MH6t6DzgcfrNTursIJTBY6Dpuh4W5LVOqhEKH+rFBCfkS3dCGv1/0XlBriDw +6+JcjAtfL0n7MOKH0a7TzFnPSTk00Xnyn/b31UwuuS0yNzdroumnKHFLRgQCDREc +azpiaaGHrk3JYd6yS135f8U7nB/haQSiH97cB6s18vPEsRt0JadsMAD0/JcpuVAv +ovie2NPc7r697uH0U9EPVs6x4ig2L0A2xD/Hp6Ib1wlviy5JInlqqyqtw15Qecg/ +VswaKn3iP12EID4LF4SJSyFhuyrSi1smjIEV+MiQCbK1f/YixnaZN5QEgMCcpIEI +dXahraJIKAoKeLRxsoANFOkkWou6vH1qa/fUteG3NN2riQIcBBABAgAGBQJRLqep +AAoJEIKLGiNYahCeHDgP/3XC44oG616xzOj39n+t9zg7INYjR0gRpuKn++TkMv93 +uX8N7FnUAmL0LCguBCWBQGtewjnP3Hz5OA2TITICwtxxzIC1vehvlu6qVx4FiRez +i13muZnEf1WM9cJ6Dd9bg2KxApJjc3Y4XuRY6tuKj7oetd5ksLZg9J/ZJZ01Y8rf +lWWambEo0w6IqhJzU+V1PhMf1C+GuH/EuENII4wwZ3KGrof/GKVwguHpYTNWqWtB +p1jgY9BSfxB8agGYR+DrtMITusQKf2kWhsxqFGn/e9xCUIkUiEK4LPLazc1xisTd +mtM8bHJYvXGiqJVnJZL6MNlP4alEb8o+CThvxdtJQ48ry6Aw096MdZV1rIoNQS8E +OIw18vQ4/2EpVQkH47zKmUQSp/un52g7tf3JQCbITYtc4/jH17nuO0HPPrauziYn +/hMX/yi70cruolXY1oiDKrU/kK9r5zErd9N8trPtygkvluzaxHDI2fidpbf6TC1/ +ND5SYM5uVXrjGln9xOtFsRfu6cVu0Dzl+bwcY/jvZsYHZmA6ES/Wtc4zVPuu+zwa +CteuU7QXLxLUwyVkO27Zfu/W6HMs4/VkO6+xuMAhuaX6aCRIYlIGltgvdEdxdaWX +4hmEfviQjPLx68U9w4qO37OrW2HrbIvRial2YkA+Bd9r2BFIdJwtz7ADpkHBSEs7 +iQIcBBABAgAGBQJRMCV2AAoJEIueQ3vhtc/NqhQQAIsnM+7hyQL63IiEDNVZDQ7j +FS7ZETv+7jDnVR9xtLakSm92be0towshbVaHiKdYa5kbTN3nJYpvQqeqib1l88DG +EH1v0mfOnL70OzuLXpxr9xIVhbQKDXYozbH/nOVB1BpAsWh5pX5F8llFRgYlo3Q4 
+hIpx1tsPjp5osK/abbHjUOqUy0o1iX8RhUY3Bi9pS8PWKAlsTYqPcgQB2bAxY6K9 +K89Y0ncBbke42nu+bEJ0Xg4dHbq9qVjil5qyzfJeBCDlp4uuJSvoJlUkdd8x2qYG +1LcXfscKIw5pwEow+h2C2hatDIDAh4/i54WDlEo4BUDlxHzi6dTrG7Qqf2siBZZV +oGKM4tNeOONkDuLARn0Px/UHBNTnHFozqliwBbGwc9tAJqqfBq5VV74HJX4YCVWN +AtFyoNXY4TKfbkaQFPY0xt3kGiu+EN7IDBgkg0kwHiyQB9boNwdrDvrYApWczBBR +/SkmBDYxDtpVW26ViaUiRSM0sZacNcgfsXj9nDAcSmnLGyFnsC2rqRu5rWjbgPIU +S1eP8OyQO4JGAyrIkLZZgnk08t7N6Uc0xDXKprCpu6MJmsDVJaMSl9LyG1+4X9nS +PrG/rmPn8wT3sGzcaI9J2ADmTN2Ox+p8U17S5zFKqzv21JIb2jwPLjRG0cG2hFap +Z8mVilGFgDX6Ak3MNQF0iQIcBBABAgAGBQJRMK1eAAoJEHe2tpqeTcxreeAP/iqS +nFRK3cJLh35/jEuwOnXuxEDr5KB3uUrP9Wp7DTwNa4NwlIhNIP8fM+Lnu+oBVhWm +9ULn7qfnNCwNW9V5sOAB1yn3AgCy/8HU753w+ZmoGkpBE+vYx6VHC4FY6uBH2swX +N3MkI2yeXPigtbnLcK12J6v1HZKqT0XKy4ra8g/tYZxQ14b8ssK04bnV3qK8vSjK +rLxy0TQ/R3azEf/XK+oSXC4dRmyZBTcH6pkwUZ97qLQeH9m7VhCzrDL/SNEDtUuZ +Mpr6reXKf56FL1ESxwiy10nIMyNhOYzS5gm2OvoZeR81F/+DITncQZilPduZIL96 +xuuxM45g47dEFiWfD0EaFZYIJvBl17pPsA+L3VWrju9XyS1VzvF/9pY+6VifFZjf +XEAiiQ082YlxTASoDJUjp5N8qog3ZNXT7z5eXI1dvLmD8DiPu3JXy1G/XRzRd1K8 +vuLCQpAu1PqUFA+7gi1EJlyqrVxnB/clyLV48fDnHI4lbqpOz0B8CdCCkPunOFi4 +q72VKDuzoUGr03XGYUx/ij+ErAaHVhgJaG7iH/kDjZdGYQV19NBS2iYEndSLmnJk +M9I18VizIv7BxD8WNFj9ybOV3h2syFs0SVSi0FTQ+eJPEZActkiONmUZT1RxUJtE +of/vg+bsbpD2RgjqArfKAQnOWe/28MHlPm+LFu4DiQIcBBABCAAGBQJRNR8AAAoJ +EPOtXJSmf3B+AZ0QANnIP01vY/wWryqaY/vUvNzGttjjixi+w/cH2WgmZyWdw0cd +k9Su2F4BlPQ/VyVEA5C/RnZkOasnvkaMRmc/7f3rpceW0UXEdCwU4gJ5pxdKSx27 +qogbcmxj/pxNHTIkTXBrmyFF6u78jZYjvdtDj1FS4IsqRd/2qp4Yfy8mTWDGH0u5 +slhIYag6DIh+0BxqhfPimr3S47Y/OUSBrEuEnzZWDjM3QX7xfax2zEz6MMpZ/Rhu +kBlNno72cWq0bq2SQ3BfeD1R1Qpdwb9FCtVjcRfzw8DVcQwdir1JPXKrUj2v4p+Y +CA9EiJTItnadWmQddi8niZmP7x693v7bLbdl20xEigYO+aHO7+miAwU2KYFO+FwY +dPA88u2Rmt7Zge7lHyHicxQ6DwA14y4mzo9x0A9vDnJI+kNDPgancKsgZN86eBXn +efx4FmLt8cx8otmTTfeROVLFnEKJc0fe+gNvh/1e59aLGUkxLnLFbOBVf83uNg1p +tEMJVg+58A8xNdMrDySJFknBhaNueV5dqLoPy8ZOw8HSclIqvrob8PceWRlhEWUM ++VQcLBktB7d/59Y1OLYztuY72ClLyvw03wv1xps5Fpdr6GahpE0ILCZwBlABYpSP 
+dGuOaGAqADJlMZiw8OPqQJkTWZe295tfTukkt47UuHbTETwKZZYD9dxLtz8TiQIc +BBABCAAGBQJRSiObAAoJEPOtXJSmf3B+WUYQAMlkbGKPgByZndeEnIraWP70b4IN +HYL1rdme1tB5m++2BvaA0hm/Uj4qWix4TNVwVIXki6jr5m/WLK9WnPNyoC3qCPPr +9zueABIcZKkP1rEcuTiEm+/BIzz10XXrE573pcip7C4lHyUPeF+Ogb935s3Mc4If +1C+HrsIMwdOF+PXK3trKRKAbwchD2GkyfH92QL7VMxxCQw0O8V8e5WMVHV1vj5te +Da1VNg4Am8YDGteWEsQVtvMW7kb9JkrIFEkLNj3PfSrPNjRBMCB6Ktl0bosA1NgP ++H3d7bDaB2Ub0S/TmeNwF81jTON8aVAPd9/YDoWJ4LhlGegbQNuE9Bn39aKlWN2S +2eRUghZoqJ1noJdn6QMpR2c3crur/vzvw+xc8NZAsHOw+4V6D1ysB6uW8Nb+AAfy +cmT9tjuTyjI2kGgCHsDcAavw+LhF4vy+rbCKfq+H3u4R/yprY8xmK3Yd0SmawaqZ +JcC1n69+sECVidfBj29g36qgdzWgm74ls+aXwqz2c8mD/aiCvTUa/m6zcmJRVaeF +lvP7pl839aoKVa1qHybvfju04o8zzOoXwCYB5CjpMVJCXLilkZPtopXl0Q1LtRNG +gcW3RMOsxO6/8tL85gyCo309KEpDGcnY/IlU0GwSTwTuuq6SPjG6PfU2JdkKuV9U +8rzvNm/59NjKLABHiQIcBBABCgAGBQJRMwxUAAoJEG8M2ucAtomdgJsQAMUHqDSj +S5Lf7r4Et+uzwTdWYYXshunI+WLw/bRx1LVUSmMnGlBCPV4oEsZo9jQqDy+9/DBB +P7pY5ohLzXaDEXy6rk8xNXWyYgmDukvXVWKqAE84ujaAOViShVN482KE4Y42ypDc +avvWuSRuxBjXBPViyhk5T0fPbHKNONs3T7fq8aw61R7/paijmif9QMHRGw5hWmtU +st/DQRT1Rh2C+kczIn+mH0j1VBQtAT3uZE/JHDUX3wFHTb/m1XQ3/s+YA7CjT5pI +EuLQTLT2/UjA1sAxSsR4OEUQCObJVCptU2iDRyOK/NezFX7pv7x+FrZ1J2km0wMJ +ROLaxTRw2HFZCKghjsSL5ptRozB55CVnJ7pNn7pRUcycjJDlqNsCP8RkwFuMU0vK +fVfTPoJPMMCo4eDdsqgZKZlVVx6X5B1MC1S+Luecyqw2pOr6iJnHlJiKZdo7NNZ6 +lI7ZoxfxgxSY1fpit/y/6DcCMAkaXJxU5wlMb5/Ifz3xKaFs1QoSW9zgjiuGSWYR +QSiix/kLAZ4BPMSiVZvzk/4Yq5/CYOzHfJNufPrukbbfTLe4UjrmVhZ+/mstgxfv +60xMFJX8qdUOmqAYaGMJRJCNMwNJfvS5djjpHLEQrSVymIcLdoYo8cC+LPkGS9jF +3TfxoFOMbYRfAeGlhJf9AWZ5zp12V1NJvaTGiQIcBBABCgAGBQJRNAv6AAoJEGLU +j60WoN4BvJkQAKYcvKUQni/Wdy6hI2MAHj5o5TNSCFEVRv+wVf67RFGQt7y3XbiV +rkGgmbeB/cy8q2dH03zTRgnwbtMdoG6ezv3Ig1QfyHOLYEL4Z1QoGoccb6Otdmc/ +7W9t9J5621xtyhyKPn87wio8fmYeVrFTzpG88Wva7AFTbg5GHgMsGc7eHg7QZbYl +ZG8/CKV/ekUb5GrgbeUjWvSzUzYR1qi54eylqEeLFUkzr7hTnI+s9sSufg+Q8tpm +ssaj2ZGrxX6u5+Xfi/pCGaS20/Uc0K9K+VT5WR7Kjg5n/3gXUwG4GcfOKXpvFMv0 +VeAKwpdatVE51NfoGoNOxoxJq1n7VH936XXRBTu22huRrLobJkZt4rv+P3ItOrCv 
+ELUUkyHc2skZa7ExtZKLwOucXAFrH6CY7NzK+I7kPafgwS9Hv3oGqusieXVoeFkq ++T/jHB00e0IvABaVgCuKaWi5ceBEQjE+8baYX1/i9GJl1xpjS6cGAEI4xZIsVpmL +df2DzAV3Gg4LH2cEQzByDi/lkfztz50/TecTnycOugbU46+TFufR73HylxAonIVg +dzzRAlR6ksoSI651id2mlFD85wZqNSYDhqcfkIobyfon3W/kgfWpDeTzngo9HzRE +l5gFj50KBJRjDBHEH/lY1iTYIcxDpF+pQ2QxRycfCpB3SRPYotm5GrCCiQIcBBIB +AgAGBQJQqDUSAAoJELV0iWeArwfT+l0P/30mMWCh6bwngGMFwMS7ZxzC/0stZxhU +s8v9qDwR8yoQ45+6goiNaw+au6w3sm+iljwcijh3MPL8rVxHt4ipOWwCJD3kx0YV +CBEXYOOaSSQCbNXz+TCjOhe8y2eDhGDwUls77Cp0vKszRAWATUK6lPjm3ny3PJmx +U+MJfg4MuOw9TC72HlCICKZ+oXPtrCZeJMXPKGldY93IEFrwOzXsaKU+4pZ7xNbH +udrrxMgKVWcjsu84tEOHemsvtfJvIZbHENR4jpGOuW7XvPIzX5l9+5P+MCGiuec6 +yWqrUuHzyOpTYMy71+JuSrPTawy/0AWrB4nqq3ZgzyhGyyXhSHueG5+utS9V6frc +jIO65JxGTXC7hgnd4sj3G9qxC/pYTgqAJmoD+udxms4NE2jP+1NTvrhdhvubjmX2 +ozMsRw6ixpu2FuPFEUz4mCyzTXjC41WdBgEgsDCgNqNOmxVCpfeQbjIAoGEowBa2 +N47mJkHppKNs2CoCF3EeNAxpt5rsrx+F6hwZlbmjd2zyhESDl1o68WDNAVA5HHhP +G4q4GALv13eS5MdVgIWsCGI/2ECr9nwcmvsnK9YE5QMg1534itua+2FReJ7+kUd0 +Q79kkQeZzu3kvT0oiFFgCx7uE8osuNv7AkB2c9EBTYGfLVO1sWu5ka0RdiRxTvnp +R6VsD8/ZganyiQIiBBMBAgAMBQJQpaj9BYMHhh+AAAoJEJ+P6v7dfIukJoAP/1+Z +8tq1Uu0DhK+3ESG0ZHGj0V666PT4GQsZRh7HKLzBrcVuWOpvOqzLoBRqyrF6BTZ4 +VVRMfA43RNK1k7u1pJScxs4ug68xL1tWcAX2TJW3feGBhQ+ngRgY4hZ5any+9mNi +KU+J9ngAch3glU7ml8M00mVg0Kh+lxryi2z83WmWar1YTHePtI9lHJ0APoZEMVlg +5F+nWADBZxqjULRUJqJ8UwlbUH4B/qdmqrMSZ3FO/uTklhQ8drUY/uLN9H6/7Yvs +It501Y57TVLCog9m+uV/sAjeuoCHH3RvKDwfnHm4OAopSWA1hT35r4O3Y66/cLUG +CSqjqOsnqBRdos68JShL9p0r7heRpWJVznPTfs42S0Nv0oHpjwgjYn8F/ozQiCfd +nZ841WtuhkYdWtie4MdZ5BVQjCKRcyZZqsM7dJVrDwNwdrbHXvbKIFVT1X1+pvt+ +NJtqEN/ZB4FZku8lRVLdPwBBAZV6EK+Bzy/FN9B3ub6pXEstX47YFuIyL7Wd099Q +MuNcPXTAzEhGqgQkWKHwwmwZ0Mr98zRDeXpdSag8RFaYghv4k4GTBswcTO6Mj3pr +NPe69MAhro1TSER0VfYimVU9DrMuU0p9FuS76TSMkFi69KM1rzNByYVF1EoY09tj +JDfFRM/UVxFNxXiH1er4nm1YmEF0YPB0hbwSK/x3iQI4BBMBAgAiBQJNSLFIAhsD +BgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRAsB1SyzgaS80lcEACqAeqQnF2l +EwYtUgUROgXelt8f16/OxJS0E1DjOVe1llK/coQkUZQ77YZb1m+SvU1P+59+pGKb 
+Ez+vVBOsu6ShzhVYSJgxCrgzz8sMcZHsG9m5fbG1BfrMZORJIR2HA957qM85aIcl +WEPVVHBsbr/w0iIaUjkvuQj0Gm0H/72mvFhGQxmpxDG0h3+BfabwM3WsFFVu1wQs +N+27NnD5JrsOgFFwjROvULxQ6Paxwn18T2bVRG401O7gBMoqCHd/yeVi01wCGUC9 +Gl7MKtYxMyv4zM+slfd7twsf5YhFwV4Eozctr7fESkSHTrYhqcKVO/+sHu/3cCwq +AsYsGpeVO+clrkph2BlvvmcL4xZ//VF6KY+SxVf3lyGyb/8ioUFZzKIrOD1b3I8U +MnvhjxyDWPyP2oTYRpWdixu1CikNRNiQAoTuKUhqYt0JPcnT0+pxzZpa2UR6TnvS +V+S/JJkAChhkh633rRoLDWjpmLTkhDu4z/QKFkoo/F5ZLInU7F9h7gV8NF/dLC9/ +T7bJalx2QlnFr9NHIDGlBGkG2+tkgPHZ4hoXwxM4OpUNvVffyTPJhfVLV43UOd0l +uW+lco6lghdLuYFciR0v0sIVDYNvENRWu9V8pbP1KwRZMxn99ye+DfHmzXZPsFMo +bamtq1bg3QHn9C6kZfjn7M4OJ+v1A6m4DIkEHAQQAQgABgUCTdDxuAAKCRCKr4jW +2E5BrmdrIACuLgHuPzChzJY9vqAVBFTGzDjC+l6mBPw3eRzfd38EX8QV2BXHGC6X +3W1eVgvw3A8eapL9qT8O8FkF/UCC7uMqmRWZVN3bfDe28g1bcsYXlR6xM8DIMd0m +QxzAXlOXPUshKI26RPBukAVFhz+oKedg66a2wYQdSu+IcEAneVEi3cuyx8WEeQMj +KSuTVS8mVt11DQ3CY15l4f4eeNcZenzjIF1Lv/jwRuPf0MS25F9z8++0/I5Aip/v +5w5pWpsT1TNtI99JyY5nTxSV4T3lc082bTCTpm5PyRmDy4zlVpGrETGLNfKfS2u6 +ihreQS+mLApIDLnPKOLz1sBhQHUq5ZHaCP7JLV1spHfN2MN+zeJwEuluB7k7FLo2 +RvcuEK8PH8c7ZqHbfiQ49wpdsl2dOWIzEpgBaP9HHfmiGZlAl0GyTMbGTkmPRvmf +Mt9RQvcXYBRnIDWMzlfvVDCC+JKLwQCmy7iz83NRPInUwnSmImcKT524ZcMGADtE +0k9mcj3ZThaYjej+qKWnxy7EBJdY1OCXARdK7Z4O6owghKdu6tj3mXLsGadSf4Tc +3jDqKt3KMTW3yAlobC7m1hhMrckkQIIP9gpTwU0V06ZQiwk+nzLipz4LiCAePkDN +9UlO8+xsMSw0vDDyu2HxJJrOP0rn1HqJgo1EEW2UWII3C8U4O4m12utNOQGGmp0g +riBN3V2mpbg36yYTo9r3glCcA6NajU3hisQ9/4S73rLEHkB1uSLMKqTl5YEKjhrb +4+/L4pIshSnNSV26kkPR+r0qyLEEIY82hJHrofyERc88KtzWveHQ/BuR2i5a7XGq +b5aKCduNRGImWCIOioSOUl3okzfegcLYe/UBmQermPwNwqiPR+usvZK/bgSB4vYP +roiEBTrVZmCP/TVL+S4/gHnWhnDTSMf8UPZhHMezKAzL6vp0T2dv8EzO31fSf2j7 +GeH7Ml+v6lqM3s86dEBrsSgiNcYhZze9HHFN8XPqQcT68KUJLAp/AtRIaGG8gx30 +TNAL6lYtBhpKYs2h7GiYLphfbkJs7FjlcyjMkkNgg3ML5MSpYckhe6iWsThTtFrh +2o0gdgp8OzLVT9NBb17AEiVTHckP+YrxTMhvRUh35ja3qaFkuuFiEGefbjJSTcas +ZnPSMzsfmDwhgrvlJt9QVa66d58/5ptI/Gf+HA3Pj20rDgi1nYHigEK4/cehl8YQ +1tAl2gm7+kXSnS1iBGsOam+eojNr5/rbuUILJd/A0Nn7TWh9JAmZGdBGGBkikZ2/ 
+LPhTmk9fGCAa/E4is5A3NMAOJgjWN49mRHSRgzqLiwBdnEyNVspb6oUN32gyphfw +mcqIgZ43JrCLTE2q6xMp4pBMoQXaPOBAiQQcBBABCgAGBQJOdc/kAAoJEO24wIKm +7mkIraQgAKS6PuOYwyS2NxMeoLZlpV2slvTEHcR5QEvjMQ7S/k3lOjrC9v7HLC7z +C9HgTTbROvVPdv8iSBjxDHBMftN5S017MbnjRZl1as2RHe1WKBj8WWQhay+mZyFS +zGtcxCJhS6Musum/r+++GIi6c5q4d/UuCrnzSKFjuwlxat/P9fTmvv+/t9pnXZO3 +3RshRmY8/Mgrzv7AKh2RN5roTreHE70xuhIxOBlel17Z/qwHJPSgVe/XlOY4HAYd +83xhXqJlz5ae0jQeU/nVTwYdhBRiF23GrvFZGnDRpY/c/sMdLiIEN+RovccXPG9Z +g7pjsIFJZhe9JGmS+s/eMP1K4Pmkh7W07H1uDxSEASZOhiQlFwRynblO5nCizfXs +SggX0iLzigz7YRjHTtLubw1K6+pypSOARk9XP8JZvNg+9+FKvgJzbu3kW4k1WdwP +TAC2v2w5TXdyOSKa6nYHt+4z5m+cwBs27CE9r72NxozS2nseArwqCIUaFtoBDbeV +AHYBKRDs/oE1W6pZlvYdfgWb6BdMFEOQmpAkbx6JOg6FNlkQI4cDm0VKXzk9v7if +wztBzWdfw/FKeMBlfHrYZPLyS+M2IWhMzBGxxjuyu4t88hkY3FTjA5rMuow5BhhT +z+g1vmsykaIETLp7BHZjvxhrguyVQVbz7yPgFvhr2jJSOLJYP6QPzZhW/ImRn27S +u1jno/gH+wubA08hH9cXGjBSxib0ndFmCJ3fkUlQbiYUgFf3VfD/ZazBa9f3Cxh5 +LzDf19jvmQ61ij2Qto9WjWsVlGEyjbFEfvnoB69FQdVy9ldJr4fabaXYRpGme+AE +tzUfZM4gCn26kgD/EV1V3JsX0P/CGjmHqQQBRc5UMapc65ru7A8SeYS9TXFNlTUY +2eeYfW/G+6mKRqGqE8Rl1oSsI+M6PK6bGxTUmktAMK8j6aD2vTrKkxsHyJDRH8Ot +M92yU0w52Wxy7KbQrcuCLUb05e+WVcesQgu8w1Sjv6zEvoq1GHbgYdcn5vNGp3xo +zAsxvZekckL0m2suX9NRhXPaxJLlN8m0SFEV6v9oVHSQ9mPbXow3xxdr4sdulhNd +1Tzxr2D7bdfhdOcCbQiaej2uVzr0vvDrmsaM5CEW3fGi3hPEf0k2g4e9LQSYK5qa +qugiCYTw54qh0nr03Fg2MpbmMkLiWuIHLrtbpP4h04nVKk8XOcRofDQmmwpDZwBo +5JixChgQqyHOw1XNmHEtMp35BONMJ/a2OIbMKCQYhVBW4bR4bHtl1+k5uPq+wPZo +ZHtZSA0lmKIsZAI6flkOuzepiDOofCEG+zvZHczfpcR4qpb4hp2UYbHcvqDUspy8 +j4jcqTUklnsgFUfU51YTCTp/jy+18SK0H1RvbWF6IE11cmF1cyA8dG9tYXpAYXBh +Y2hlLm9yZz6IRgQQEQIABgUCTc6uVAAKCRDl0wJz9Z0l8Jl2AJ9q0SkR+HT2pNkR +2H5gdFokYwCySgCgpr38dOBns1MDDjD7JL3uZYRZiaaIXQQQEQgABgUCUKXVXQAK +CRDwqefc055FLtCSAP0VwT+wML+PaNFHrmdzQVjKJY7dJSvMIsqRtnpU/vUrGgD2 +O4zwNExfKWQvwSpCz+HVyLF1G+ZRDkazYcXAT3kp7okBHAQQAQIABgUCUKWq7gAK +CRDEgbL/FhIoBgCQB/0U3E09gyP6ZrndA8K42gtQR346/N6k2F9p3c7+fx+3J3LP +84CvD7evCQ4+C1ybJBNvagY2/f4RxSwBh3+s3jjL1EwLb6hWGRE7Vftgu9fEpmCX 
+p9L237ocoRazsWi2RQSlQ0raVBPRgaF3OEfg5Jyj43LcZKyQ28Q971kEYzZabgBw +GEJIgJRRQwpHRSdoLjYW7MFLSDI09wKApoRwqaD98LrKS4a59xU3Jkoma+/3nLy5 +ERVQ0HWyTJV0JwaGK8N41NAzNizgQd4/lcB3DnPbCMyqE2Lx1RZlF3VmGnbsFO04 +f+iW7FrOEoifWBrpcZWyCfffn3JQyvm/EqGH7yUMiQEcBBABAgAGBQJQpayLAAoJ +ENB1GfrQvJrD2i8IALzmnTK3b97MFMN7161jyfpNCibpuzsmxZhMd29YMt+J8V6c +ePWGvGYKxdMUr9SVAe/HMpTp1f3bZMlJMkS0RH1LPJCU8xWBka0FOND4d+/J22bN +AQwEX/E0BHj/DuHV00s6kqSIsXikxg48Cz//wY18ajHb+H3z/eFT5nspGpUn50jG +w0voDfJZgQFElaDukNAa8wIsIKSjlQs0+XEY9V9drkpiFmMMvdInB5yFBXc8bqP4 +6fNUi7LgLPbJ0fRRU53rNmIPMNw+rQTj/rs6XNTxLK7UWqXyvmkvGoxpdZR5vu++ +xhFRJ3aQ78wlWbLCkGM09E6tsjJ+VlvMScw1B9mJARwEEAECAAYFAlEunEwACgkQ +HCOyNmSmoLq/2wf/V/OJ0ZlLbX0OfO7kbs3Muq09FsTyA6dlwvJdn9/KXkeYWsjV +vqz0O4IqCXsMGwV29De71wCf9kBzY4KFsnvmUTWDqDl7tH65ki5ZgCiPyI6OV/wA +2nHhf/73xmoD4KpmNwQpLWVXcxHAdDDDifT7ug+rCD7JO8mFd/K8fbbHC3jiMCLg +91oX2qfHYJaV+3jcbvmv9xrJLWOBWh2nI42bip/SYT58+ui+Wku+9fmZiGaaIxLs +s5HzAEX7gATehw9eLkKexbGQxtIkoEKfLHj3s08OpQ5fBWpi0P/LByTofsi85/Oa +bEMGNeKDDI9gAX5Sd0niA59GyxRdjLY74vUKV4kBIgQTAQIADAUCULlACwWDB4Yf +gAAKCRDxgK8Iv22cacjcCACpRU8L7Jk2lWV2/sq8aiLCAurG2EXxgM7TmBa/q0BD +xiaE1miMvA1fgxXgpVQZSwgkz7xshw/gni2E2Gtjgl7HlWLs4XcKcBUc6vtLq9uF +eo1FjTLPbtO17ySTDklryXOReeoAEMtbPnNtzfnoqFMWUV29P1BAe//vQRIBbbGM +zyZ3ZHCYFl8ropxPso5FnweR35nelQ+MhDXaJ2++niR5s2wUvylSIX7b5mG51pTy +zhh00SVf0ufNQufQYbib4EFEboKcjp0BQ/NKJQVd6m2qkBv/2f7PgUrM2b38nnLd +23mfYX37CIhkacLcUkWx5rnaJrtos490nSzELFDYULU8iQIcBBABAgAGBQJN0pvB +AAoJEBDAHFovYFnnIEwP/R3nyf5Iumbev/4okNtMWq20bMODJqYI6WeKt8ZJBmF3 +yK2WSwuGIQMAJCwdn07xwviR5SsmgpT6JfMa+bvi2n8kuKwzIVOYAYka6X2Ivz6Z +lmDyEq/hbIU6HF479aGIp3ta4xRjLlsXDjtCLTVe4SpVCoZBZTIY/5HLNVOxkAhe +Fu8y7IQSxv2x3YF4HHmLBj7QkzXPgAvRQZy9xUJkYy51HtdEowo4dVPLMPBLR+XK +IfRyVjrkSyISG3YKqSBzlaXYh00TSR+zTEzlydyiKn9Alx5jBNVx1+iWTN/uQpG8 +AyTq6Quf44jV5Zwv4PnBXKdjSYj0ikZ7i1qdM+ZMZjrwgaanYtcdLf9ep/UEy6cv +iAdCD+T6f1iIUCujiOT1cAxO0+4/6zo9VRChYsxzLvbs5m9ST+GJsTWkTTUNcJPc +jDaVIC6I7kTW969CpAg8VcuWRb/82sVVWM6818pqiHy5dVtajKPHfWriv92eZXp1 
+GpoyVZsvhvhspxMV/52lE0dbE/Wq6rnc8aop/q6tTCWG2TtnmHItNu5PaXez0hN7 +QsyhDkhl9oSji3tqq1Do3zs4XXTZszGzkpmk/zw6BorhmzLEE8gz+vrMFTHgyP5f +4uEUlb8RyVBEGtlBRexnQ2wLZceOPCk3+JKnu/ONhwGDmkD0FntCjWbsdn/CG112 +iQIcBBABAgAGBQJN4PknAAoJEBhSRsiCiTQXVhkQALBHCCaPWGm8ib0wEtU2z9sh +xIubykb68DZE0PcZdn4IUqiuKNRlQerc4N1aB2NZ8+o6D/pP//pVjiX9st3o3YwC +5aODun0J6HfeadT5uJMnRCNkgskD/nclKROPvumVRyWcSDZSkMzlxord4s2kxBz6 +hKeMFNvlqxrhte5Al2fy5p5RkZGNRkg4IY4lKIzPRtfqYZ3wG4oXCitEL2+ds+Gn +KNLxX6BEte2vpivkb0Ky+LN+swpcmSWPFkg63bYK/uJiailINl7+x9EZ/v2b7irS +rnEcfafgNLusHFHBtHei2W22qJcn9Js70WJdOrxYqqnnPP/IXkbqjlwI/jqSrkR8 +xz17DpV0zbxBT48RlA7Ts2YQSImP4Tmch2mueGNVpv39LPyEleaLW8KDvVzNgxfm +KdAizPr/zhgHmUku0g2xZNUnAFTGUCXwegrNnfJWSfdArvtSV6VJBLZ7VLJ8gAKm +Foax7kYMQn9jpE34KUlLuyrdOPGr699Ci0NWt2jCrozCQGAR0z6RdO3ULmtKdtX3 +0X1rbroB+NGaXyPvsRaB12KDP4j8bmJvVliv8VOJPxpFbn58NJa0SPf7MuhxeKXw +9zTkWyzDOi2rc3u7o3AZv6UqKS94ZnUUmpsY3rx6PhZmL2+Cw1yRJ6yQWu+uPgKM +GgDAI56BuoTh1r/jdPyKiQIcBBABAgAGBQJN/o3BAAoJEFVrv/A/CgS2kCgQAJ74 +FcXvzVbGFnvO8TMLlG/QtvraNhoqbPOSG/HnjZEmJXBfudx8/3tpX5Dmzu6QsVlw +tXGHy/AAwg/+oEbnwmrFtVdtCb/Kem6SlLXLgwuF0Cz2NBFMV8rHTyFhrxnDmJ/t +3XsNoJ7VpZhEyR2K7ID+6aa4ahVbkop1FRMJviGtooGXxbIJHsN1CQaNVBbdLF8p +o/12oMdRffahulVGj39xoWUDGGONc+2oefwWYgHWj8F/MxRq70rd3mW8IrcHEXkE +bLkgvLY9KZ9XxjbgFMwKZQuljEGqprflB9Han2c6mkAAq8Xa5PW+50OBIbceV646 +onOOktcn7+DTB4LyPssnV4SXVc1kWGzhjQaM3wiuR0R2w3/ULwqw4nqLVPfytGSK +PuQRy9mKH3ov4tGt8SO6QnXkwk8XR/LwljI53uKHfy8njcWwu4v8Yjysq+STrGd1 +3sHBOLSLgCNxnKDbaQRTBYvb73gJJK2M4Lu2yqDDTqj4cBR1g9ynuNflWSxzhzEe +qJDYP9MGKTd4nA4Z9IkHhndF1/EQHdGvEhirrzYdbxPfHNgI3huBCulPl9JRJpsc +b1AA5thlYoFZueSQBUw7NECaf28BHeB/POuXM2PsSL6/NVlEFttG2L3yVd/qGjZ/ ++6+xVlIvt2qnSc0x/jou3UYpnKaCEK1zTgW1QRu0iQIcBBABAgAGBQJOHNkpAAoJ +EBY3Fz6KGiVILJYP/0zzkffTzNxtzRTRmUQOwlMNoGA5mcK99fIk52tYt/MgG2fo +xI8FhoT3FeDX/NsY+0d3LoQCRIWWHRMKOpRYva3gnh3PndShm7Q1VgeLgzHadbbG +1wIu9U3ZI+2ZU6NrhBK4MLxWidWUz91jP39o4WFgRcz96xDS5W/hbBYYR6JcZZ6h +f4bY1X+rzF+Mdsi7evr5m93p3IWJxgYUuquC410GGT3xpcjr/boJtoh1x9huD/d7 
+VCMdXhuKHHF40U8LIbesapC7y2sOp1ZfvpgATAuEoN+yPKE3bqt/gpYfeQq/xbbo +hrM/mXMUnd3xRoaZ8k7Mpy55U2FYasWCTXD9JElqITDUXEio1v+maQFSRp1d5FOC +LlQrT+JZxoCTuC3HfAcQLnozw7jH+RyemQGYUb9EZIVd8RAQsk2NBPzfdHEUCZRD +OjxMwle0cbo+GEy5kxkWpH2O8FalPmrrFBXs7oWdcHg2MnSlQeGorPpX+LkWU564 +Q3H9sO43TMIEStfQ9fjU0UsoCGryI7Av/Q3dYbP41YX6lx0nsMdFELXOlNBkic5I +7O9bXo6UekiXA1Lbe6RlL7R6PGy+0ddNwJdZNjo6Qmq2Whk9f23MnWUMcmPHZTet +pAOWpPok/MJ0ROl8tTptXAtH1fuafLt6t6DXT4Q5OplXLnIFZA9cjX9LZ0w5iQIc +BBABAgAGBQJQpam6AAoJEDQP10kiCQO5gE8P/1wqP4HHrTElco7aI6/eUz2MsPnF +fXXKiyzUzUTQzJ2lm76XsJ4T1/hzXwWCYRXE+HCGoEPe36ePCT4HHq2fCceEMWUe +3EqA6sPdPf4ELvrvL6+adEGtM391ukalwtA2SSIrFCEvJ9DqQzGWKbz2XRLQLmrO +sK2gC1HD8P3Jfe8Nib16RTCFIDeQ8uLSPX98ML0w2Mhwelvj1oQkdSmKHvA2lhOi +lKfLWj9y0Fwr/Iwl0zlAIOng4S+9FLLQTJeGZcM/J1p1zHwnra1B9M09voCzRlaf +jBcOgnZKk6mCMtSItxm3dMkcwUad2Uu5e4CTey0a5d7oYI7RCYuz+uyv/NAGyc6d +Iz178UnWGADZFDcj2NSBO+WlyWhBG269RLNrt1Ud7uhQzy6VMMOUDEd7HUDWKy30 +kRr5eC/VFc7YXEpMdcWRIWrsNoDSkunnqrbJ4r68zJmGky3Gs3McXUVoUXN7gBGF +WfusENxI5IuL0Kw4x90xwSjlfQK7bhyTBd9OeV8ZP3hBQ8PNnDxCgmrHf+qBNOMz +9lgAeyL6S2UxiwiuqL7K4Y+F+jEjrbFOYm6Pk7t+MdKZX6yI43RzrS0gdhFynvWm +GnaMJqkwrGpTOAH33csBYxyvfogPujFzElOf+OaMDSMXr4l3bb2JX8lpoLcYeeeu +nEN3QJi++lSFy7LbiQIcBBABAgAGBQJQpcyQAAoJEASYUj0W8TSAgOYP/jGWK8IH +e+Wrg9PRrK//dwXYX4GxtOYI/eQhNNWiwOOskxf8kF7EUt+xpJghW4JX0ClgAnEc +76x4FTUS2GlXYMp+rxAYtX21FIvlraX10eCyU4PfgycnpDRxrF+17yyJ17Q9N4U5 +tHEQhvWC6j/3xsU2y8k0UUxL9I5aMIZ1WYU4gTNBfWrLBxz3Mo5As2y/Rr0IkbFu +r4bU6/d1fA76cVv5+TLqAAyiw+B2xvgAbJn6f9AjVlGu7rI/IsEiKveoCRkC6s1z +XT+K85P/2f8L4A2/Zd872Nk2q4oaZXVVsoaaxLy4kVm1dMwOCgHjaWmTpaJh35Dx +Ttei/D4Qq0RdZOObLTT1G6q3cWtRyxU6PMC1c47ohgCQV7lgKWE9/WwnPMlVqDHU +aBUehs/zoW5+wqfL6IJzI5nCIW/Xhd2TYYZaKLmrQlEM2wf1E4ZNQfJPQdsvDL2N +k3c9Ffwo5wtSSW5aMCCAlUAuovpdLEHnZG9Rye8jq8c46t1AxInS/t6SDx69JiOd +dWNyTpZVSvzqQiFA80J6jqHjcluXRV1+th9AEoyTTe1Mx5OeViwiIk8mI0pA4HPm +mCy4n7ee6K3ngbAL3H4jSNfsABr7qugDRkMacVWjC5cHwAhjtM6HuLDRy/RFbMvL +KPAPFobf2awBZHfzoXsftsQRAWv+C4rk5ULwiQIcBBABAgAGBQJRLa5BAAoJEKun 
+Mz7JOD1DMQYP/RalXWrDFT8h83Ppr0u+VDP85gq0yid3yEfy4ufxt6zIY4N6066H +QRo5z4wvHWqxp3dpDMwRo5z1LtQDeQqfQkrv4uA8eR19JFmQ/Q6jl0lje+Xm4Avr +jvY+7MU5n51/RqDGSAsbC4dijsaxmePntkzapx8ElRBmeycoaUVlC87vMT1bsdPE +ff2GL0163cB3Kue0xrEpIwvBhWOWWZcZsfcPPqYIycW15A3A70WD59X1f9+LmyaM +MnnDggpDpVA/yKe5CxeR+fTIHFmx54Tv6oOWIdmsb70xRr5Ssiap/kRFKjxQT8s+ +xunlQCMjiw7+tUGsf+tzHdwzels9XgrExcc01JQ8qUe/rCGSlzWNDTNKWH4Lo34T +I9PPwiK+JO1AEjUAXI4LvfF7wrXbfiRkVtIECuZZpOTs4UzzgzZJEibG107eO6lG +2+ZAKNXp0AoBc/+ZQhEYZR9bzgUc5Mxpx2Wwpm3VLG2FXDs07qaSubADnLEhcwZm +kJXNUi/LAW5/Mr22wgBgJFwcnf7w+YvO7zbTpPBuNcREicWR/b89YFMinfNjTg8D +7MOa7cMrVubZYbFarGsRCuTzhBTRJrksLZKp85J2NhxYsAo0C1jOLM7AOryEbg+N +Mh1/Xymbk7FrO+WNW/ogD8lbUNJD1zQ/c8SwcxkrvYEDZQvX/3Q0O1EeiQIcBBAB +AgAGBQJRLqepAAoJEIKLGiNYahCeCzIP/RvG2ZrJBTpAQ2AeoTQW/+czRj3sgIi+ +R1wqq+QdPnBEDUHsFm5XossWgc1BoTRfS1IJwxyRIfdWJ0GG4BO8Q1Pr8+MhSxu4 +V2+aX8/tTbZv+yjHZpUJ24GOvNyxDeu2/wRYxAA2gAdETj+2rKhEQU1tUYQhHFPl +f4sImjQfkb4ECgwbKv00kmrZzcxov0ZyEwwR5JOxoPEqRJLNF3+mvEkYNHSqjgrI +no1D5tB+tjPlA8W6KZLqslaXFnny0ftSjzm9/3bdh7D7HXcLiedQLH6YAhuSjKmm +0zwh2J5WQezyWcnZ9gRokipKSQEw5As8ob9d3tH/nAzR699Yds9MvxPIus0xwtcG +pirWSUKap6hamatpdTnm7wgmutFxhTkODQ7DYni/cff7eDhyGXuIj6SKlrmfLGrQ +VALDm26AwWlDWt9zfGmZZ/eDHeh8hOK0Fyvb+H0tKavRM1tjNkPvwntrBWlYPf4c +7zsXdj/E9tyrJmeonheQ59hsd/d4EO4v2/2+sFYeWpAoFOUJqqsOWCo6kyUnPbkG +lJfMeIfGdPSoHud0VRrOcyYXAai+w6Cj5gH/5T7QZ/fBpony08pwDOK7Imbgadjd +f62SCv/G02vQGNOiE+6fSs3kcM86cmTWDfKghQhlWHrTPZbwPzBqEXHrlE1rxTBX +V6mDdm40ucA/iQIcBBABAgAGBQJRMCV2AAoJEIueQ3vhtc/NNKQP/0KHUaee+Y7k +6hldAdDVXp0dD9HyP0lsFMRV+zQnh6XUhbY76IaoW4q8IISJC86vgrodg93CieEQ +a+00cOkxkzidJ0SFSMExOcB6B5pXr+QQAo8j2qn0ibl17wOc/uztbgzItGifjl6D +UQ0WGRc8MpEFCCbYUo+UQGFEwxLCCnkSdEta/NNcJ5TwkDjH6aFwBzjKGN1v6wXJ +YGoalDe+eGH6/NihRM8PeFGy8sz1YkUbwRl2lpRy9Zl4nWVON758+YMsaa92PIi6 +euGomNefk2Dbx+hY+SanwDAxsBoBUmp5DvmJ7NgmwG7CIqmlRZdMfumzyvs/giyZ +QBDR4j1Xu4QCv8Bp1M0yLAnkECUXwfu+FcMZd78SaUAeikgMd2maWRidR8ByzmFJ +DTKTMkj7+FnGuYQnM/zTIxKL+1vXewlJyQYs2rB3VABwfuWwylJ+JQNFLEz2OZWC 
+tluKXO2paJL2fh2T/QL1gaiIlQo6Ob4W/awGvHeWh1KPKCwZXsOO8/imrs3HNhj0 +tHUeuqwewy4nTJbMzgKTR/kQu+rHE4LFsdLySpQMNvoUoo55Whyp/NgYUhdH+Hnq +c3ARuApMaaZ3dX3Hq19eQMe4APBP0ibEaG8ELjPu1VDClTEAHdLkCSV4gjQdOycF +3t4gAPchktS3YkRBtbJ5LwEblcsKtyJLiQIcBBABAgAGBQJRMGyQAAoJEHmz9P1h +fdutwQIP/j+K/tp183pZzktWHzE1qRrbPpr+vZyTrDlTBHtc9Vcm8oaSvOvn85kQ +mEfCevJdbpQoPQviRoOq/OY9ySW/nHjhkQ6CfSqdZC0lDpjyYbFCEKwWel0OfzUI +y00tUZUzJh8pIjy2U4zRBAGLt/7RyN8XkfpdDlQgMl+LQzeun9HwGS2m1O2NfWTf +CFXK73pPGm1zG80q3P/oNgYa3hiLQH+YTAFJxxY6XJLtfUtWBXiRFIRI1acojL11 +lNM3yLf4cteGSREXBq/tLYm3LGuxQf8L61p3mh/e7RWXmCwdqriyGq5swQQsl5pO +jc2DWDPfXRm8jz7wIAufCttaL9/CgpqoTtvn+gqQO//twyI+K6xZnHPsbRyblY2g +gxTHBS06LpgFGmGAy2gCholjDILDRt1Pokb3zfoA4JJrUh5B2iTgaqcveIURrOEI +sr6S8hc5Fy8Ah88XX8bktLWml7lHD4if5C+u84mf/sudmFGQ4cPxISVZg2n9PnPA +RodNRYfkeqXnly0Jdjmf6hpUDvk7ahf0SJo01wVbF/slSanxRGO43LynTODEmgOb +KZkLZxHvrBZtE2IvWKBk3Lt/bZJGOt5SIcTA5mrw/AwyKUsoRQ/yONdamSz2LHHa +ckvJ8j43tgXg5kPbko5HeabqLmvG+5EO5LWY//hvH0JCyMlBwXoliQIcBBABAgAG +BQJRMK1cAAoJEHe2tpqeTcxrGIQP/2c/AOTutrvWxseXYM3nSHtGMj8uiWaxlRPX +YDym8HkSepHxMVBS0YkUH25BnSIf2jVV5cogTDfv6JbXYeWFN74KQGvLbFa6/mv/ +iARb3f5w0YJA+zaAZQx+OJZ+lwL6LYDLhHAvZUcaCXmPCczSuZlbapDOVowR+Air +oQfgXLkUUQ9vsKgpJYSMLv3kvMbj7xMx80rxu+gj7Ghrsa1XGlencSxRey51y2TA +iXHxurWPvrMTLt3C4BArfSC49jhjnGCm1xkBXPr8/SQ+FVu/Tb3RqgIQtusPFa00 +FFL3FKc+dkR3MXgMjtXupfU1Eyqtj/qihNh7s0X4GOs/AzIJZjDr1yjnQAWVePs/ ++ep5+euoPqdZymSkKMsKl5oU62TBPCnQ/4n1v51Y+NrSjUL+TzABqrjp6AvVdrV8 +1Jq22Nj/iSDTPEpnQdl6quf03QhGvkWG5bpf7NVOVLs2GUU3Mnny9kBkRK4e7fac +qg7yCX1inB60QuPjIqYwTpHCaskZ80XpylebZJmEnMFwhXG6mjS+FQeFHWS303sD +Rv+G1uVcOmfHCtX58Wx6oknuniQhfTx93YMLHloCvTI1g8E3p8twbF7j16f4gMLX +XRE2o8D3HK7BYuaA/hH2MV1odc77IUZ1zPMf0ip0M0fhUxNMKMAioSS9m9EMe1Zs +0br1J3H/iQIcBBABCAAGBQJRNR7/AAoJEPOtXJSmf3B+vZgP/0LQm81wxLSAXkk1 +H1GOxe9r6iptjvl0ngHKzlJ8wWZN94h7vRb6wWJmeYBBSxe4Xeid+uasSYpOywlX +rgkZqhV1DBDYHcZVte42Fvc3uTiLdJDjw0KxU4nG2+7xzMOdxy/StVVHu6xj8IX1 +CAUQgFV/PMXAIXjQLGaAM1ios2wAUYA3SKKlgBbPhT3J5JqmqM85zJU8lEdiu7od 
+cj4P17xMT52+YOHfopJ+ZG749Q8qyn23ASywLdtidJNrnOYx5A1bQIRH7HxDCWLw +a75BEsNtL4bm5qjWvjVbEmlhFtNMeLXbpOl+hFyoOXhayLna4rT0N9ZF7w38rxi/ +jnqFybDeAAeW6GXzaa4LujD7XNlmV1z2hT3jdeLLcaonQesF1FoqFjbEIxVLvC2X +Ld7TkrYCiikQhr7f5ZqpBA+7+8T39RaW+2zM69gU2LS/O4ZkQzjGrugwL4mp60r8 +FzWxMdRv5jt8GvFT4cagqHWsqgfeDEXDAAs94UJgZm0pg+dE/L83kV/LTJI4HLQy +Wx0yfVQOUfei1/zWX4SFmxQSG+DB9B37Wsvel0c2aafMqrbT8xGXIBRTz/7mLiBT +wzNOTr+99kqQDwvHL+loTkKYBcnHuHAy4i/PP8gyLjvjPk48rkXhFbtRYg4JmYoO +1bUHlntileQgA4kDiw5x3/USVgUPiQIcBBABCAAGBQJRSiOXAAoJEPOtXJSmf3B+ +MrIP/0TfdF/DvG1VsJa1/Mm5XSFfMovOzM3duqCP6YN4zgOlpbNT7JkhrNFxFj3H +VPJUdUbW2tViqU1m5sY7uibS1KnS+qOx8tOOw5ymn0Ld/9ZO2bLBNmhtt0HRckEO +fZfQSTA0PjxvAkrHTxWsPO3HN+aFUVzWR6dVHpRq9gy5wSxqTTycdrnTgTe9oUxP +UbUDCNSLoWRBh08434ldte+z/CfcyA9e6wyj2tYGegrYFzuWL8oXXdIrDFvJaKED +FlMJxwBO63y7iJvIMf5oieNpdiuu8zyY01S6V/ohSpLMyi7NtmCRQ1g4Fse2dJfg +wWXhV8rUjsNMAczwrEF+hUR9HslZObPuEtvX2FVSMCguMMvk64ih2lOAGenlfX+k +yeK5We0V8hcaRmtgqRSTfqymmNRf+5do5vjVRVLadwHiMytnL5XZErztJYYUuP7z +xTGChYZUHJlEaI8Dd7Vxe0twXoR7oxjCcrpPB3NjkVYdKHkMLcZ6O+AGTZZfJLeA +w3ewWMJV6AxwusMC/hNrTqPYavm1hH4tpfwMorb8itmk9WRtcrxP+bIkPLDOyidp +mdc33BulvqEZ6SH45eJ75TIUFiU9mSm1S69uChlOV8C64jz7RNWI4t/ET8M5Da+d +9l53lSIA30kZJX/OiXAoGN8aqP/7geLnqmrGZVvsoXOSB9qSiQIcBBABCgAGBQJR +MwxUAAoJEG8M2ucAtomd90sP/in+xTSQGI8a0coESGAfN3v9JrxHNuhJthUFeIzS +j+c2hY3bZf135JvZTQxZ/PkMXVeYdTWPspMT+sjKZ2QU4kmni3v0lyj6utsmwcjd +bNMTM+Mdf0LPbMFnaTaBhkKVheErOIqT8TKewRstqNc2LXWjnTvIgYkJJd6T5tow +sJS2EaNXOFvThBiw+5TLrarAglM0aTu2RI0nrxzqCjEfgQ3mKBNy+awaULD+95FR +wp+sTiSY4I0CpKpPAGpMb2iuS3YNmpyVeODBlZOmDWm9vR5DgkVmVuQzv7x8zKq/ +Ufsc9ECzCEw1rcQ0+BXlWGvANk0Dn+bNNZyh7iR4pCBko6rV39VvM6THfAeDn84p +Ow5FeaclpzVtuwfEf8IHcEUJr58zg643n96AKrcazMnOdX3fErQA2/Qmxt6oHXhy +mdMy3Syu32BcnVasqFSsv75AsefGrf9TS2IJfFeJoj8qENQZnNNlttX9tH+5aWMc +6bCQdfJpYS5YkBvzgRhCQKkOK2gGt5uUBbHJU7qOnAstSgf92fbOURynGCAFGavi +/Ud8JV1RZc/osyv4AjyW+GWLuKltzE/zDrubZrbrBIO0qvNvQEfjj+RUgk5/SlRa +40T7eqOwsGsvywt8Pm6xl5oSQ4ed++QWqoy5FAiIWk1PtdArCIgVysPYq0n1c5Os 
+6YLfiQIcBBABCgAGBQJRNAv5AAoJEGLUj60WoN4BrRYP/3sBh//WwX5W2HDNJjw5 +VA4TypB3Army8ay3HsL4KIOFxA913D7u7zfy5VvTSvcatOjX73+ag/6ZfLriHuuN +mdh2AHK1lAWnrw5uv4ezFUAf0MgL5h9K98HvwHFyiwJKSNCEFcCqZQlfhLFhPeWl ++h8O8jsSykRLiqqKcNuEXZ3bSFwzuCX29I6x95V3FMGDJJh+cib/VZYDtMUk9ZE/ +cCar8gD4envxbhX5UqX0OASFYFYahl3DDJaOop+ggrHSqTFpdls4YDWWineo14v7 +OrotnHEVyew3d99LhnCIyACvCxAoWhKXz7P69EVmoUEOGIcLgajnhBarxyfFP4Tl +6JOuxcJDvwzURiGL9EHpsyZixzW14OZ23zfpoOlSckmw4ipUVd+MNEa9c5f3l6da +FX8Vw3XT7Ob21XAOaNhOLWRGcSsqTmORO9rBqOLndfIb9s8Gs1xbSM19lYEV90T+ +0sb3dllSHvOEviIJE5vQ1vADVozwuV5QfK46rafadEOAqGphfwrcvfPPZo1sBQmE +FOJMkNrHWQKn2Dy9sFAAg0vwt9+Mq3O+HyTQGeDUQgMR3cboxDidY9jftOcRlBNH +Vkrwkzy5D9+2cH8w1oI14YlKSv5sTgAP6U/iM2zAlUtN+jF4lCzfk/S21CTa43XN +gxgZroHiprqNDfrH0NexXgfZiQIcBBIBAgAGBQJQqDUSAAoJELV0iWeArwfTplMQ +AM5Aa2ItZWQQ9CRW9FJrJ/aUmN5ZABEhhr4BQCZfdkuCUAbJPvbrxTng9Qo3WCOf +rcs6pTFg2bjAQPUMz0JZV0rVwhbpfBTVHxuRL6PZ+TsSodDHYVq5Nq3atwtfEO2m +swM8YszSNeE88XKdNHjWUindE0yenwOhkdlGlydVKgswLOCGbq6kIYBU+VXulX9Y +FWNXdA2l600X7YklvhkUzhQP3Hy0N9t4G/YFiZnf7mJKgVQF4gxrn/NEtOrPd74d +pH6U+hzSRjewTrkYmEaO3/oi/VrByyb4GRsgRdvKzL9AI25Qld3+WxcCTRvNcnLo +0aU/CAQ5yeZaTO1PfW2lyT8ocpOcUk7MkuOxnpQLMl5rD8IIblpjl6dGxp9U1OjS +Ow7NVaw0XExR+SU554Q52Uq5nl0sl5fMrRQ6e333juiYwNaMHtOVJ9iy0jU4lkdF +8HafpElHT/WYjM741y5bzgm0u/LjtaIaWgdNrzt5VSlI2gUlX5QuQY5gHvQ+PAzE +pIdb5JGRt3v8QBKKAalQP7b3hNgDh2WS2W3z52zeOXrQqoeI1lZFcwZaZRXDkiy7 +01A2sYcisXmBvGxsdI3s6aNQ11lDsHopMRO5B836Wk+/cjQP1CgktY3hY6V97HTf +OgQCefwQ9/gu1vDfHQgz3yB7sz1cWgdcnjdRW9KOaVTJiQIiBBMBAgAMBQJQpaj8 +BYMHhh+AAAoJEJ+P6v7dfIukrEEQAJU35Gz/h6f+AtaVI6uxBartLIdcMUMN4qHO +AmrP3hRLn+F3d+70eABgVHHwGeRG6D6jnAIh2BU5CqHF7sLxNom4V3ZPN34/s33H +J+cTKVYl25oXcPwWOeVPD0/RMlLpJoUtWIaUxEzUsg8jOdfAgbQ9jhLHPkWdl7FH +oelT/c3xQ78X7WFx24wQsNQYygwLKgMF8iLfOLdwmSOdOeknwHYH+O/haZWUlujG ++2R+npBaLkrqeh5oC1SYHPDsyIPqaN+jZNH1BFZgJ3Vmtgj1p/kiMfPMm0m2Abb1 +MzOIgmC72ftog2zc4+AdilCW+tDzzhZrGpyqj1AUgCxOwv2MXv+VMiOLakpDuaki +WvpP3wdQaP8Gy+gsK33JAb66VczZ6XHwfxH3wntKmM57C1UpeCNcoE2FVDuljTPO 
+SCC1wtJVF2OzmgWTnEO9BSKuv8JRg7Gs6HpOq/6H5fnkJF4Gh9FA2X/h/FdaZAg0 +08vCgmyB+dgZOEvFBFLa+x0GWRjSbYbuZ2G5Qj0RdkCeqn33tJE4R9kRYHhyQQsI +Gk0hKd+VsV27yAzirVKbwQ4AIZsU9OoUmyujxUEnONUS3SGLzzmJ0ia8Smhz90r+ +xDJCQWx8E9mEstVjhbjSXHU8RuJtXvIaQ2/RVGAlXrrYSqm/GbZTV+XlMnUCS/wR +vF2WcYPOiQI4BBMBAgAiBQJNzmTOAhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIX +gAAKCRAsB1SyzgaS8wFvD/90aLbey7xMXOIr54bV8A1EVxiLy6c93OWGoVHitVgE +2PM9k4026Jvfu39kutswBIe0Nd3Lr5XJJaR4OqnHMw06RttV4ZSAEspxVoMNn1HR +xEislAr7t9XT/iBBay1ulzYHYKsQNq13LNWQMh6o6G8SY+wApIRYF4HFQ6SLfbrR +9256pMCLNLDSF3aOczRizRMUx/MnZm6MB3azDAvSPvEmUP5B6keLh9kdbXLNVeEU +jnzb53WSNT+ZGySFTTKAItRwQ5kFWv6i1d73ztdXsdY4FEwQVj0MHxnVk4bNXM4x +2Q2I8FNB7nqOULrHHAJo+s4EcNkJnsZVqxV0hnvWtjvCTX2pMVtVxI6Uv8+4Fh3o +b6sI7fO7ifZ7SLKHUwZ8VysjmVbyzTHYbXSCiW61vaITtqGY2OYqqUWxbxxSMMYJ +BVSZ6TKdm/UgVsocLQQP2ljmSrVilyLyo7vpgscmY1l/62uSaAmfT2l6oiO3m+V2 +FSn4KCh8lF2smquB/AQRDnfEBRUHYZqhAh50IikKpCetNd1BhpP43RkruMLZPMOH +Nan8cagArOYjq9F74P+CPVjVjz5knhCWMroBkrRLcLSFZMwiT6I70PzQBjwJaasM +L+R0PNOYr2xo44dcPZNniS2jsMD2Hop1i8UTMYSTvMTjGvKYIzkrDR0OqN8F9Pah +oYkEHAQQAQgABgUCTdDxuAAKCRCKr4jW2E5BrpENH/44u7+sL18WMoMuP5yPcR8y +NGgmC2FwF+wguKj1CpxPhBo+bk6+lK4nGxPKMcVBv+ACbKFHP+g+uub8x7T4eoBo +C4FYgp6M7g7k4IJrSOB6XOBmidib1ch5G4tNqU4A2a9UsACEVRYjIDLPUtCBdhfm +vB7D7mHowgU4GCRYb609R8+G3YXDwKrCbcdMjcexoTVT/hB1xz7XuzmKgXfqkMKt +mi/xgVGZBTKRNvrj6bKncsKcx/Z7wrXniGOXnJusuFZQ+IqSVzeA1zXW0ofxgl8p +HPC3EpF4/D7tYQq/ig+jKtM8ZZGEC8wh3OKsByIqABagoPP/XZNXj+Ak3CXmy5YI +LsNjEiS1BJM1L3p0MEtMY97qc+wK2lG8V16R0ZQRQtlGyVcO4AyLzDEoGcJwJyGV +bTlmRJ695FnKqXqNoaoOjLRvc8N8cvCAsngySZhidciZG3TS/wJWoUFCNY3ASRby +tr4uFvovLbicsulb6ihQLtonUg30fnx3IbJrP9B1GML5lETuRoi2SU3Jts8ApOkN +GhrtQCLJ0sNfL5ywFiE5vy8xVj2ombNY8etlsZFOU2RYFZLgO6bd/vfsDoqFObuu +YkCgfhzfQOPzzbZR84fBdN/y0UqaZhySB/Gq+2kNoeS2KEY26zPBwBQcifwAN7LN +cqCcdsgyZInmRs1/v1kahNGePMh/zKSQu5yzJj8QUaDvdsAwkgeTcYax2Vm3GGN5 +jUyo82rmpbfxSpYy3zP9YfcE1sLEJ1mQ9VJyLMw1z3gR5NGgyI7EnBwadpBAvSOO +VkdXpxltuqfmVd980t9Bk+nS7ACPVEftX27s/piEQZCgMPG49kS/LEpdB1f527yJ 
+VNPjmFF57ZCHTeYactOay7pYDu5x0LdqUjEikv0zK1YBNzCoJK8OQORbMMYS28st +JozHapce6N0x1x0i3LMdVVwFg5BhdtGvZW9nLnIfU7p8tOP/d9XufQbWs37ffpkt +d/xp0rC0Rl3PKBX8sSrijmrvabIGo3Jt+6hrYU4be0Ap0IAtIZotJuNtJaw6wfsL +e0KqHkcVNXU98gJwDEyFzFHWVpkojYu50lMplY6OswStFozf29KzvQ6Dg30cRDfL +ItH1Lz1+/qmXTbCq+dC/DV/54qZ4OZd9z8zLK9H/1lFc8/2e55YeNzTUhiOWfj14 +8x8C88EUYKHgGbCpx8CLy4zislEuu/YRw5xFAo5+PYRudPAM1EDw+7OB7Wo2Ls3d +npXTchO7ItxtLNcO51bJMwSRlliDwMgrGqIY63rRg7GLseHcHkFYnlx3ECxfoIaP +v+0ov1RD0JUV2XZVY+Bs5MV04NPo8QLDCsmvYVMNkCOHgk2alwAzlyb6L7d0mVEv +iQQcBBABCgAGBQJOdc/cAAoJEO24wIKm7mkI1dUf+weFZnH6Gb1YZCazC7QE6wU9 +oWMaxGZXJ+NJh8N1SdGU/13CWDFPapCewa7rL5eNQEIlIoOwE050W+qpMduRtZqj +aBjhTRcAvRt55LGcwn6gFhA1J4hS2rhTerqGr0lQLI0L74MRFPIpjE1aGoO/TbBD +oOmutvtbLM/Ei13v0ua28SILS18oOiFw5Jo84z8jkX5uTdsqw/+BTtEcgfXJQ7IX +FrM1Ea7cRGVAxKxwjksrKdsSp5EVQaNtn4mjV39A2KkaPmgzNxWirObfSpYpby1i +mC1esQgFrTWWYAltGgJ+OAQ03XlUxV1wagua/wUl7Vcig7ov7wXj8iPF97kgGYM8 +jkSepdreaHp0JFdn96RPLwmBwkLvDksQvZEUUV2DPynoVHYGCpyZqCsezZz6mpDC +nJT/PPYzqqEv2aRMRI5sNivRJy55W33W/hZn2hAjwyWji7ZbruOkiIFAd1hpyqqd +kD0yW5xTYDitXQqinwfD08tB6eTIpGZUelNWPFcPQBBLqbR2OzMeiTWkc9K79i+q +s5PRsQlct+S7wmNIijLtAz96jR3ZFDg6vPDqkrAG8ZMnHHoKrqdmRPg9+wo2BYpM +dcnbRiIEZPqnGbrmMKRsyIfOdOVslATkZ5/znLPwoFzmuheTzD7y4ss4wDnkd6yU +/dtq37sIbob7t2D4PN4rythQOR04texVq05m2djFzc5SEsS7WCK9V+3cAXRANvwP +bL86UJXvyrXOp/pwWFBKCSL7Lapa9L33LriDroLRTky5pQk900AziM6d4SspxT9D +d5kTchiNuaMwy6pKzOcyRBzs0VZrEi0qlg+khIsAsxKuY7bImwYJXDyF3VLoOptp +SRZqWIMImZT8Ve+Mri+BLJ3jrCvjtl3RFkIFjyFYC24MgjSRh4mgM/g7YUVKdWO5 ++M5yO/wIfmro766DaRwdCzCVGpjZm9McO1DtVl9SpWk26bkTz41jS9vIh+D+VGw9 +T1ss24AnL9gCBcEso4bVEDKZCswDqgYN0VoxdbAW2JRYaYC1EzAMhwqugjutUhsx +sBixTIQS6wgWJRFem8EKt3QTS3YbGGPnAwaqY0VR9lhzlwFeb9uxwQyx7KIZNvxO +XzMb553KoIctgw07y22EBTkzHKepSefXho281oTc+sAcSjogWwhHW/duCY4RTKXK +Am6zmObMQrAr2/QsCLleeh/53+nOovlgP/ib/ji5BZIAkERuUVAx2lqnUt0OIhEd +dAysonJK5qknlzmZNLOCy6omWuOkVAx/pKDU/WkRI5joBdx610Wqi4NrzBppH787 +KUWx4FO59pihwGS54sk+2FJcWX54PGqDtfMIr2X88LcopuO5dCuN8O/tnst26mW0 
+IlRvbWF6IE11cmF1cyA8dG9tYXpAY2xvdWRraWNrLmNvbT6IRgQQEQIABgUCTc6u +VAAKCRDl0wJz9Z0l8LB9AKDWmWTkfHsdCbxKtodZkfqNLowhFwCg5nat5uId2WEY +dJAxBZu2xqOQKX6IRgQQEQIABgUCTdEX+wAKCRALA65uTiRRfGQXAJ9U5nk1+/QM +Elo8qzkfu9rsXmzT/ACaApXuW9IFJCInoiR7BH1xlQLu5HeIXgQQEQgABgUCUKXV +XQAKCRDwqefc055FLs4nAP0U9FHFK7o3bW2OcqNbdflADHrpxONyRPvDYGrSIJWn +CQD/dx5Lc/W89efh/5gQySvatYALaXEWQtviMEoNeKYjnDiJARwEEAECAAYFAlCl +qu4ACgkQxIGy/xYSKAbMbgf+NlS9Tr+IA/fb+oc/ZOTaHaPX1BYyYj2QLv6IetrO +kWAM8sqatpUXqe6BVEvKHrjjWscMpKjlbWcz1MUPIo/VwTWO+XMmRtVM64ZUTdEI +6ZQ0U/91yvG69HyD2HuCZx63qUUkvUVcwAuFFj8Ezm/i6A/4I2xHtoJ01Plm4pcO +fZotrICBgMdQx1oFyKc9fS5RMomeVF+VSk7BE5vKZ4AnmXeCAE7PF4egAfw9tlTM +eooon6hMHTAqeA2DZFonL8jCaFFx990xOQU3dgrHMfxOQ4ZmwgO+XLPhnxh7bD2n +nsPeO61Bn/wDV1PxkxWerKIpTmcTFpqQxYP3BcwU280JB4kBHAQQAQIABgUCUKWs +iwAKCRDQdRn60Lyaw9R9B/4yVLBIlcxCcRp6qkkS+fJOgs+VST7lvlCOoounkngK +fgoNLSXpHz24AeXmviW9PT8OjYLbL04VeMZZic+erjBL2QYnYoVtCbqmL29WRfd3 +C7y4V717VnnxyyVTH/BIdZM8l95y4m3EhnEDZ0Uarng9K4ia+YzWg9K+W04WtLRH +OlNzevibGrGiU+u8MaXbNdJHPUdKSeS/hoJwnmoJyjN+a3LdNPwa7zMYBk1oqeAD +Sybmc777kYO4s7iz8nM0T9MgMfNqYDho1ljKbTgDEI93kRpfCFhgYHyMPz+D+4vU +uRW93l1L3DHVCbd/MErvRUiFFnx/HVqyHtVabKMmBWEtiQEcBBABAgAGBQJRLpxM +AAoJEBwjsjZkpqC6tAIIALK8T0L9NiLxSlvviKJQAzNBmeW7EGR5/kKant7BhIxN +MKJnG30YmbQGgSclD33xS4nIk+qyi0/7arEF813PHrpDEIo4ww0UplFH5Maoo5WD +EtnTxEo4lyacnizib3+/Sc5ftLPhPz1ABDWKc+6f0UgWuseIfOUBfpLJ/mrhx4E3 +jAxEQcH2CFrNnnJcKhXXJApye+10uVAr3Cy/jfAksKZl9D19X23aMnAO+pUeneIo +oJ3aT7o5Jok0tWYciPt4XRGw2FamUZzQAay4My9W0WbOOoU3cUG1P2nwXjAzGi6V +mpgjzrCBvGnImeTBOaZ4l3c8q6zd0iTyuKf2aShDbz6JASIEEwECAAwFAlC5QAsF +gweGH4AACgkQ8YCvCL9tnGmQbQgAkZtuwgaBQm3HCUCiA0mUtPFiYl1oIT+pBT7x +Bmm+kp7bpnopvsgmRLx00hczybn2QiWZy5ehGkSZfI3rUAZu97Sh2GAO3LNOrudV +ryci2xlftqcIQtdvqTwSaBLaii4rpRk3p/vIvoEy0VyvugKaDF3wk+XDpMcQCjof +5lPaUKuX7kBMzmE1VkAoSSOZZHoRk+exj0BRokxVv9Udot/mbkksa8Dt4uu3FsMv +yeNEOKFvJDfGJREio6xcLJfsVALvHn8rVktGRhUBZGqcOnpOa4LXudwV07eCNmOV +JDXFEYqmMIznJYuNvU5JJdmW5UMSo+6/00lTjjB3xhr4yu9KW4kCHAQQAQIABgUC 
+TTTSjAAKCRBVa7/wPwoEtsBpD/45Ljl9SxxFivgBlAPIQlM74OpMrqytxlgB2QHH +qdlAVlogGD8WSx9CbqfOxAuKTW1ddp2E+d9K8h43QBP2k5/DyhCjMskuyiTMbgad +aT8gCFNNsdyO80ET8sLZjI6rfSS9dGkpwXQKoNaMbDjHZAB6mOBtRrSyeyIYLM3s +7VqAMbulbIARyTjq70BsPCest0OeNTfIL8k/7idhgLd5a7rA14hTjI/QBF+pMbc5 +n1eHhEAXLokEpeRqqkW8uU6AnSuzY9ZsFosqpXlG0pANpXnnU3CwWLK6aqhzrWLZ +B643SY2jE583VDOUO/E5OGouswDndjgYbTy61A9Y0mSV1f4acCNvUDLAQUPMCnv3 +n7rniXPixCo+zSa3ydTM6LOUkWZYMOOHrjZcFbp71d1lD/anbLuxrIbde5DthP/7 +tc7T6DYKhpOdxj8JjTf5RLlwjE8+54gQn33HaDa7Df6ptfN2uObxq/geB5o1eRUV +aC5A9FOTT3lr5qvttCv1jQxtflworgc4B7oZkpw/UY/H5aqkNfJCPwvos+scmh7I +rJ7b8FWBjPPmu7nMPhNfYNzA8+LJrPprjaXNtNgnX+63rgqFVLan9dCTaxIfhpDh +38g8kKveyV+Sm0KSEI7w51NrpF101od/uRf7e+iPAJl1rDXfp2aFt5qjO6W5IoHo +uqJo2YkCHAQQAQIABgUCTdKbwQAKCRAQwBxaL2BZ57dLEACfwuk/RXK/TtK58ZDE +5Do6eMAHuAbNGxeA3d2Aq/g5nnT8icUAQ2LgqKXUMTkS52MW3e4FptaS7hkWjYmI +FhHJpje2Jxq8+HL6zYGMvQLiMKINUJhoQaCxC42SYZlazfpfeYl1K6ksrRalC5/k +2A+SIznhoXPFnOBgR7UA8T/a7hsRlrOTVDDaC74SdTNGUTDnTTyvDv5Sp/5kHowq +9+n2t9wsRmbY9XR6xOYA94b6gDmrVCIJnfWiKonUwKu8jc/XbVLLz1AvEq1atGfb +LSlitukqdRLuI7nGbv7M25FlmOqCkpr4HOEfYc5hGh5nask6LpNKqoyUpfofJG8Z +lHAy1ZAGQ+SpYNuN2BVaj+XhEM6+tHLo4li8z3ovWPlnKIJRXLjLDUDtXnIZON1E +YrYubKZU+zV0QkYxFDOSiJqNuLxNFP9pfAj8LBsul4lKtdaTVTx69L8pfA0XqNsQ +xj3XkXUVbpK7OwfgpETuR+cho84DTCFF02IgkdhI4RbqxocVSMcX4y2qOjyQCAsz +/sqIe2WXKjCDGzUj2CkoUMinP6iSkTPik+pB0itB6ioih7MfCIWB0pXK5JhtOVNO +9j1ryzfmDOtU4gfeIDhx0pz+LfXy8bsAP49xkCbTbqSE2Y0tXvedcJ9s0wpUTsxj +ckm8eJYv6xCnJWxu3zrSxxiEsokCHAQQAQIABgUCThzZKQAKCRAWNxc+iholSNZs +EACMrAkiQjfEft8LXkSs9gsV3a516QkmdHUn1jAvCVX0QBvT/jsGDjyjIh3euP7+ +ViSNRVfPkcc4kxn2E5Ji/xvVffBAAvkiZb7OJPqDuiqMA1GqESgJkkfybi+SPRgn +lLUhsdBRiioL5TcVA1vQyZ8bCw+JWFMbJmzi9JnSPhE9offEeTa+8qFaSPLrNd5Z +XXwD7Gf/XiZxAlqKpLK68EJEH/oSjmmNBF2jhO8j/hn+sSPt8hyG0BQxv7LiC1NW +eSW6qNpcE35c1DIf74bGeL/9yHvjTI5VRj+NRdYdrplv4pJpDmMwsohdkrqS366v +vaN28bKfFm5jJyfzwNV19cvuBzSp6hpMD+8JX4DrXRpdN+C0BsNFC2ktCerM1iQ5 +ItnYMEyZ1xdP8Bk8TNCEWWxLy1C9apx1W9Wj4vUVnQ46fowV1u6IgjExIrAJgqvE 
+qQFWwPS+CDSO5TVpmyHOjMg7PUxtECSd8rFA4rSg4iFJuiHWaRdrxUbfGoivHU69 +kP7bep0UmPDWRrWmJm4I53Lb1iE4kA7IRBOiAg+/xnRZB0iXPZVQbvrPwHQx1lf6 +/dhoJpA2QbGLV3CH8wMsCHy6yY/ELTzqcf6hGpZW6mOkmBdJ9Ztgn2FSHDtH1Gr0 +kdI1VTfRo6FphgxKrATHN3IfN71jqUIM/1rSSuPOvaQlOYkCHAQQAQIABgUCUKWp +ugAKCRA0D9dJIgkDuSsXD/0SS7L4ooclz1FDXCYaP+Xr4lrKf8kUW+W634Viii9U +Xx0zdeMOF3T5RW7tlNHLty+9bTqfAOqQmnKM51GSQVxiqyzSZMNIXmFfrKJFn4n5 +hbqGtsvM63AV1x+QWpheSPqbEtrVTmGnoP50zD3vvG0MKAF6cmiVJxuRV3HeIqjo +H4BWcBuO7H1CLLn51ym1YALzvCwhFcGXvdYlf3+fSCPppiPTh00X+7LB4ethZ+A4 +VIyTv6yh2J7aiogD3QY+1xDUqoNAKBSZmNoZSOv3TNLvMSDwqGcNPkHnrg/dfy9n +cWryQt/hlawJa8Ta/zJP76ZleZgd0cWQYCs3znpwK/7j6tIXNRd4vhYXA+lQuEVZ +7BazLo7TLd8a9j5NlxzncK3wpoSPy03mpdz0za9sumYcMP2Zqexj0Fpx4ZlJMQrE +e5+ZwJOCHpVbqGlzohImhz1ApyE4ga0/TZXYmMcwFsmhMfAo05wWElzK1RYjbkDL +d/LOy6cTgPiGIWRsbO2JfgaLV9L/UuA4WYWBFWO/qKeMcQtParw3eJXKWgHr7HPC +WlbJXJOaYmBWW8BCxefl/7DYYuIusgPmQ4eVBO4dOY1uG9fUi6Q/h9PEh2JQ8vtk +YwGtftmSi/b1qN8e29JfC1BEqOExIgYkxDPiH2alsFBVKkaWxTeuxAOUxIS01dDl +2okCHAQQAQIABgUCUKXMkAAKCRAEmFI9FvE0gGtLEACsHlkCFlnaC4J9J36fAcWy +fViGa5arDNGlHfkPdUhe/aRHh5s2V3gqrfqD/+sx5R24srIHdaNkyqCmChnK4Rsd +UWn4/V+uJKreoMRv1LC51Ibq6s9N6vbbJ8EYlIxpdVCG8UqsOzmBIsUCgXnrexJI +JNvQ3uXg1k0mU+ZHGHUr3Q+sS03jZSf+cWdp35ZESHW1LltkGt0+cQYdisPO51BO +DsglwGOaisEMchXm+0NZdzFlbsJBss0581/uBE7OwVx+/uLhKnFdeM+5lUV64j5R +QarcgLOmzlyqvG1idEu1dhOfg3CDXGnA+6ysLfX8wgJCR8nVG9iEJh51RwYOLy4A +opIhpWAvV+IDpqocgoFE6R8AvWYWffcFR7Zi6DliinvtPj4wP0aRxiMVjLg1GRDm +fK5Du9rtHtHfq9ITNj0g07GbcUDPZKOHsHynsVQ7j7pDFDYODFPDAvwpT9OSaD25 +Y/1wkpFobT4dRwJcycBEEGhu0B/bs6PoRmFdu2wTG8Wgv7qXV6s52sY+oPiegHA1 +lIn9xUBh2Ryki/OmH+mhxVgdM1ilbORSsSBHebxl2IPEi7JoLgAZb1LgWs8PRGof +Gee5s8AwVGlZkBQRsIMZT6vg/lqnJ07/sgUXbhIAYtWZx+E5fmOrj49oZVBfR2Em +LZ1RfS4w/vwqakafr2cDRokCHAQQAQIABgUCUS2uQgAKCRCrpzM+yTg9Q5OED/oC +JLYLtjx7ajWYoijqagjvFGcgT8Bfo2h/0Hygr06nY4BT/LPCzYuPrX7ERj7s8vDc +sZduYuujFOI8MLrcyK2FAI1ctsafdjCEyQ4lq5nRot30c+jkdOhWfNyNWIEjneOx +WfssXEcmbRJl98AhW7zDtiu0FIuEYW+GTnJGrMNi1qdrDTusMmi90qH6g41Y0lli 
+cZOEhaN7iRepIaX8rx+VaJSMB0RuPybMO5Te5Cu/naInVweEe9zbx6mkwGFFq1bG +B0O7bw6nREzYWPLL3yHG/jWnfcrTgEkOnpN678lcBVlYa4gr6ekLOtLaK4Va3eYV +awvIX8ftByOKdccbVGmRl902nf4Oh8ZiRxkqJIjhcaEMI5eSvv/GHyGIPeL8qYT1 +5vRD3doEaDxYByX0f/bj0qUOjmkuKfACZ8SlXWPZ0Rsv/n4kKyZPls1gP+lnykDg +SB7XB7jP7PDj/Z1nZfzrLI+jFjgh3BUasxm/nJZQTI3c6cG8PCo0xZ1KDiNOUVFv +sY9IiNotez7sxwL20FjDw9VIy17ZbvQ/khPc2xQYS2nc+du4gTcItWeQw1j1Mi94 +32vCGAQXyMQ5K02Ccuio6dF/7SFnLcZw6Oz6HGEBLqbVhBx4bev8t0+OqjJiEkgW +0Kj9xzp9wrfcDPueSaFY5U/2XTbpeauzQzsK3CoH5YkCHAQQAQIABgUCUS6nqQAK +CRCCixojWGoQni5eD/45/Wx3gCX9vrTVnLA6+d0xi6EcS9A9aUGirHuejMff2fI7 +sBaYrelmnwpmgdeSNYe/v6StlATffFDLuMPJCowdpCu8Wp/BzsVg6LmQexoOR+Fn +Ms92FOQvZolpzcwbnNqPdTK+NjAUC8ywbu95Tv0SlIXV3iO6IoSgv3VaWw3lDepy +Zi1uDf3ac9RGCB8LIFIz5BfE2/Kf/o3PDh7zL/9xWcbolOje0LrMuwCDB7PyDBr7 +CVTps/MyaWrI05nNADfHLj7vOc0f3f1B5V2DnJnrdgrHav6Zix+tD3qgkG+rB9RU +0QhG7V1LGu1y4agmxlR/xEHTHGFuavq6f2xl0/Rly0ZwF/Zhbs+InQBnKDa996Vw +zXRB4vpDHu34ZC3var8T/guemgp2Lswd2RUt2R1QDni/3Fb12WOxhskNqPi9Dx+3 +FSE/GWyeBftoUBFE64Qtb2sYiSTRXKh40HiHRE6KyYkhzQxCVJHJEJgP4yuEl1n3 +D8ni4F5vOLrUTHfmEagZGG3z5f9YWN0kB4aT51Xfcv7JH/fhclw89Ui/LYvtX3Un +1QydaU7sFq17NAUuLLzGFn0aSLbC/wBI/PeFOADZ7/70e2mEEJxYpOPG1JXNbIlw +lA5etsDP+L1ra106aRUQFQwi9c1Gl8LMXgL3K+0aOGz4Vmfctx4ZHE816WwJJIkC +HAQQAQIABgUCUTAldgAKCRCLnkN74bXPzf3cD/4uja2dJeeR+XdU5iLSyL9tcuAp +ZNhqu6SkKW4/16oj+Q5/23IAAm+fmbDUjlxwh7Up8+mVkXeMhWWMVlza2CD5lHxv +Akr9H7wldieLYLxt5RgcyAjScN1w/XqDG1pDSN2YyUU+F7GOZFv2oHhS2iDCDHGI +23QSbRha2tkuAk8eYCuGNOuklkGSf+AntryoV9h/4leFNKAN+pcWhevbvJqplu5J +boOBXEdKjlILYRiP5GBwXucrYchfjXB5KcFMWzKeafWh/UUC2mqLe61nIy6RRILZ +svzyg9+8fStrs/iqon6BpaFHGxX0saGJWYQkuN0ZZJNx2rtys7xrqgyN+ZHDH/gH +2JhCoNsJOWfJZyEdrKy04jkxavMkV9ts8tybn01AOM06ERQIfBRdhv9z/quHkAS9 +LbRJG6t5BrORhw6OpdBcZouHTqQZGT3dP2+nZjylyZnFjdrqbZ55SYytchnDRdpb +b1En1a3zfWaCpeeZbaNNlz0XBIMDdsLa+XpkMRz5SWoz9eLTd/azXuyfiejQPNm4 +u/CldByi60RdueQG2EKLmeGqigLR5YOL3r/xr10LtjVsZB1CLGni9UKnWw2B5pz7 +BuWxdsAFtBe+E8eI2h228KjKR71NruqayrtolpOh3Cs0dqW1Hn3R4fPSGVL0a4Zw 
+/YO/L4r7kMtQcvpixIkCHAQQAQIABgUCUTCtXgAKCRB3traank3Ma8ptEADBQaFL +dm6bycOsVWTcW0Dru7+ILSA6JzvBHNMoUC1Qh4kX4jOEljNFrvqpELhrXUKXRLbb +P3kDLgE59uO5+b95/ykh+O9vLCE0NyBH71Eduq6bL+3FLJLRvtIfQTpQH8zKl5ba +I3APXlc/vOf4qbzCoSdgOrVnR9oRRfbjOwGynr+koKl9svUgkFe6/mxDbfxKlzqW +Prdn7ct47rumms1TScixsecjYniXllIRjaTD1Ck2fvFRGUHKMJdUIqXf689/Husu +9B8Jq9oaAsvmsRqn1OHam0UWAeiTF9hjXu82gcowaev48Tu09Kjxd2X5TWe00O3r +XSQldi5MGVsGXRVKIPTPCZm1UkUpEm7aDwqySlHn99z2EumFBTnQpUfQ3jwbVyWd +LCabeOlrTvJ5nJ4i889shv+qMpJPlR4/nl7kQoqfanSOPxazvZ95x4IVJbXg62VI +vheM57r+VWZGKMfEvOLmaJ1EWAArWLU+F8yZUlA0rNVkPT81ARRxuYfSfHNhno/e +6pFgoyjZjTNsAmFFu860TUmLH/n13xY9ZJjx5FlUg3WJ16o3S2tBEBIicUH2x14Y +egv7+PazZoaJmSZp+K/cD0wJpZicdOiywnpuQJQ66wVO3nm+E0z8ejhdoRiYNEMe +kHp6mHBiMcvg+viln3jb23/vmbcp0heUNwA6LYkCHAQQAQgABgUCUTUfAAAKCRDz +rVyUpn9wfuUWD/0SoAPSrV2lBtHvOgfOE1VFsKN1XOicxuIGOfzib2VfhN9yREMn +B1VAU9Xi9iGnfnhVGKEnY8h6fjZP8X6JFFDUgEZqLhYpVIBe3HUbhzEDQzKJLyte +G16RVRhSEqzI4GMAgAW9d2OUQTIug18BOLVliQHNyfcodTWiYSOby6ruCdF6ZHzQ +lX0d20SGNDf3MJ2WIUaYWXuKyOxpzqryUixKllm4l2JurU5tg1Dv1uN8AY4NHoqf +6jKiJnSCDZVqMF8mbMycAyne0BrOEnqi3sERCABOwBs5TrgN740Tm2CI3oUdwH8v +NpmT+xwqwsXx0NnvOVXEB2K0bfjaWKIL7KNM1NhhWtA5fY+zDfvVBgW5CE/Eix6H +xVl4QCDRZtD2+pqweVUJu7Mmq4rMCPiYjxTTuYIhSa0TdehDh3UHh4mA/tJaI3Ca +c/2Agoc9qTjXsxo7mzFNF4KXSUrULn6Zd26vQHKajTZe7+UmJpAJv5bQlrNLGYb/ +kS28SApw+rFVYYQgt7jP8nev1qcAY2JN9sGnI9EConpr8U06jHIxBrE2VJryDDs7 +r+AOqvvaAQ+g13oLT04NGOQ+bYXj0Prd11Wl7GCtxDpeIk2DI7mNrEFonxV0KvXP +JTkZdHS8GrhvqAxdsaciwKcQiRJBvWP+E52Jm4rv2rSdAZdlQjcqtfuG/4kCHAQQ +AQgABgUCUUojmwAKCRDzrVyUpn9wfm5lD/9nfGS8LnLVy+5S/4M7TroBe7oaBRZK +aJtJ+tzJNwS/ZbjL1NTxM0CWqlHU51bGtqe4+vkUviBKux+MXt8qEouxpqCEt1sb +GlVPVY8cTPS3OZ2FqMPbeaD31MsIgX7iaihsu6ENykK671EiA3CFfFZrdNitqkM9 +tou+ome8Q9o6v2DicsdnLAhDbWzVuhwsz1eIlhfJGIg9uIqhLDAMO4S8SOpL5XLY +cv1kMZtOqqj9B2i95gFZrHnZ94M3QBqYzuJZm4NuB9oVaqpc02QXnE/F0T4ufC6+ +8p3mCc+maMoWGp0hELsoUnBpLiK0+6ifmypiuCXUBRqo9TKt4imCs1POrDjZy1tz +23WN3jHa3C75EWjezOx4TlX3WrMHpNIqjRo671u2N64KZ7unxeAUTLtGQLiOXjK8 
+NiZ435DIQjwtg9+qRr8p+4AE8REy2+HlsVlz5k9dt6YrdrQGpwqyOqnKnZ2z9u0M +trek4uYTo6IBZ0mw2v4GCbY5Mbz1FZ4ljiKhWG2Bdzr93cq4qVtbztiBRfudfZCM +aTyWXxEZqz0AXx9QpIqGLtNgGP1/Z7ZMJLMctVGA+N4ovd+yIpt1AcO7dJPsBnu1 +o7vY6NI9iNK81SgK4wT4hVGMH2e3kfXuTJ46+uM49UgqwJyYFrKeB1jYkMwG3FI+ +/xPZFRI131C/VYkCHAQQAQoABgUCUTMMVAAKCRBvDNrnALaJnTHzD/4yT1B0SrrJ +rv/LryD50l3hIpKENPFXv032JVA9Ej17YW0rHXHDTMi3YOEeZQtcR0ZISc5OyuI9 +UL28raeJvZ5Q+TT5R72BwTuLc7BHf3dmi4zxxJ651lcf1V4jmDwwcnY0cgXKN/Vw +hKOSQ7Db3XHHrm7t9AuXc7O5P6h2AsywgUMA+grYCgdculypwgHTpDDGhN7g6GAo +HXbmcXSilnGxOm5plGCZ4ew4kjvh5qNpHHBanvG760s0QDfTgTFbeyuk+x6MgIy9 +Tw71F7CRNBI3lPPL1ecYRBG6e38fxtGyTJtoyPPFJA5GQ9DUZJZ8NHqyiur10kr3 +IuK/Q+8xwO6xYI4raMyS/DVOootLjW0mL3oKPpw2FO9Kuw3TOAvNyNroG7vXDb48 +OCy4aSXxdr6A/v5XDFvPeruBUu8no8KwYhcFBOfUVFZbwUdZ2GMjIWFwshi5FJqq +PIU3erp3bwJV7H1QuoZswvDWk+Y50GjJrWIK0t46KyFSsU35oumtqziXolDoxwsj +6oZoMDdwNU1G+ULhFmldYkaDeQXezZLBKbiOno7Ler8QcAmjnE0ixW7McAMkHK6d +7CtWS6drrXteNnfA6M/pqMnp1DniGdLWR5n3wm4S6WKjZ7WjS2JFkp3OxAw/JX+a +SlrBghA0Z3wxjlQ1aNCbnzxN8JKZk0IenokCHAQQAQoABgUCUTQL+wAKCRBi1I+t +FqDeAd2DD/0T4KYOfQo0ZsYmQAHAjw89TXrfnUs7PutjZ4CSWut19YmVXozYwN6M +SMyLC5FgtPWNVPjSogEuUmWBoWYy4yLYo8v0xBhuBPe6jHR7n9qRa5X+61nYj33w +B+p7sQSwKlDPvcyKwOW83aNnXtR5H8doK0OguzJ7/VaRgM1VgB0KA7rcsC2JMjT/ +zvyufoIOhaPE+NV+49Uz429G1VNrdMvt9rNxB/MsuLv2mtLSnb6WJxaki8lsenvK +19TCR3fp/HiWYmlv2gn/OUmzjJYVN5x6G/vdILWG/dP0e4YUnc0mRGyifB9eKY8Q +xut2/j5ukd3c1VFuQf/cf9KCJsUZ6Zr4XleTw7ASZluaG1Bz/HJXzHeLPkbtkVCi +E032+ylVNXSuGYukY8SW1uPpk+clc/f1f9tXF/TC/sz2SsmdvVrmDQkwlZn94YVC +HuyT6eV9KC5U4SlHlAOIj9nrzV4+jlfywpnkVn0YKddec+q7sv5RAetYMVgDN9iN +Eb3L9K/aEKY1KHMDnBjuOeAzDaoZp6HX3K5UHixgNreP+JqpJeHxSWNj44DtfAbq +DEva4+yTQIeXMQuZDjP4eujTtFmuKBn81apaWvg0aVt+NvVC07Oekm9si52XVYyy +vr2asE2CxNLpfNlLH0upIr9+5P39ZesXT8ZTV9w8NPk3KBfaBPPFnIkCHAQSAQIA +BgUCUKg1EgAKCRC1dIlngK8H0+tpEACNXIUigCl4qQsdogr8TlkdasnDqR/n1+k2 +i9MD2hKWEHKQm6qklrPu8IhOD7R+wAty0575tyn9lsbMPDE9kkIDCJGj5zNsiCzo +QHi3Xvvmou3qFuD6upsdEdGJ4lOorobEVVOq2D92wbjn6FlBttTPR6uTQjDuDSnv 
+tMZ92YKPY+nug0xT5udXJ2RTbiIs4oYCA3WR10kBIoHLca3DBryyPyjad46aKd+i +3mCcswRSKmA16icSUSzr1Ss69bbHoxmc50B+E9m+oXIZRcuTRD2fB8JIYe4mLnG8 +lPglwoGTJkVaXKHFrXJ7FhvNio/ZcN7STyjCTWas2LyNAjfdSB1IgwMRx53XGcJY +ke/i8cGLENcNRJuvVGlHYU1Ro8F/T+lcV9wWUPG2SIweFlKVL0W+GCFKQhxwv9LG +tKgz47MhYxW1ysgnqTy/Zs8zbVk1QGWXYmYjJMwqWYX0T5KWHLGiyo1PPjJWrK/K +fJ/w0U9XPnsTiDG+DcgRiF5lV8DEojKXr6IXyjno7WesipYRUu9vo7ja/nDSkJl9 +pMxT2SNzaleUo99YM6g+uPIwvYAu90OD1I/rdeg0SxXIixgUdwnDWB9T4nE/+5KQ +cagPAKWTGVVv4vYR4oDfMgBk0ySt6EGx/hQGFiyMNLbFnl8mEZl6vDbEslRCwfDX +jsf2RwzEMYkCIgQTAQIADAUCUKWo/QWDB4YfgAAKCRCfj+r+3XyLpFhuEACzgqV6 +Agg/k59g5wOoy6xIrLhV5ABSkIScqLurd/cFlH1Q4+25fSu3D3PHDGGTp0NXmBpR +k167PNlOpG/hyI45ppDCDNp9QA5ckHF8/kecU+uViixWdiU7qrYN3QM7uwxnyVoL +yMC6W3c9hLMZow5QdbfC/Wn2vOVpS9Tu04WHTCel1W3fjGO3mLLuIZsNCK+FDirX +KfnJLUmZqzZnFSW1snj5EWGo03jZ3VsUlBuljdWbFZpasKLtViZI3/v7OpyxyNjS +lJdRfdoEbRGOHMqYrhKPYUxS1ivYSfs/y/FRTUaag8hkazA7hHz5pSf/+vfswGyZ +m6NuCT2ChvFwyyYP/MtSUQlO5Th6jUbK7KTU8GFHdn55Q4eekYZJPHuPhQdLJslq +kvnpoIbWk2e41megXqaA2QWmRrvThjS0ujBxfjZPFF/dkHBYyK6aTG9K8nMVlK7g +ptEcf5bw/bIGO68O/bKMf9MHIw6PDNm7gR//bzIj0Ue9NEWO8sywXcHzIIHeGH5y +qVuogBG60b4e4LbdZjmPmjkmoco2vNgwNzc662AWRhKQRQDK2b/mbNDeTEByBowY +xJCEj4Bx17YT286hgORAOhKs7NtBEIkID/E8uU1mlOFwg6MbPphj0envnteBlYeB +BbuySXxXOcY8ENm+whazIFxONZF6nfC90RXPbIkCOAQTAQIAIgUCTTDL9wIbAwYL +CQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQLAdUss4GkvPWDA//X025fkyDfP+G +NXH9P5q8GNadsXS0Mw+pTBPP8NUIIDrW0pfJd5KL9Hwrqu2FuBpt+/p121+nmi7i +FLCRBwAO6pyDc/EuGjfcb3nfzKwUw3/0nDjyZVrbFuwFZ3TWKiZqTFCCwgjv+e2z +ohX5sMEJnnNF7ivq+e4k7vgRBOrbRMUTttigVwvpwrR9iVO0wSHFxzqUTSkdUzz4 +X8N3f9XI2K+awVdkeUgjcrdm7uc2+Qs/fws8CYzxzXP2Ut8Xa7LKL8nBfCiuhTGo +pHzi+Qm9EL/CLT+aB8GPWAuHItpJseACsnLqPWS/AToxZGUXkQR88p9rdpGn1vt4 +iwro0Xmrdc+7y7MPYZS/77X5OE4kpmKrlIoOKMa5SF6MUBkkK+cswh6vOXOAfQKM +K25JsBo3I0JbffWadBEIW46FrTHlqmvJX94iERrwrqe+8D9iRK2Df5k4o61PO3PY +csD3q6jGyJ5bGrJn0tCpjIw5N2YmUBkDLBXoHVD0seUzdtJK1dmS8oM2H6SETmQV +KPKM6iMvpZT5Or7QuBMFg3EzXWRoYRuvKMCa4p2sIonuImfo04sbtQQX7CsgKxvy 
+/TWK2piI8fPoIOnpRFa9zRjiqE/1ooAKRwY3aUJPxBIT1oCC+j+WnGAwuwMFFQtm +b3S+Da1vhA5GA+l/jy3smnPk46nvBjSJBBwEEAEIAAYFAk3Q8bgACgkQiq+I1thO +Qa6zBiAAlrmu8Yt+knZ934MFNdzIkTbXuVGh4Y5ADE7BVGRJuupUzRDgp36uzbIp +3T3UjnBcK/mL03AJrg/QsKrsZ3jHBP8kbAYTUxefvT6I/qaEBulKehVhHZexkZXL +gk1W/iWGA2tYCNFWRdzQiZ5TmRao80+/uO5uhc98a6clYdj77z/haxVMTvZvaKjL +nJcOVj5MXjSYkpGVX5crLo3R1XxrXMfzoIqfGQ0I1xZoqRkSUhEuJFBpDTytJDMi +bQHxlXry4Y3REzLIWDq3kGWsdPQk8oI0cMDZMeF1p9/jdoXDu8PLbcC3ly1sDDK9 +n9WT70BIdh/nLPnSJ37QJswkY0jWHVfz6jfqJuYZh4OHjZvrTPx+fRdQiohFgzuO +TBlRRGZYdqQn1xm3c/wrrAtp85mIMx8dgJkly9oSr0wvHqbEUaGybc5LeN64oyyV +Cz69pvP4ZTfixqDdFxaocsOVe2qYUpWGwaS+NrchDFdCoOzgKo1SZe4UIvzUamVg +VUVOX1v+MEMKi2znXoxOFLwgKPuDxgk2LQTes24W7/YD2JK01dh5nI0fEGTWJl5j +lYZ2OPvdngaa/15+TcyrQHqtDWR+3NdMkA5jKnLRzcFPrpSS+eASQwaqJ/wPOz9a +28bfYE1Bzrh7T8yKPKAIDVSIdzuQ2TuXBrmFyF9mTxcA6MTPRIbkMHcyA5S2Y/kU +KMIHEISOehE6djQHvfE6l1+m8MJsMvk1ivUKzjs6AUmqLLgl7VZHkkI8Q4XJHjcJ +XTIVig3M/hlP6+0HLeLJ0q87clg9duFx0Sls5flF6trLSBvcCVJtt1XmghFO2o/y +8IHRAmF/VhxmKTlj1E0fyQoy95CZdvdGA9aOyIYxAH92SVBmnosecLJ+DkXRDZbe +RrBbtdh0LXrmGdvDpfDfs8BNYQ1/3vhL3CLj29d9HMWxWtRKIbHsFtGB+4HZMLDk +yjCtt91QXyL6kPcagk79lSM+lPfb6xnBx3Tq3yRciGN5f6ldEJrui2Nju5oclDO9 +tJxd1LUWwZBoGIcFAUmZZIbVYkdFraIl2T2UarTmw3eWgLAwJFrh7vYx/v0F0UOk +jKKQVO9YfiIhsiSdE/wdlfBr2z6PVTzSft/B2nP8r/j1jiY6aZW0+OtG7lPBbUt0 +bbBw8F7YkFhC9OjMjQiMEoXKbJ+UlIJPMtnKVSN5b1Qjf3oIpcOu6qIQYhjwyrAu +d0PECgz8HpnIVzJkcXGJ312I5XQiLPRNh8BmO3N2I2lRu4r2FhJ9Z2BUs974rReN +IhS73fWBj2kHWw9/93D8UnhNmyVJNYogP26/mapKTMTBEPWfQBms++m/a6e8p8Mk +8W2stScEhaS6iPfGLjzWDtA2WNe/tIkEHAQQAQoABgUCTnXP5gAKCRDtuMCCpu5p +CDDKIACNYN2UbeicYbpoU5Rkb1+FuquJRXmMTfcbrQxPxALi1gy+qst6JxaV1YaJ +htXuqY/24Jpl5nOAVz59kuJQMkLfVsqhgorDMi9Vr6TJ9XANhbD4/KlxLB5PhLXA +2EyDXx/AOOQSmxDBr1IwmUUZFjyiS5v/LmaCWzmZEm6eGhzsomxcsTpmR2Rq+r75 +zb5wC/DBuY3jPg+O3MoGzw+6fH6LGjwIt0SwAxCkhi8fuoABcTwIdpbZ1hsmKuB8 +fDQYix+Zsba47Q2IkQwrD4wP4r1pJ0oIuvJ1heGN0N9EP2LAvfPyKMTaqJudF3bA +XgaIxb2DfjfhiQNf61NcNsY+s8euCjuXS3cW9LXCDrDPijkixrE1if5sL9DM/rtX 
+ZeF8HvrxmUKunMR2wo9+Rjd6EsPJdlqTTZL00ZeWqPvsdtcK19oYj3K3r1aNYbkK +IXaMpjqRvcn6bqPLwhQEfRD8SUb6eZaUKMKfjvWAMFRDQ56NBgiiK33xzhR4fKPu +80tveeJ7+Nv9znckaOA1IBh7/bYXl5z4FgB4lpMa0uBQHRo8bY0yeKFRDbUlgfMh +SiEAgPrdofjZpAEfMREv8QgUjLPKa/6VUYn6gu5F8c++9NTiluSTRkqmPoiyjBtO +frFei6aLE06nsTOWgHE7Mw70ozfJhbxXDLzGiNGpcUyBIEuDx6iAGh922OTafPiS +FcEWGqfijf7XWKaTlVltD5dbRejs4Lxt+RQDBuhWWuRj8s3TMXKS/6tg7dV5vwkw +b6jbZaHA5RV1Yxy4nm91S4CZrdzCyCf0/QAf9ZxdfO3B/vXQhG5iV95fez0/kKVS +zDXhjygR3Ya34zoCoMxoFZPuPZExMEavRnWpaSRK6ti6o48bjHoZZjd1Japxbpb+ +WUxWpQQ1PQk17ySOe5XSgFNVXI7FdZl32MrrJ3Asj1IoaUIx0zDgPuXYuQiOcI/J +epiLSSmVvACwHMegy1gHGs/50Qi9cRzold3kNXL84Z0namTg9aEVY8LHfnRHbAJX +viexepiJTvNhfA854jXlBgo0v+UJkuYS4aCrygFF6nhqRv8ArAtl2jJ/KZgqOiPB +UikFMXmXtW7UmOLYs9PejPZ6LpAuyL4Iwpytcxw1r41W5lxtq3CbQiBDZGdW4ap4 +hxAI+jE92w86mW8KgbkOqeVR2rr5jhWwtIWadVWGoQm31249NSBj5lO9ExuVwJuK +wWQVUhOjoFkVGW1biRecssKhpTygvVDB9Y/L5ZujQyE3u/hhcd3ZUU5F3Or3+t2N +uZkR3eKmQFuiihd1E8/jYEN6dgkPg1dJoNo1vCsiQW7IO5gTu7jNmkle+Ea9rN8X +UKuEbyal2vUWQM8xyau+m76oaf1auQINBE0wy/cBEACbMTmm2P1PYU5FH3GOpus0 +xbbeT360bfFIWX1t3wCWBgfQZ7YMZOQL5NY0hITMr2Ihqxc03hcP1APvOQsPikg9 +xswueMh2oQpBby7bQgK/3fYeq4u+Q3xsa4TQqYsqUxNH3GZLJdtr4R7Hor/qvg5x +sbPWjQP51IUyp2tp4h7hAmzZ1QKmgtP8HyZHeI1HbbRLmE/o0XwEFvNL/Jjsfh0a +emp4m5HJalY61XK7tJQSYytz56sDHB1+fJ0KCN/5ToXc62fXH6quVZnMQKCLUgTD +1pViVYLo+yu+4kgIVnFBNw8+iXuDto0PVMFXYlHCGRQi+clOJxf/yXDWgR2+UpKG +8D4zZmExLmOw4Dj5m/MJvyCV4WRL3CvNVeQniCOwLlskNPvUm75uOjdIj/uOWXqM +uqjhQFjgu/SIbR5OrEv1GiyroBIha7YRhntRAlC09ECFu+NqZ00d4CjJ1mGaEiRu +E7IWyetCw/GsLoijrWl33ELuAySawDxHJMzuyXx8Oh/si1OI1d3uEds1eJMH6VNk +80VvmJkZUG+QHXLazH0KGAHFtLNii3pKj6bOSjYk3hsOdmrETPKi2Xmg9deXCf7U +G9xHuYbKFzGTF2+c8VQ3vpvAXm1jEJrQE/IFAZRLPf2EgQ1ifD30vzrzlooEcq1x +Srl8pDD3Deo28MOqQdjwkQARAQABiQIfBBgBAgAJBQJNMMv3AhsMAAoJECwHVLLO +BpLzHhgP/1N2H9+w9iFxGYT4SVPCjK5UgUGv6rtomvlpIKA8m+BQH/69pgpdU8XS +1e7quBEibPs98IcHdOHBGbtzHj/e2VUiFZCoptq4tgDMhw7YDYNtejYe+OGYBJ4+ +TsZwQVzZCdby6MGFO8eNscAv3tPRZgjVBYxs1MBexSE2iGpNB0zjW4KAb6Ga8/6R 
+o3xHQ52bpr38cXt+r6lysnHzPtGUfpZx4X7TiEvaxIrvmhOXj3jb1ctBUmqbw9Ds +v/oWsrUtIDQvLAuvrVm3ybMr/xYbFBg+C6W2kNTSi7ra93+kklkcB9/HbxU39Y1a +r1bDInFIpsH0Z9BYngNt9Y5bdg2iZPWSoAU0iGLHoUnaktMXUYstfJAN6Zu5KB0e +AAJPpOO0HXax6i1KrCoDf3lupW8gLCiUt1deHPgkRwPT+gCr9TPfDOi173k9CcIp +1KqUIfKAijTC7Y3478f3Hs9k6NbsxAI6/Nb8HV7Cn+u/4+rqUF4pf4aBMdKTlTaO +QF49TDHorYuO8s/mYGqULFwGnYV3mpzGzgz28V4fGALi27Ul1hgY/rmEohqoALNM +Hq/uIsRFKfbil09FHNZ2WxOt4oHLLlh/jIPU1Wz+BviuhC207w/iwlgEh8tpzsJT +Q/YO1BeZzXoA+3PnLvm38m1u+7C9rZP674TfWcWj/7ItB9YH/1YU +=Eer2 +-----END PGP PUBLIC KEY BLOCK----- diff -Nru libcloud-0.5.0/debian/watch libcloud-0.15.1/debian/watch --- libcloud-0.5.0/debian/watch 2011-06-09 08:54:23.000000000 +0000 +++ libcloud-0.15.1/debian/watch 2014-07-06 23:55:51.000000000 +0000 @@ -1,2 +1,2 @@ version=3 -http://www.apache.org/dist/libcloud/apache-libcloud-(.*).tar.bz2 +opts=pgpsigurlmangle=s/$/.asc/ https://pypi.python.org/packages/source/a/apache-libcloud/apache-libcloud-(.*).tar.bz2 diff -Nru libcloud-0.5.0/demos/compute_demo.py libcloud-0.15.1/demos/compute_demo.py --- libcloud-0.5.0/demos/compute_demo.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/demos/compute_demo.py 2013-11-29 12:35:03.000000000 +0000 @@ -0,0 +1,118 @@ +#!/usr/bin/env python +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# +# This example provides both a running script (invoke from command line) +# and an importable module one can play with in Interactive Mode. +# +# See docstrings for usage examples. +# + +try: + import secrets +except ImportError: + secrets = None + +import os.path +import sys + +# Add parent dir of this file's dir to sys.path (OS-agnostically) +sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), + os.path.pardir))) + +from libcloud.common.types import InvalidCredsError +from libcloud.compute.types import Provider +from libcloud.providers import get_driver + +from pprint import pprint + + +def get_demo_driver(provider_name='RACKSPACE', *args, **kwargs): + """An easy way to play with a driver interactively. + + # Load credentials from secrets.py: + >>> from compute_demo import get_demo_driver + >>> driver = get_demo_driver('RACKSPACE') + + # Or, provide credentials: + >>> from compute_demo import get_demo_driver + >>> driver = get_demo_driver('RACKSPACE', 'username', 'api_key') + # Note that these parameters vary by driver ^^^ + + # Do things like the demo: + >>> driver.load_nodes() + >>> images = driver.load_images() + >>> sizes = driver.load_sizes() + + # And maybe do more than that: + >>> node = driver.create_node( + ... name='my_first_node', + ... image=images[0], + ... size=sizes[0], + ... 
) + >>> node.destroy() + """ + provider_name = provider_name.upper() + + DriverClass = get_driver(getattr(Provider, provider_name)) + + if not args: + args = getattr(secrets, provider_name + '_PARAMS', ()) + if not kwargs: + kwargs = getattr(secrets, provider_name + '_KEYWORD_PARAMS', {}) + + try: + return DriverClass(*args, **kwargs) + except InvalidCredsError: + raise InvalidCredsError( + 'valid values should be put in secrets.py') + + +def main(argv): + """Main Compute Demo + + When invoked from the command line, it will connect using secrets.py + (see secrets.py-dist for instructions and examples), and perform the + following tasks: + + - List current nodes + - List available images (up to 10) + - List available sizes (up to 10) + """ + try: + driver = get_demo_driver() + except InvalidCredsError: + e = sys.exc_info()[1] + print("Invalid Credentials: " + e.value) + return 1 + + try: + print(">> Loading nodes...") + pprint(driver.list_nodes()) + + print(">> Loading images... (showing up to 10)") + pprint(driver.list_images()[:10]) + + print(">> Loading sizes... (showing up to 10)") + pprint(driver.list_sizes()[:10]) + except Exception: + e = sys.exc_info()[1] + print("A fatal error occurred: " + e) + return 1 + +if __name__ == '__main__': + sys.exit(main(sys.argv)) diff -Nru libcloud-0.5.0/demos/ec2_demo.py libcloud-0.15.1/demos/ec2_demo.py --- libcloud-0.5.0/demos/ec2_demo.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/demos/ec2_demo.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,128 +0,0 @@ -#!/usr/bin/env python -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# This example provides both a running script (invoke from command line) -# and an importable module one can play with in Interactive Mode. -# -# See docstrings for usage examples. -# - -try: - import secrets -except: - pass -import sys; sys.path.append('..') - -from libcloud.compute.types import Provider -from libcloud.providers import get_driver - -from pprint import pprint - -def main(argv): - """Main EC2 Demo - - When invoked from the command line, it will connect using secrets.py - (see secrets.py.dist for setup instructions), and perform the following - tasks: - - - List current nodes - - List available images (up to 10) - - List available sizes (up to 10) - """ - # Load EC2 driver - EC2Driver = get_driver(Provider.EC2_US_EAST) - - # Instantiate with Access ID and Secret Key - # (see secrets.py.dist) - try: - ec2 = EC2Driver(secrets.EC2_ACCESS_ID, secrets.EC2_SECRET_KEY) - print ">> Loading nodes..." - nodes = ec2.list_nodes() - pprint(nodes) - except NameError, e: - print ">> Fatal Error: %s" % e - print " (Hint: modify secrets.py.dist)" - return 1 - except Exception, e: - print ">> Fatal error: %s" % e - return 1 - - print ">> Loading images... (showing up to 10)" - images = ec2.list_images() - pprint(images[:10]) - - print ">> Loading sizes... 
(showing up to 10)" - sizes = ec2.list_sizes() - pprint(sizes[:10]) - - return 0 - -def get_ec2(**kwargs): - """An easy way to play with the EC2 Driver in Interactive Mode - - # Load credentials from secrets.py - >>> from ec2demo import get_ec2 - >>> ec2 = get_ec2() - - # Or, provide credentials - >>> from ec2demo import get_ec2 - >>> ec2 = get_ec2(access_id='xxx', secret_key='yyy') - - # Do things - >>> ec2.load_nodes() - >>> images = ec2.load_images() - >>> sizes = ec2.load_sizes() - """ - access_id = kwargs.get('access_id', secrets.EC2_ACCESS_ID) - secret_key = kwargs.get('secret_key', secrets.EC2_SECRET_KEY) - - EC2Driver = get_driver(Provider.EC2_US_EAST) - return EC2Driver(access_id, secret_key) - -def create_demo(ec2): - """Create EC2 Node Demo - - >>> from ec2demo import get_ec2, create_demo - >>> ec2 = get_ec2() - >>> node = create_demo(ec2) - >>> node - - - And to destroy the node: - - >>> node.destroy() - - If you've accidentally quit and need to destroy the node: - - >>> from ec2demo import get_ec2 - >>> nodes = ec2.list_nodes() - >>> nodes[0].destroy() # assuming it's the first node - """ - images = ec2.list_images() - image = [image for image in images if 'ami' in image.id][0] - sizes = ec2.list_sizes() - size = sizes[0] - - # Note, name is ignored by EC2 - node = ec2.create_node(name='create_image_demo', - image=image, - size=size) - return node - -if __name__ == '__main__': - sys.exit(main(sys.argv)) diff -Nru libcloud-0.5.0/demos/gce_demo.py libcloud-0.15.1/demos/gce_demo.py --- libcloud-0.5.0/demos/gce_demo.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/demos/gce_demo.py 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,309 @@ +#!/usr/bin/env python +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This example performs several tasks on Google Compute Engine. It can be run +# directly or can be imported into an interactive python session. This can +# also serve as an integration test for the GCE Node Driver. +# +# To run interactively: +# - Make sure you have valid values in secrets.py +# (For more information about setting up your credentials, see the +# libcloud/common/google.py docstring) +# - Run 'python' in this directory, then: +# import gce_demo +# gce = gce_demo.get_gce_driver() +# gce.list_nodes() +# etc. 
+# - Or, to run the full demo from the interactive python shell: +# import gce_demo +# gce_demo.CLEANUP = False # optional +# gce_demo.MAX_NODES = 4 # optional +# gce_demo.DATACENTER = 'us-central1-a' # optional +# gce_demo.main() + +import os.path +import sys + +try: + import secrets +except ImportError: + print('"demos/secrets.py" not found.\n\n' + 'Please copy secrets.py-dist to secrets.py and update the GCE* ' + 'values with appropriate authentication information.\n' + 'Additional information about setting these values can be found ' + 'in the docstring for:\n' + 'libcloud/common/google.py\n') + sys.exit(1) + +# Add parent dir of this file's dir to sys.path (OS-agnostically) +sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), + os.path.pardir))) + +from libcloud.compute.types import Provider +from libcloud.compute.providers import get_driver + +# Maximum number of 1-CPU nodes to allow to run simultaneously +MAX_NODES = 5 + +# String that all resource names created by the demo will start with +# WARNING: Any resource that has a matching name will be destroyed. +DEMO_BASE_NAME = 'libcloud-demo' + +# Datacenter to create resources in +DATACENTER = 'us-central1-a' + +# Clean up resources at the end (can be set to false in order to +# inspect resources at the end of the run). Resources will be cleaned +# at the beginning regardless. +CLEANUP = True + +args = getattr(secrets, 'GCE_PARAMS', ()) +kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) + +# Add datacenter to kwargs for Python 2.5 compatibility +kwargs = kwargs.copy() +kwargs['datacenter'] = DATACENTER + + +# ==== HELPER FUNCTIONS ==== +def get_gce_driver(): + driver = get_driver(Provider.GCE)(*args, **kwargs) + return driver + + +def display(title, resource_list): + """ + Display a list of resources. + + :param title: String to be printed at the heading of the list. 
+ :type title: ``str`` + + :param resource_list: List of resources to display + :type resource_list: Any ``object`` with a C{name} attribute + """ + print('%s:' % title) + for item in resource_list[:10]: + print(' %s' % item.name) + + +def clean_up(gce, base_name, node_list=None, resource_list=None): + """ + Destroy all resources that have a name beginning with 'base_name'. + + :param base_name: String with the first part of the name of resources + to destroy + :type base_name: ``str`` + + :keyword node_list: List of nodes to consider for deletion + :type node_list: ``list`` of :class:`Node` + + :keyword resource_list: List of resources to consider for deletion + :type resource_list: ``list`` of I{Resource Objects} + """ + if node_list is None: + node_list = [] + if resource_list is None: + resource_list = [] + # Use ex_destroy_multiple_nodes to destroy nodes + del_nodes = [] + for node in node_list: + if node.name.startswith(base_name): + del_nodes.append(node) + + result = gce.ex_destroy_multiple_nodes(del_nodes) + for i, success in enumerate(result): + if success: + print(' Deleted %s' % del_nodes[i].name) + else: + print(' Failed to delete %s' % del_nodes[i].name) + + # Destroy everything else with just the destroy method + for resource in resource_list: + if resource.name.startswith(base_name): + if resource.destroy(): + print(' Deleted %s' % resource.name) + else: + print(' Failed to Delete %s' % resource.name) + + +# ==== DEMO CODE STARTS HERE ==== +def main(): + gce = get_gce_driver() + # Get project info and print name + project = gce.ex_get_project() + print('Project: %s' % project.name) + + # == Get Lists of Everything and Display the lists (up to 10) == + # These can either just return values for the current datacenter (zone) + # or for everything. 
+ all_nodes = gce.list_nodes(ex_zone='all') + display('Nodes', all_nodes) + + all_addresses = gce.ex_list_addresses(region='all') + display('Addresses', all_addresses) + + all_volumes = gce.list_volumes(ex_zone='all') + display('Volumes', all_volumes) + + # This can return everything, but there is a large amount of overlap, + # so we'll just get the sizes from the current zone. + sizes = gce.list_sizes() + display('Sizes', sizes) + + # These are global + firewalls = gce.ex_list_firewalls() + display('Firewalls', firewalls) + + networks = gce.ex_list_networks() + display('Networks', networks) + + images = gce.list_images() + display('Images', images) + + locations = gce.list_locations() + display('Locations', locations) + + zones = gce.ex_list_zones() + display('Zones', zones) + + snapshots = gce.ex_list_snapshots() + display('Snapshots', snapshots) + + # == Clean up any old demo resources == + print('Cleaning up any "%s" resources:' % DEMO_BASE_NAME) + clean_up(gce, DEMO_BASE_NAME, all_nodes, + all_addresses + all_volumes + firewalls + networks + snapshots) + + # == Create Node with disk auto-created == + if MAX_NODES > 1: + print('Creating Node with auto-created disk:') + name = '%s-np-node' % DEMO_BASE_NAME + node_1 = gce.create_node(name, 'n1-standard-1', 'debian-7', + ex_tags=['libcloud']) + print(' Node %s created' % name) + + # == Create, and attach a disk == + print('Creating a new disk:') + disk_name = '%s-attach-disk' % DEMO_BASE_NAME + volume = gce.create_volume(1, disk_name) + if volume.attach(node_1): + print (' Attached %s to %s' % (volume.name, node_1.name)) + + if CLEANUP: + # == Detach the disk == + if gce.detach_volume(volume, ex_node=node_1): + print(' Detached %s from %s' % (volume.name, node_1.name)) + + # == Create Snapshot == + print('Creating a snapshot from existing disk:') + # Create a disk to snapshot + vol_name = '%s-snap-template' % DEMO_BASE_NAME + image = gce.ex_get_image('debian-7') + vol = gce.create_volume(None, vol_name, 
image=image) + print(' Created disk %s to shapshot' % DEMO_BASE_NAME) + # Snapshot volume + snapshot = vol.snapshot('%s-snapshot' % DEMO_BASE_NAME) + print(' Snapshot %s created' % snapshot.name) + + # == Create Node with existing disk == + print('Creating Node with existing disk:') + name = '%s-persist-node' % DEMO_BASE_NAME + # Use objects this time instead of names + # Get latest Debian 7 image + image = gce.ex_get_image('debian-7') + # Get Machine Size + size = gce.ex_get_size('n1-standard-1') + # Create Disk from Snapshot created above + volume_name = '%s-boot-disk' % DEMO_BASE_NAME + volume = gce.create_volume(None, volume_name, snapshot=snapshot) + print(' Created %s from snapshot' % volume.name) + # Create Node with Disk + node_2 = gce.create_node(name, size, image, ex_tags=['libcloud'], + ex_boot_disk=volume) + print(' Node %s created with attached disk %s' % (node_2.name, + volume.name)) + + # == Update Tags for Node == + print('Updating Tags for %s' % node_2.name) + tags = node_2.extra['tags'] + tags.append('newtag') + if gce.ex_set_node_tags(node_2, tags): + print(' Tags updated for %s' % node_2.name) + check_node = gce.ex_get_node(node_2.name) + print(' New tags: %s' % check_node.extra['tags']) + + # == Create Multiple nodes at once == + base_name = '%s-multiple-nodes' % DEMO_BASE_NAME + number = MAX_NODES - 2 + if number > 0: + print('Creating Multiple Nodes (%s):' % number) + multi_nodes = gce.ex_create_multiple_nodes(base_name, size, image, + number, + ex_tags=['libcloud']) + for node in multi_nodes: + print(' Node %s created.' 
% node.name) + + # == Create a Network == + print('Creating Network:') + name = '%s-network' % DEMO_BASE_NAME + cidr = '10.10.0.0/16' + network_1 = gce.ex_create_network(name, cidr) + print(' Network %s created' % network_1.name) + + # == Create a Firewall == + print('Creating a Firewall:') + name = '%s-firewall' % DEMO_BASE_NAME + allowed = [{'IPProtocol': 'tcp', + 'ports': ['3141']}] + firewall_1 = gce.ex_create_firewall(name, allowed, network=network_1, + source_tags=['libcloud']) + print(' Firewall %s created' % firewall_1.name) + + # == Create a Static Address == + print('Creating an Address:') + name = '%s-address' % DEMO_BASE_NAME + address_1 = gce.ex_create_address(name) + print(' Address %s created with IP %s' % (address_1.name, + address_1.address)) + + # == List Updated Resources in current zone/region == + print('Updated Resources in current zone/region:') + nodes = gce.list_nodes() + display('Nodes', nodes) + + addresses = gce.ex_list_addresses() + display('Addresses', addresses) + + volumes = gce.list_volumes() + display('Volumes', volumes) + + firewalls = gce.ex_list_firewalls() + display('Firewalls', firewalls) + + networks = gce.ex_list_networks() + display('Networks', networks) + + snapshots = gce.ex_list_snapshots() + display('Snapshots', snapshots) + + if CLEANUP: + print('Cleaning up %s resources created.' % DEMO_BASE_NAME) + clean_up(gce, DEMO_BASE_NAME, nodes, + addresses + volumes + firewalls + networks + snapshots) + +if __name__ == '__main__': + main() diff -Nru libcloud-0.5.0/demos/gce_lb_demo.py libcloud-0.15.1/demos/gce_lb_demo.py --- libcloud-0.5.0/demos/gce_lb_demo.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/demos/gce_lb_demo.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,304 @@ +#!/usr/bin/env python +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This example performs several tasks on Google Compute Engine and the GCE +# Load Balancer. It can be run directly or can be imported into an +# interactive python session. This can also serve as an integration test for +# the GCE Load Balancer Driver. +# +# To run interactively: +# - Make sure you have valid values in secrets.py +# (For more information about setting up your credentials, see the +# libcloud/common/google.py docstring) +# - Run 'python' in this directory, then: +# import gce_lb_demo +# gcelb = gce_lb_demo.get_gcelb_driver() +# gcelb.list_balancers() +# etc. 
+# - Or, to run the full demo from the interactive python shell: +# import gce_lb_demo +# gce_lb_demo.CLEANUP = False # optional +# gce_lb_demo.MAX_NODES = 4 # optional +# gce_lb_demo.DATACENTER = 'us-central1-a' # optional +# gce_lb_demo.main() + +import os.path +import sys +import time + +try: + import secrets +except ImportError: + print('"demos/secrets.py" not found.\n\n' + 'Please copy secrets.py-dist to secrets.py and update the GCE* ' + 'values with appropriate authentication information.\n' + 'Additional information about setting these values can be found ' + 'in the docstring for:\n' + 'libcloud/common/google.py\n') + sys.exit(1) + +# Add parent dir of this file's dir to sys.path (OS-agnostically) +sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), + os.path.pardir))) + +from libcloud.utils.py3 import PY3 +if PY3: + import urllib.request as url_req +else: + import urllib2 as url_req + +# This demo uses both the Compute driver and the LoadBalancer driver +from libcloud.compute.types import Provider +from libcloud.compute.providers import get_driver +from libcloud.loadbalancer.types import Provider as Provider_lb +from libcloud.loadbalancer.providers import get_driver as get_driver_lb + +# String that all resource names created by the demo will start with +# WARNING: Any resource that has a matching name will be destroyed. +DEMO_BASE_NAME = 'libcloud-lb-demo' + +# Datacenter to create resources in +DATACENTER = 'us-central1-a' + +# Clean up resources at the end (can be set to false in order to +# inspect resources at the end of the run). Resources will be cleaned +# at the beginning regardless. 
+CLEANUP = True + +args = getattr(secrets, 'GCE_PARAMS', ()) +kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) + +# Add datacenter to kwargs for Python 2.5 compatibility +kwargs = kwargs.copy() +kwargs['datacenter'] = DATACENTER + + +# ==== HELPER FUNCTIONS ==== +def get_gce_driver(): + driver = get_driver(Provider.GCE)(*args, **kwargs) + return driver + + +def get_gcelb_driver(gce_driver=None): + # The GCE Load Balancer driver uses the GCE Compute driver for all of its + # API calls. You can either provide the driver directly, or provide the + # same authentication information so the LB driver can get its own + # Compute driver. + if gce_driver: + driver = get_driver_lb(Provider_lb.GCE)(gce_driver=gce_driver) + else: + driver = get_driver_lb(Provider_lb.GCE)(*args, **kwargs) + return driver + + +def display(title, resource_list): + """ + Display a list of resources. + + :param title: String to be printed at the heading of the list. + :type title: ``str`` + + :param resource_list: List of resources to display + :type resource_list: Any ``object`` with a C{name} attribute + """ + print('%s:' % title) + for item in resource_list[:10]: + print(' %s' % item.name) + + +def clean_up(gce, base_name, node_list=None, resource_list=None): + """ + Destroy all resources that have a name beginning with 'base_name'. 
+ + :param base_name: String with the first part of the name of resources + to destroy + :type base_name: ``str`` + + :keyword node_list: List of nodes to consider for deletion + :type node_list: ``list`` of :class:`Node` + + :keyword resource_list: List of resources to consider for deletion + :type resource_list: ``list`` of I{Resource Objects} + """ + if node_list is None: + node_list = [] + if resource_list is None: + resource_list = [] + # Use ex_destroy_multiple_nodes to destroy nodes + del_nodes = [] + for node in node_list: + if node.name.startswith(base_name): + del_nodes.append(node) + + result = gce.ex_destroy_multiple_nodes(del_nodes) + for i, success in enumerate(result): + if success: + print(' Deleted %s' % del_nodes[i].name) + else: + print(' Failed to delete %s' % del_nodes[i].name) + + # Destroy everything else with just the destroy method + for resource in resource_list: + if resource.name.startswith(base_name): + if resource.destroy(): + print(' Deleted %s' % resource.name) + else: + print(' Failed to Delete %s' % resource.name) + + +# ==== DEMO CODE STARTS HERE ==== +def main(): + gce = get_gce_driver() + gcelb = get_gcelb_driver(gce) + + # Existing Balancers + balancers = gcelb.list_balancers() + display('Load Balancers', balancers) + + # Protocols + protocols = gcelb.list_protocols() + print('Protocols:') + for p in protocols: + print(' %s' % p) + + # Healthchecks + healthchecks = gcelb.ex_list_healthchecks() + display('Health Checks', healthchecks) + + # This demo is based on the GCE Load Balancing Quickstart described here: + # https://developers.google.com/compute/docs/load-balancing/lb-quickstart + + # == Clean-up and existing demo resources == + all_nodes = gce.list_nodes(ex_zone='all') + firewalls = gce.ex_list_firewalls() + print('Cleaning up any "%s" resources:' % DEMO_BASE_NAME) + clean_up(gce, DEMO_BASE_NAME, all_nodes, + balancers + healthchecks + firewalls) + + # == Create 3 nodes to balance between == + startup_script = ('apt-get 
-y update && ' + 'apt-get -y install apache2 && ' + 'hostname > /var/www/index.html') + tag = '%s-www' % DEMO_BASE_NAME + base_name = '%s-www' % DEMO_BASE_NAME + image = gce.ex_get_image('debian-7') + size = gce.ex_get_size('n1-standard-1') + number = 3 + metadata = {'items': [{'key': 'startup-script', + 'value': startup_script}]} + lb_nodes = gce.ex_create_multiple_nodes(base_name, size, image, + number, ex_tags=[tag], + ex_metadata=metadata, + ignore_errors=False) + display('Created Nodes', lb_nodes) + + # == Create a Firewall for instances == + print('Creating a Firewall:') + name = '%s-firewall' % DEMO_BASE_NAME + allowed = [{'IPProtocol': 'tcp', + 'ports': ['80']}] + firewall = gce.ex_create_firewall(name, allowed, source_tags=[tag]) + print(' Firewall %s created' % firewall.name) + + # == Create a Health Check == + print('Creating a HealthCheck:') + name = '%s-healthcheck' % DEMO_BASE_NAME + + # These are all the default values, but listed here as an example. To + # create a healthcheck with the defaults, only name is required. 
+ hc = gcelb.ex_create_healthcheck(name, host=None, path='/', port='80', + interval=5, timeout=5, + unhealthy_threshold=2, + healthy_threshold=2) + print(' Healthcheck %s created' % hc.name) + + # == Create Load Balancer == + print('Creating Load Balancer') + name = '%s-lb' % DEMO_BASE_NAME + port = 80 + protocol = 'tcp' + algorithm = None + members = lb_nodes[:2] # Only attach the first two initially + healthchecks = [hc] + balancer = gcelb.create_balancer(name, port, protocol, algorithm, members, + ex_healthchecks=healthchecks) + print(' Load Balancer %s created' % balancer.name) + + # == Attach third Node == + print('Attaching additional node to Load Balancer:') + member = balancer.attach_compute_node(lb_nodes[2]) + print(' Attached %s to %s' % (member.id, balancer.name)) + + # == Show Balancer Members == + members = balancer.list_members() + print('Load Balancer Members:') + for member in members: + print(' ID: %s IP: %s' % (member.id, member.ip)) + + # == Remove a Member == + print('Removing a Member:') + detached = members[0] + detach = balancer.detach_member(detached) + if detach: + print(' Member %s detached from %s' % (detached.id, balancer.name)) + + # == Show Updated Balancer Members == + members = balancer.list_members() + print('Updated Load Balancer Members:') + for member in members: + print(' ID: %s IP: %s' % (member.id, member.ip)) + + # == Reattach Member == + print('Reattaching Member:') + member = balancer.attach_member(detached) + print(' Member %s attached to %s' % (member.id, balancer.name)) + + # == Test Load Balancer by connecting to it multiple times == + print('Sleeping for 10 seconds to stabilize the balancer...') + time.sleep(10) + rounds = 200 + url = 'http://%s/' % balancer.ip + line_length = 75 + print('Connecting to %s %s times:' % (url, rounds)) + for x in range(rounds): + response = url_req.urlopen(url) + if PY3: + output = str(response.read(), encoding='utf-8').strip() + else: + output = response.read().strip() + if 'www-001' in 
output: + padded_output = output.center(line_length) + elif 'www-002' in output: + padded_output = output.rjust(line_length) + else: + padded_output = output.ljust(line_length) + sys.stdout.write('\r%s' % padded_output) + sys.stdout.flush() + print('') + + if CLEANUP: + balancers = gcelb.list_balancers() + healthchecks = gcelb.ex_list_healthchecks() + nodes = gce.list_nodes(ex_zone='all') + firewalls = gce.ex_list_firewalls() + + print('Cleaning up %s resources created.' % DEMO_BASE_NAME) + clean_up(gce, DEMO_BASE_NAME, nodes, + balancers + healthchecks + firewalls) + +if __name__ == '__main__': + main() diff -Nru libcloud-0.5.0/demos/secrets.py-dist libcloud-0.15.1/demos/secrets.py-dist --- libcloud-0.5.0/demos/secrets.py-dist 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/demos/secrets.py-dist 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,39 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Make a copy of this file named 'secrets.py' and add your credentials there. +# Note you can run unit tests without setting your credentials. 
+ +BLUEBOX_PARAMS = ('customer_id', 'api_key') +BRIGHTBOX_PARAMS = ('client_id', 'client_secret') +DREAMHOST_PARAMS = ('key',) +EC2_PARAMS = ('access_id', 'secret') +ECP_PARAMS = ('user_name', 'password') +GANDI_PARAMS = ('user',) +GCE_PARAMS = ('email_address', 'key') # Service Account Authentication +#GCE_PARAMS = ('client_id', 'client_secret') # Installed App Authentication +GCE_KEYWORD_PARAMS = {'project': 'project_name'} +HOSTINGCOM_PARAMS = ('user', 'secret') +IBM_PARAMS = ('user', 'secret') +# OPENSTACK_PARAMS = ('user_name', 'api_key', secure_bool, 'host', port_int) +OPENSTACK_PARAMS = ('user_name', 'api_key', False, 'host', 8774) +OPENNEBULA_PARAMS = ('user', 'key') +OPSOURCE_PARAMS = ('user', 'password') +RACKSPACE_PARAMS = ('user', 'key') +SLICEHOST_PARAMS = ('key',) +SOFTLAYER_PARAMS = ('user', 'api_key') +VCLOUD_PARAMS = ('user', 'secret') +VOXEL_PARAMS = ('key', 'secret') +VPSNET_PARAMS = ('user', 'key') diff -Nru libcloud-0.5.0/demos/secrets.py.dist libcloud-0.15.1/demos/secrets.py.dist --- libcloud-0.5.0/demos/secrets.py.dist 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/demos/secrets.py.dist 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# -# Copy this file to secrets.py for use with provided examples -# - -EC2_ACCESS_ID='' -EC2_SECRET_KEY='' diff -Nru libcloud-0.5.0/DISCLAIMER libcloud-0.15.1/DISCLAIMER --- libcloud-0.5.0/DISCLAIMER 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/DISCLAIMER 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -Apache Incubator is an effort undergoing incubation at The Apache Software -Foundation (ASF), sponsored by the name of sponsor. Incubation is required of -all newly accepted projects until a further review indicates that the -infrastructure, communications, and decision making process have stabilized in -a manner consistent with other successful ASF projects. While incubation -status is not necessarily a reflection of the completeness or stability of the -code, it does indicate that the project has yet to be fully endorsed by the -ASF. diff -Nru libcloud-0.5.0/example_compute.py libcloud-0.15.1/example_compute.py --- libcloud-0.5.0/example_compute.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/example_compute.py 2014-05-26 15:42:51.000000000 +0000 @@ -12,25 +12,24 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver EC2 = get_driver(Provider.EC2_US_EAST) -Slicehost = get_driver(Provider.SLICEHOST) Rackspace = get_driver(Provider.RACKSPACE) -drivers = [ EC2('access key id', 'secret key'), - Slicehost('api key'), - Rackspace('username', 'api key') ] +drivers = [EC2('access key id', 'secret key'), + Rackspace('username', 'api key')] -nodes = [ driver.list_nodes() for driver in drivers ] +nodes = [driver.list_nodes() for driver in drivers] -print nodes +print(nodes) # [ , -# , ... ] +# , ... 
] # grab the node named "test" -node = filter(lambda x: x.name == 'test', nodes)[0] +node = [n for n in nodes if n.name == 'test'][0] # reboot "test" node.reboot() diff -Nru libcloud-0.5.0/example_dns.py libcloud-0.15.1/example_dns.py --- libcloud-0.5.0/example_dns.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/example_dns.py 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,29 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from pprint import pprint + +from libcloud.dns.types import Provider +from libcloud.dns.providers import get_driver + +Zerigo = get_driver(Provider.ZERIGO) + +driver = Zerigo('email', 'key') + +zones = driver.list_zones() +pprint(zones) + +records = zones[0].list_records() +pprint(records) diff -Nru libcloud-0.5.0/example_loadbalancer.py libcloud-0.15.1/example_loadbalancer.py --- libcloud-0.5.0/example_loadbalancer.py 2011-05-21 20:10:20.000000000 +0000 +++ libcloud-0.15.1/example_loadbalancer.py 2013-08-30 12:21:18.000000000 +0000 @@ -22,6 +22,7 @@ from libcloud.loadbalancer.types import Provider, State from libcloud.loadbalancer.providers import get_driver + def main(): Rackspace = get_driver(Provider.RACKSPACE_US) @@ -29,21 +30,21 @@ balancers = driver.list_balancers() - print balancers + print(balancers) # creating a balancer which balances traffic across two # nodes: 192.168.86.1:80 and 192.168.86.2:8080. Balancer # itself listens on port 80/tcp new_balancer_name = 'testlb' + os.urandom(4).encode('hex') + members = (Member(None, '192.168.86.1', 80), + Member(None, '192.168.86.2', 8080)) new_balancer = driver.create_balancer(name=new_balancer_name, - algorithm=Algorithm.ROUND_ROBIN, - port=80, - protocol='http', - members=(Member(None, '192.168.86.1', 80), - Member(None, '192.168.86.2', 8080)) - ) + algorithm=Algorithm.ROUND_ROBIN, + port=80, + protocol='http', + members=members) - print new_balancer + print(new_balancer) # wait for balancer to become ready # NOTE: in real life code add timeout to not end up in @@ -54,12 +55,12 @@ if balancer.state == State.RUNNING: break - print "sleeping for 30 seconds for balancers to become ready" + print('sleeping for 30 seconds for balancers to become ready') time.sleep(30) # fetch list of members members = balancer.list_members() - print members + print(members) # remove first member balancer.detach_member(members[0]) diff -Nru libcloud-0.5.0/HACKING libcloud-0.15.1/HACKING --- libcloud-0.5.0/HACKING 2011-04-18 
12:58:19.000000000 +0000 +++ libcloud-0.15.1/HACKING 1970-01-01 00:00:00.000000000 +0000 @@ -1,76 +0,0 @@ - -General Information -=================== - * URL: http://incubator.apache.org/libcloud/devinfo.html - -Git Repositories -=================== - * Official Git Mirror: git://git.apache.org/libcloud.git - * Github Mirror: git://github.com/apache/libcloud.git - -Using The Git-SVN Bridge (For Committers) -========================================= - - $ git clone git://git.apache.org/libcloud libcloud - $ cd libcloud - - $ curl http://git.apache.org/authors.txt > .git/authors.txt - $ git config svn.authorsfile ".git/authors.txt" - - # Optionally, set your Apache commiter info, if different from global - $ git config user.name "Your Name" - $ git config user.email "you@example.org" - - $ git svn init \ - --prefix=origin/ \ - --tags=tags \ - --trunk=trunk \ - --branches=branches \ - https://svn.apache.org/repos/asf/incubator/libcloud - - $ git svn rebase - - To push commits back to SVN: - $ git svn dcommit - -Testing -======= - - Libcloud includes an example secrets.py file at: - test/secrets.py-dist - - To run the test cases, you most likely want to run: - $ cp test/secrets.py-dist test/secrets.py - - This is done to prevent accidental commits of a developers provider credentials. - - To run all suites: - - libcloud$ python setup.py test - running test - ................................................................................................ - ---------------------------------------------------------------------- - Ran 96 tests in 0.182s - - OK - - To run specific tests: - - libcloud$ PYTHONPATH=. python test/compute/test_base.py - ....... - ---------------------------------------------------------------------- - Ran 7 tests in 0.001s - - OK - -Making a release -======= - - We have a script that runs the required setup.py commands and then hashes - and signs the files. 
To run it: - - cd dist - ./release.sh -u yourusername@apache.org - - This should result in a set of apache-libcloud-${VERSION}.{tar.bz2,zip}{,asc,md5,sha1} - files that are suitable to be uploaded for a release. diff -Nru libcloud-0.5.0/libcloud/base.py libcloud-0.15.1/libcloud/base.py --- libcloud-0.5.0/libcloud/base.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/base.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,41 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from libcloud.common.base import RawResponse, Response, LoggingConnection -from libcloud.common.base import LoggingHTTPSConnection, LoggingHTTPConnection -from libcloud.common.base import ConnectionKey, ConnectionUserAndKey -from libcloud.compute.base import Node, NodeSize, NodeImage -from libcloud.compute.base import NodeLocation, NodeAuthSSHKey, NodeAuthPassword -from libcloud.compute.base import NodeDriver, is_private_subnet - -__all__ = ['RawResponse', - 'Response', - 'LoggingConnection', - 'LoggingHTTPSConnection', - 'LoggingHTTPConnection', - 'ConnectionKey', - 'ConnectionUserAndKey', - 'Node', - 'NodeSize', - 'NodeImage', - 'NodeLocation', - 'NodeAuthSSHKey', - 'NodeAuthPassword', - 'NodeDriver', - 'is_private_subnet'] - -from libcloud.utils import deprecated_warning - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/common/abiquo.py libcloud-0.15.1/libcloud/common/abiquo.py --- libcloud-0.5.0/libcloud/common/abiquo.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/common/abiquo.py 2013-12-12 09:01:49.000000000 +0000 @@ -0,0 +1,260 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Abiquo Utilities Module for the Abiquo Driver. + +Common utilities needed by the :class:`AbiquoNodeDriver`. 
+""" +import base64 + +from libcloud.common.base import ConnectionUserAndKey, PollingConnection +from libcloud.common.base import XmlResponse +from libcloud.common.types import InvalidCredsError, LibcloudError +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlparse +from libcloud.utils.py3 import b +from libcloud.compute.base import NodeState + + +def get_href(element, rel): + """ + Search a RESTLink element in the :class:`AbiquoResponse`. + + Abiquo, as a REST API, it offers self-discovering functionality. + That means that you could walk through the whole API only + navigating from the links offered by the entities. + + This is a basic method to find the 'relations' of an entity searching into + its links. + + For instance, a Rack entity serialized as XML as the following:: + + + + + + + false + 1 + + racacaca + 10 + + 4094 + 2 + 1 + + + + offers link to datacenters (rel='datacenter'), to itself (rel='edit') and + to the machines defined in it (rel='machines') + + A call to this method with the 'rack' element using 'datacenter' as 'rel' + will return: + + 'http://10.60.12.7:80/api/admin/datacenters/1' + + :type element: :class:`xml.etree.ElementTree` + :param element: Xml Entity returned by Abiquo API (required) + :type rel: ``str`` + :param rel: relation link name + :rtype: ``str`` + :return: the 'href' value according to the 'rel' input parameter + """ + links = element.findall('link') + for link in links: + if link.attrib['rel'] == rel: + href = link.attrib['href'] + # href is something like: + # + # 'http://localhost:80/api/admin/enterprises' + # + # we are only interested in '/admin/enterprises/' part + needle = '/api/' + url_path = urlparse.urlparse(href).path + index = url_path.find(needle) + result = url_path[index + len(needle) - 1:] + return result + + +class AbiquoResponse(XmlResponse): + """ + Abiquo XML Response. + + Wraps the response in XML bodies or extract the error data in + case of error. 
+ """ + + # Map between abiquo state and Libcloud State + NODE_STATE_MAP = { + 'NOT_ALLOCATED': NodeState.TERMINATED, + 'ALLOCATED': NodeState.PENDING, + 'CONFIGURED': NodeState.PENDING, + 'ON': NodeState.RUNNING, + 'PAUSED': NodeState.PENDING, + 'OFF': NodeState.PENDING, + 'LOCKED': NodeState.PENDING, + 'UNKNOWN': NodeState.UNKNOWN + } + + def parse_error(self): + """ + Parse the error messages. + + Response body can easily be handled by this class parent + :class:`XmlResponse`, but there are use cases which Abiquo API + does not respond an XML but an HTML. So we need to + handle these special cases. + """ + if self.status == httplib.UNAUTHORIZED: + raise InvalidCredsError(driver=self.connection.driver) + elif self.status == httplib.FORBIDDEN: + raise ForbiddenError(self.connection.driver) + else: + errors = self.parse_body().findall('error') + # Most of the exceptions only have one error + raise LibcloudError(errors[0].findtext('message')) + + def success(self): + """ + Determine if the request was successful. + + Any of the 2XX HTTP response codes are accepted as successfull requests + + :rtype: ``bool`` + :return: successful request or not. + """ + return self.status in [httplib.OK, httplib.CREATED, httplib.NO_CONTENT, + httplib.ACCEPTED] + + def async_success(self): + """ + Determinate if async request was successful. + + An async_request retrieves for a task object that can be successfully + retrieved (self.status == OK), but the asyncronous task (the body of + the HTTP response) which we are asking for has finished with an error. + So this method checks if the status code is 'OK' and if the task + has finished successfully. 
+ + :rtype: ``bool`` + :return: successful asynchronous request or not + """ + if self.success(): + # So we have a 'task' object in the body + task = self.parse_body() + return task.findtext('state') == 'FINISHED_SUCCESSFULLY' + else: + return False + + +class AbiquoConnection(ConnectionUserAndKey, PollingConnection): + """ + A Connection to Abiquo API. + + Basic :class:`ConnectionUserAndKey` connection with + :class:`PollingConnection` features for asynchronous tasks. + """ + + responseCls = AbiquoResponse + + def __init__(self, user_id, key, secure=True, host=None, port=None, + url=None, timeout=None): + super(AbiquoConnection, self).__init__(user_id=user_id, key=key, + secure=secure, + host=host, port=port, + url=url, timeout=timeout) + + # This attribute stores data cached across multiple request + self.cache = {} + + def add_default_headers(self, headers): + """ + Add Basic Authentication header to all the requests. + + It injects the 'Authorization: Basic Base64String===' header + in each request + + :type headers: ``dict`` + :param headers: Default input headers + :rtype ``dict`` + :return: Default input headers with the 'Authorization' + header + """ + b64string = b('%s:%s' % (self.user_id, self.key)) + encoded = base64.b64encode(b64string).decode('utf-8') + + authorization = 'Basic ' + encoded + + headers['Authorization'] = authorization + return headers + + def get_poll_request_kwargs(self, response, context, request_kwargs): + """ + Manage polling request arguments. + + Return keyword arguments which are passed to the + :class:`NodeDriver.request` method when polling for the job status. The + Abiquo Asynchronous Response returns and 'acceptedrequest' XmlElement + as the following:: + + + + You can follow the progress in the link + + + We need to extract the href URI to poll. + + :type response: :class:`xml.etree.ElementTree` + :keyword response: Object returned by poll request. 
+ :type request_kwargs: ``dict`` + :keyword request_kwargs: Default request arguments and headers + :rtype: ``dict`` + :return: Modified keyword arguments + """ + accepted_request_obj = response.object + link_poll = get_href(accepted_request_obj, 'status') + + # Override just the 'action' and 'method' keys of the previous dict + request_kwargs['action'] = link_poll + request_kwargs['method'] = 'GET' + return request_kwargs + + def has_completed(self, response): + """ + Decide if the asynchronous job has ended. + + :type response: :class:`xml.etree.ElementTree` + :param response: Response object returned by poll request + :rtype: ``bool`` + :return: Whether the job has completed + """ + task = response.object + task_state = task.findtext('state') + return task_state in ['FINISHED_SUCCESSFULLY', 'ABORTED', + 'FINISHED_UNSUCCESSFULLY'] + + +class ForbiddenError(LibcloudError): + """ + Exception used when credentials are ok but user has not permissions. + """ + + def __init__(self, driver): + message = 'User has not permission to perform this task.' + super(LibcloudError, self).__init__(message, driver) diff -Nru libcloud-0.5.0/libcloud/common/aws.py libcloud-0.15.1/libcloud/common/aws.py --- libcloud-0.5.0/libcloud/common/aws.py 2011-05-10 12:22:57.000000000 +0000 +++ libcloud-0.15.1/libcloud/common/aws.py 2014-06-11 14:27:59.000000000 +0000 @@ -13,18 +13,181 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from xml.etree import ElementTree as ET +import base64 +import hmac +import time +from hashlib import sha256 -from libcloud.common.base import Response -from libcloud.common.types import MalformedResponseError +try: + from lxml import etree as ET +except ImportError: + from xml.etree import ElementTree as ET -class AWSBaseResponse(Response): - def parse_body(self): - if not self.body: - return None +from libcloud.common.base import ConnectionUserAndKey, XmlResponse, BaseDriver +from libcloud.common.types import InvalidCredsError, MalformedResponseError +from libcloud.utils.py3 import b, httplib, urlquote +from libcloud.utils.xml import findtext, findall + + +class AWSBaseResponse(XmlResponse): + namespace = None + + def _parse_error_details(self, element): + """ + Parse code and message from the provided error element. + + :return: ``tuple`` with two elements: (code, message) + :rtype: ``tuple`` + """ + code = findtext(element=element, xpath='Code', + namespace=self.namespace) + message = findtext(element=element, xpath='Message', + namespace=self.namespace) + + return code, message + + +class AWSGenericResponse(AWSBaseResponse): + # There are multiple error messages in AWS, but they all have an Error node + # with Code and Message child nodes. Xpath to select them + # None if the root node *is* the Error node + xpath = None + + # This dict maps CodeName to a specific + # exception class that is raised immediately. + # If a custom exception class is not defined, errors are accumulated and + # returned from the parse_error method. + expections = {} + + def success(self): + return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED] + + def parse_error(self): + context = self.connection.context + status = int(self.status) + + # FIXME: Probably ditch this as the forbidden message will have + # corresponding XML. 
+ if status == httplib.FORBIDDEN: + if not self.body: + raise InvalidCredsError(str(self.status) + ': ' + self.error) + else: + raise InvalidCredsError(self.body) try: - body = ET.XML(self.body) - except: - raise MalformedResponseError("Failed to parse XML", body=self.body) - return body + body = ET.XML(self.body) + except Exception: + raise MalformedResponseError('Failed to parse XML', + body=self.body, + driver=self.connection.driver) + + if self.xpath: + errs = findall(element=body, xpath=self.xpath, + namespace=self.namespace) + else: + errs = [body] + + msgs = [] + for err in errs: + code, message = self._parse_error_details(element=err) + exceptionCls = self.exceptions.get(code, None) + + if exceptionCls is None: + msgs.append('%s: %s' % (code, message)) + continue + + # Custom exception class is defined, immediately throw an exception + params = {} + if hasattr(exceptionCls, 'kwargs'): + for key in exceptionCls.kwargs: + if key in context: + params[key] = context[key] + + raise exceptionCls(value=message, driver=self.connection.driver, + **params) + + return "\n".join(msgs) + + +class AWSTokenConnection(ConnectionUserAndKey): + def __init__(self, user_id, key, secure=True, + host=None, port=None, url=None, timeout=None, token=None): + self.token = token + super(AWSTokenConnection, self).__init__(user_id, key, secure=secure, + host=host, port=port, url=url, + timeout=timeout) + + def add_default_params(self, params): + # Even though we are adding it to the headers, we need it here too + # so that the token is added to the signature. 
+ if self.token: + params['x-amz-security-token'] = self.token + return super(AWSTokenConnection, self).add_default_params(params) + + def add_default_headers(self, headers): + if self.token: + headers['x-amz-security-token'] = self.token + return super(AWSTokenConnection, self).add_default_headers(headers) + + +class SignedAWSConnection(AWSTokenConnection): + + def add_default_params(self, params): + params['SignatureVersion'] = '2' + params['SignatureMethod'] = 'HmacSHA256' + params['AWSAccessKeyId'] = self.user_id + params['Version'] = self.version + params['Timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', + time.gmtime()) + params['Signature'] = self._get_aws_auth_param(params, self.key, + self.action) + return params + + def _get_aws_auth_param(self, params, secret_key, path='/'): + """ + Creates the signature required for AWS, per + http://bit.ly/aR7GaQ [docs.amazonwebservices.com]: + + StringToSign = HTTPVerb + "\n" + + ValueOfHostHeaderInLowercase + "\n" + + HTTPRequestURI + "\n" + + CanonicalizedQueryString + """ + keys = list(params.keys()) + keys.sort() + pairs = [] + for key in keys: + value = str(params[key]) + pairs.append(urlquote(key, safe='') + '=' + + urlquote(value, safe='-_~')) + + qs = '&'.join(pairs) + + hostname = self.host + if (self.secure and self.port != 443) or \ + (not self.secure and self.port != 80): + hostname += ":" + str(self.port) + + string_to_sign = '\n'.join(('GET', hostname, path, qs)) + + b64_hmac = base64.b64encode( + hmac.new(b(secret_key), b(string_to_sign), + digestmod=sha256).digest() + ) + + return b64_hmac.decode('utf-8') + + +class AWSDriver(BaseDriver): + def __init__(self, key, secret=None, secure=True, host=None, port=None, + api_version=None, region=None, token=None, **kwargs): + self.token = token + super(AWSDriver, self).__init__(key, secret=secret, secure=secure, + host=host, port=port, + api_version=api_version, region=region, + token=token, **kwargs) + + def _ex_connection_class_kwargs(self): + kwargs = 
super(AWSDriver, self)._ex_connection_class_kwargs() + kwargs['token'] = self.token + return kwargs diff -Nru libcloud-0.5.0/libcloud/common/azure.py libcloud-0.15.1/libcloud/common/azure.py --- libcloud-0.5.0/libcloud/common/azure.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/common/azure.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,189 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy +import time +import base64 +import hmac + +from hashlib import sha256 + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import b +from libcloud.utils.xml import fixxpath + +try: + from lxml import etree as ET +except ImportError: + from xml.etree import ElementTree as ET + +from libcloud.common.types import InvalidCredsError +from libcloud.common.types import LibcloudError, MalformedResponseError +from libcloud.common.base import ConnectionUserAndKey, RawResponse +from libcloud.common.base import XmlResponse + +# Azure API version +API_VERSION = '2012-02-12' + +# The time format for headers in Azure requests +AZURE_TIME_FORMAT = '%a, %d %b %Y %H:%M:%S GMT' + + +class AzureResponse(XmlResponse): + + valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT, + httplib.BAD_REQUEST] + + def success(self): + i = int(self.status) + return i >= 200 and i <= 299 or i in self.valid_response_codes + + def parse_error(self, msg=None): + error_msg = 'Unknown error' + + try: + # Azure does give some meaningful errors, but is inconsistent + # Some APIs respond with an XML error. Others just dump HTML + body = self.parse_body() + + if type(body) == ET.Element: + code = body.findtext(fixxpath(xpath='Code')) + message = body.findtext(fixxpath(xpath='Message')) + message = message.split('\n')[0] + error_msg = '%s: %s' % (code, message) + + except MalformedResponseError: + pass + + if msg: + error_msg = '%s - %s' % (msg, error_msg) + + if self.status in [httplib.UNAUTHORIZED, httplib.FORBIDDEN]: + raise InvalidCredsError(error_msg) + + raise LibcloudError('%s Status code: %d.' 
% (error_msg, self.status), + driver=self) + + +class AzureRawResponse(RawResponse): + pass + + +class AzureConnection(ConnectionUserAndKey): + """ + Represents a single connection to Azure + """ + + responseCls = AzureResponse + rawResponseCls = AzureRawResponse + + def add_default_params(self, params): + return params + + def pre_connect_hook(self, params, headers): + headers = copy.deepcopy(headers) + + # We have to add a date header in GMT + headers['x-ms-date'] = time.strftime(AZURE_TIME_FORMAT, time.gmtime()) + headers['x-ms-version'] = API_VERSION + + # Add the authorization header + headers['Authorization'] = self._get_azure_auth_signature( + method=self.method, headers=headers, params=params, + account=self.user_id, secret_key=self.key, path=self.action) + + # Azure cribs about this in 'raw' connections + headers.pop('Host', None) + + return params, headers + + def _get_azure_auth_signature(self, method, headers, params, + account, secret_key, path='/'): + """ + Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, + UTF-8-Encoding-Of( StringToSign ) ) ) ); + + StringToSign = HTTP-VERB + "\n" + + Content-Encoding + "\n" + + Content-Language + "\n" + + Content-Length + "\n" + + Content-MD5 + "\n" + + Content-Type + "\n" + + Date + "\n" + + If-Modified-Since + "\n" + + If-Match + "\n" + + If-None-Match + "\n" + + If-Unmodified-Since + "\n" + + Range + "\n" + + CanonicalizedHeaders + + CanonicalizedResource; + """ + special_header_values = [] + xms_header_values = [] + param_list = [] + special_header_keys = ['content-encoding', 'content-language', + 'content-length', 'content-md5', + 'content-type', 'date', 'if-modified-since', + 'if-match', 'if-none-match', + 'if-unmodified-since', 'range'] + + # Split the x-ms headers and normal headers and make everything + # lower case + headers_copy = {} + for header, value in headers.items(): + header = header.lower() + value = str(value).strip() + if header.startswith('x-ms-'): + xms_header_values.append((header, 
value)) + else: + headers_copy[header] = value + + # Get the values for the headers in the specific order + for header in special_header_keys: + header = header.lower() # Just for safety + if header in headers_copy: + special_header_values.append(headers_copy[header]) + else: + special_header_values.append('') + + # Prepare the first section of the string to be signed + values_to_sign = [method] + special_header_values + # string_to_sign = '\n'.join([method] + special_header_values) + + # The x-ms-* headers have to be in lower case and sorted + xms_header_values.sort() + + for header, value in xms_header_values: + values_to_sign.append('%s:%s' % (header, value)) + + # Add the canonicalized path + values_to_sign.append('/%s%s' % (account, path)) + + # URL query parameters (sorted and lower case) + for key, value in params.items(): + param_list.append((key.lower(), str(value).strip())) + + param_list.sort() + + for key, value in param_list: + values_to_sign.append('%s:%s' % (key, value)) + + string_to_sign = b('\n'.join(values_to_sign)) + secret_key = b(secret_key) + b64_hmac = base64.b64encode( + hmac.new(secret_key, string_to_sign, digestmod=sha256).digest() + ) + + return 'SharedKey %s:%s' % (self.user_id, b64_hmac.decode('utf-8')) diff -Nru libcloud-0.5.0/libcloud/common/base.py libcloud-0.15.1/libcloud/common/base.py --- libcloud-0.5.0/libcloud/common/base.py 2011-05-21 15:42:52.000000000 +0000 +++ libcloud-0.15.1/libcloud/common/base.py 2014-06-11 14:27:59.000000000 +0000 @@ -13,36 +13,104 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import httplib -import urllib -import StringIO +import os +import sys import ssl +import copy +import binascii +import time + +try: + from lxml import etree as ET +except ImportError: + from xml.etree import ElementTree as ET from pipes import quote as pquote +try: + import simplejson as json +except: + import json + import libcloud +from libcloud.utils.py3 import PY3, PY25 +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlparse +from libcloud.utils.py3 import urlencode +from libcloud.utils.py3 import StringIO +from libcloud.utils.py3 import u +from libcloud.utils.py3 import b + +from libcloud.utils.misc import lowercase_keys +from libcloud.utils.compression import decompress_data +from libcloud.common.types import LibcloudError, MalformedResponseError + from libcloud.httplib_ssl import LibcloudHTTPSConnection -from httplib import HTTPConnection as LibcloudHTTPConnection + +LibcloudHTTPConnection = httplib.HTTPConnection + + +class HTTPResponse(httplib.HTTPResponse): + # On python 2.6 some calls can hang because HEAD isn't quite properly + # supported. + # In particular this happens on S3 when calls are made to get_object to + # objects that don't exist. + # This applies the behaviour from 2.7, fixing the hangs. + def read(self, amt=None): + if self.fp is None: + return '' + + if self._method == 'HEAD': + self.close() + return '' + + return httplib.HTTPResponse.read(self, amt) + class Response(object): """ - A Base Response class to derive from. + A base Response class to derive from. 
""" - NODE_STATE_MAP = {} - object = None - body = None - status = httplib.OK - headers = {} - error = None - connection = None + status = httplib.OK # Response status code + headers = {} # Response headers + body = None # Raw response body + object = None # Parsed response body - def __init__(self, response): - self.body = response.read() - self.status = response.status - self.headers = dict(response.getheaders()) + error = None # Reason returned by the server. + connection = None # Parent connection class + parse_zero_length_body = False + + def __init__(self, response, connection): + """ + :param response: HTTP response object. (optional) + :type response: :class:`httplib.HTTPResponse` + + :param connection: Parent connection object. + :type connection: :class:`.Connection` + """ + self.connection = connection + + # http.client In Python 3 doesn't automatically lowercase the header + # names + self.headers = lowercase_keys(dict(response.getheaders())) self.error = response.reason + self.status = response.status + + # This attribute is set when using LoggingConnection. + original_data = getattr(response, '_original_data', None) + + if original_data: + # LoggingConnection already decompresses data so it can log it + # which means we don't need to decompress it here. + self.body = response._original_data + else: + self.body = self._decompress_response(body=response.read(), + headers=self.headers) + + if PY3: + self.body = b(self.body).decode('utf-8') if not self.success(): raise Exception(self.parse_error()) @@ -55,7 +123,8 @@ Override in a provider's subclass. - @return: Parsed body. + :return: Parsed body. + :rtype: ``str`` """ return self.body @@ -65,7 +134,8 @@ Override in a provider's subclass. - @return: Parsed error. + :return: Parsed error. + :rtype: ``str`` """ return self.body @@ -76,18 +146,90 @@ The meaning of this can be arbitrary; did we receive OK status? Did the node get created? Were we authenticated? 
- @return: C{True} or C{False} + :rtype: ``bool`` + :return: ``True`` or ``False`` """ - return self.status == httplib.OK or self.status == httplib.CREATED + return self.status in [httplib.OK, httplib.CREATED] + + def _decompress_response(self, body, headers): + """ + Decompress a response body if it is using deflate or gzip encoding. + + :param body: Response body. + :type body: ``str`` + + :param headers: Response headers. + :type headers: ``dict`` + + :return: Decompressed response + :rtype: ``str`` + """ + encoding = headers.get('content-encoding', None) + + if encoding in ['zlib', 'deflate']: + body = decompress_data('zlib', body) + elif encoding in ['gzip', 'x-gzip']: + body = decompress_data('gzip', body) + else: + body = body.strip() + + return body + + +class JsonResponse(Response): + """ + A Base JSON Response class to derive from. + """ + + def parse_body(self): + if len(self.body) == 0 and not self.parse_zero_length_body: + return self.body + + try: + body = json.loads(self.body) + except: + raise MalformedResponseError( + 'Failed to parse JSON', + body=self.body, + driver=self.connection.driver) + return body + + parse_error = parse_body + + +class XmlResponse(Response): + """ + A Base XML Response class to derive from. + """ + + def parse_body(self): + if len(self.body) == 0 and not self.parse_zero_length_body: + return self.body + + try: + body = ET.XML(self.body) + except: + raise MalformedResponseError('Failed to parse XML', + body=self.body, + driver=self.connection.driver) + return body + + parse_error = parse_body + class RawResponse(Response): - def __init__(self, response=None): + def __init__(self, connection): + """ + :param connection: Parent connection object. 
+ :type connection: :class:`.Connection` + """ self._status = None self._response = None self._headers = {} self._error = None self._reason = None + self.connection = connection @property def response(self): @@ -107,7 +249,7 @@ @property def headers(self): if not self._headers: - self._headers = dict(self.response.getheaders()) + self._headers = lowercase_keys(dict(self.response.getheaders())) return self._headers @property @@ -117,13 +259,13 @@ return self._reason -#TODO: Move this to a better location/package +# TODO: Move this to a better location/package class LoggingConnection(): """ Debug class to log all HTTP(s) requests as they could be made - with the C{curl} command. + with the curl command. - @cvar log: file-like object that logs entries are written to. + :cvar log: file-like object that logs entries are written to. """ log = None @@ -140,32 +282,60 @@ for h in r.getheaders(): ht += "%s: %s\r\n" % (h[0].title(), h[1]) ht += "\r\n" + # this is evil. laugh with me. ha arharhrhahahaha class fakesock: def __init__(self, s): self.s = s - def makefile(self, mode, foo): - return StringIO.StringIO(self.s) + + def makefile(self, *args, **kwargs): + if PY3: + from io import BytesIO + cls = BytesIO + else: + cls = StringIO + + return cls(b(self.s)) rr = r + headers = lowercase_keys(dict(r.getheaders())) + + encoding = headers.get('content-encoding', None) + + if encoding in ['zlib', 'deflate']: + body = decompress_data('zlib', body) + elif encoding in ['gzip', 'x-gzip']: + body = decompress_data('gzip', body) + if r.chunked: ht += "%x\r\n" % (len(body)) - ht += body + ht += u(body) ht += "\r\n0\r\n" else: - ht += body - rr = httplib.HTTPResponse(fakesock(ht), - method=r._method, - debuglevel=r.debuglevel) + ht += u(body) + + if sys.version_info >= (2, 6) and sys.version_info < (2, 7): + cls = HTTPResponse + else: + cls = httplib.HTTPResponse + + rr = cls(sock=fakesock(ht), method=r._method, + debuglevel=r.debuglevel) rr.begin() rv += ht rv += ("\n# -------- end 
%d:%d response ----------\n" % (id(self), id(r))) + + rr._original_data = body return (rr, rv) def _log_curl(self, method, url, body, headers): cmd = ["curl", "-i"] - cmd.extend(["-X", pquote(method)]) + if method.lower() == 'head': + # HEAD method need special handling + cmd.extend(["--head"]) + else: + cmd.extend(["-X", pquote(method)]) for h in headers: cmd.extend(["-H", pquote("%s: %s" % (h, headers[h]))]) @@ -174,14 +344,19 @@ if body is not None and len(body) > 0: cmd.extend(["--data-binary", pquote(body)]) - cmd.extend([pquote("https://%s:%d%s" % (self.host, self.port, url))]) + cmd.extend(["--compress"]) + cmd.extend([pquote("%s://%s:%d%s" % (self.protocol, self.host, + self.port, url))]) return " ".join(cmd) + class LoggingHTTPSConnection(LoggingConnection, LibcloudHTTPSConnection): """ Utility Class for logging HTTPS connections """ + protocol = 'https' + def getresponse(self): r = LibcloudHTTPSConnection.getresponse(self) if self.log is not None: @@ -193,17 +368,21 @@ def request(self, method, url, body=None, headers=None): headers.update({'X-LC-Request-ID': str(id(self))}) if self.log is not None: - pre = "# -------- begin %d request ----------\n" % id(self) + pre = "# -------- begin %d request ----------\n" % id(self) self.log.write(pre + self._log_curl(method, url, body, headers) + "\n") self.log.flush() - return LibcloudHTTPSConnection.request(self, method, url, body, headers) + return LibcloudHTTPSConnection.request(self, method, url, body, + headers) + class LoggingHTTPConnection(LoggingConnection, LibcloudHTTPConnection): """ Utility Class for logging HTTP connections """ + protocol = 'http' + def getresponse(self): r = LibcloudHTTPConnection.getresponse(self) if self.log is not None: @@ -215,190 +394,302 @@ def request(self, method, url, body=None, headers=None): headers.update({'X-LC-Request-ID': str(id(self))}) if self.log is not None: - pre = "# -------- begin %d request ----------\n" % id(self) + pre = '# -------- begin %d request 
----------\n' % id(self) self.log.write(pre + self._log_curl(method, url, body, headers) + "\n") self.log.flush() return LibcloudHTTPConnection.request(self, method, url, - body, headers) + body, headers) -class ConnectionKey(object): + +class Connection(object): """ A Base Connection class to derive from. """ - #conn_classes = (LoggingHTTPSConnection) + # conn_classes = (LoggingHTTPSConnection) conn_classes = (LibcloudHTTPConnection, LibcloudHTTPSConnection) responseCls = Response rawResponseCls = RawResponse connection = None host = '127.0.0.1' - port = (80, 443) + port = 443 + timeout = None secure = 1 driver = None action = None + cache_busting = False - def __init__(self, key, secure=True, host=None, force_port=None): - """ - Initialize `user_id` and `key`; set `secure` to an C{int} based on - passed value. - """ - self.key = key + allow_insecure = True + + def __init__(self, secure=True, host=None, port=None, url=None, + timeout=None): self.secure = secure and 1 or 0 self.ua = [] + self.context = {} + + if not self.allow_insecure and not secure: + # TODO: We should eventually switch to whitelist instead of + # blacklist approach + raise ValueError('Non https connections are not allowed (use ' + 'secure=True)') + + self.request_path = '' + if host: self.host = host - if force_port: - self.port = (force_port, force_port) + if port is not None: + self.port = port + else: + if self.secure == 1: + self.port = 443 + else: + self.port = 80 + + if url: + (self.host, self.port, self.secure, + self.request_path) = self._tuple_from_url(url) + + if timeout: + self.timeout = timeout + + def set_context(self, context): + if not isinstance(context, dict): + raise TypeError('context needs to be a dictionary') + + self.context = context + + def reset_context(self): + self.context = {} + + def _tuple_from_url(self, url): + secure = 1 + port = None + (scheme, netloc, request_path, param, + query, fragment) = urlparse.urlparse(url) + + if scheme not in ['http', 'https']: + raise 
LibcloudError('Invalid scheme: %s in url %s' % (scheme, url)) + + if scheme == "http": + secure = 0 + + if ":" in netloc: + netloc, port = netloc.rsplit(":") + port = port + + if not port: + if scheme == "http": + port = 80 + else: + port = 443 + + host = netloc + + return (host, port, secure, request_path) - def connect(self, host=None, port=None): + def connect(self, host=None, port=None, base_url=None): """ Establish a connection with the API server. - @type host: C{str} - @param host: Optional host to override our default + :type host: ``str`` + :param host: Optional host to override our default - @type port: C{int} - @param port: Optional port to override our default + :type port: ``int`` + :param port: Optional port to override our default - @returns: A connection + :returns: A connection """ - host = host or self.host - - # port might be included in service url, so pick it if it's present - if ":" in host: - host, port = host.split(":") + # prefer the attribute base_url if its set or sent + connection = None + secure = self.secure + + if getattr(self, 'base_url', None) and base_url is None: + (host, port, + secure, request_path) = self._tuple_from_url(self.base_url) + elif base_url is not None: + (host, port, + secure, request_path) = self._tuple_from_url(base_url) else: - port = port or self.port[self.secure] + host = host or self.host + port = port or self.port kwargs = {'host': host, 'port': int(port)} - connection = self.conn_classes[self.secure](**kwargs) + # Timeout is only supported in Python 2.6 and later + # http://docs.python.org/library/httplib.html#httplib.HTTPConnection + if self.timeout and not PY25: + kwargs.update({'timeout': self.timeout}) + + connection = self.conn_classes[secure](**kwargs) # You can uncoment this line, if you setup a reverse proxy server # which proxies to your endpoint, and lets you easily capture # connections in cleartext when you setup the proxy to do SSL # for you - #connection = self.conn_classes[False]("127.0.0.1", 
8080) + # connection = self.conn_classes[False]("127.0.0.1", 8080) self.connection = connection def _user_agent(self): - return 'libcloud/%s (%s)%s' % ( - libcloud.__version__, - self.driver.name, - "".join([" (%s)" % x for x in self.ua])) + user_agent_suffix = ' '.join(['(%s)' % x for x in self.ua]) + + if self.driver: + user_agent = 'libcloud/%s (%s) %s' % ( + libcloud.__version__, + self.driver.name, user_agent_suffix) + else: + user_agent = 'libcloud/%s %s' % ( + libcloud.__version__, user_agent_suffix) + + return user_agent def user_agent_append(self, token): """ Append a token to a user agent string. - Users of the library should call this to uniquely identify thier requests - to a provider. + Users of the library should call this to uniquely identify their + requests to a provider. - @type token: C{str} - @param token: Token to add to the user agent. + :type token: ``str`` + :param token: Token to add to the user agent. """ self.ua.append(token) - def request(self, - action, - params=None, - data='', - headers=None, - method='GET', - raw=False, - host=None): + def request(self, action, params=None, data=None, headers=None, + method='GET', raw=False): """ Request a given `action`. Basically a wrapper around the connection object's `request` that does some helpful pre-processing. - @type action: C{str} - @param action: A path - - @type params: C{dict} - @param params: Optional mapping of additional parameters to send. If - None, leave as an empty C{dict}. + :type action: ``str`` + :param action: A path. This can include arguments. If included, + any extra parameters are appended to the existing ones. + + :type params: ``dict`` + :param params: Optional mapping of additional parameters to send. If + None, leave as an empty ``dict``. + + :type data: ``unicode`` + :param data: A body of data to send with the request. + + :type headers: ``dict`` + :param headers: Extra headers to add to the request + None, leave as an empty ``dict``. 
- @type data: C{unicode} - @param data: A body of data to send with the request. + :type method: ``str`` + :param method: An HTTP method such as "GET" or "POST". - @type headers: C{dict} - @param headers: Extra headers to add to the request - None, leave as an empty C{dict}. - - @type method: C{str} - @param method: An HTTP method such as "GET" or "POST". - - @type raw: C{bool} - @param raw: True to perform a "raw" request aka only send the headers + :type raw: ``bool`` + :param raw: True to perform a "raw" request aka only send the headers and use the rawResponseCls class. This is used with storage API when uploading a file. - @type host: C{str} - @param host: To which host to send the request. If not specified, - self.host is used. + :return: An :class:`Response` instance. + :rtype: :class:`Response` instance - @return: An instance of type I{responseCls} """ if params is None: params = {} + else: + params = copy.copy(params) + if headers is None: headers = {} + else: + headers = copy.copy(headers) + action = self.morph_action_hook(action) self.action = action self.method = method + # Extend default parameters params = self.add_default_params(params) + + # Add cache busting parameters (if enabled) + if self.cache_busting and method == 'GET': + params = self._add_cache_busting_to_params(params=params) + # Extend default headers headers = self.add_default_headers(headers) + # We always send a user-agent header headers.update({'User-Agent': self._user_agent()}) - host = host or self.host - headers.update({'Host': host}) - # Encode data if necessary - if data != '' and data != None: - data = self.encode_data(data) - if data is not None: - headers.update({'Content-Length': str(len(data))}) + # Indicate that we support gzip and deflate compression + headers.update({'Accept-Encoding': 'gzip,deflate'}) + + port = int(self.port) + + if port not in (80, 443): + headers.update({'Host': "%s:%d" % (self.host, port)}) + else: + headers.update({'Host': self.host}) + + if data: + 
data = self.encode_data(data) + headers['Content-Length'] = str(len(data)) + elif method.upper() in ['POST', 'PUT'] and not raw: + # Only send Content-Length 0 with POST and PUT request. + # + # Note: Content-Length is not added when using "raw" mode means + # means that headers are upfront and the body is sent at some point + # later on. With raw mode user can specify Content-Length with + # "data" not being set. + headers['Content-Length'] = '0' params, headers = self.pre_connect_hook(params, headers) if params: - url = '?'.join((action, urllib.urlencode(params))) + if '?' in action: + url = '&'.join((action, urlencode(params, doseq=True))) + else: + url = '?'.join((action, urlencode(params, doseq=True))) else: url = action # Removed terrible hack...this a less-bad hack that doesn't execute a # request twice, but it's still a hack. - self.connect(host=host) + self.connect() try: # @TODO: Should we just pass File object as body to request method # instead of dealing with splitting and sending the file ourselves? 
if raw: self.connection.putrequest(method, url) - for key, value in headers.iteritems(): - self.connection.putheader(key, value) + for key, value in list(headers.items()): + self.connection.putheader(key, str(value)) self.connection.endheaders() else: self.connection.request(method=method, url=url, body=data, headers=headers) - except ssl.SSLError, e: + except ssl.SSLError: + e = sys.exc_info()[1] + self.reset_context() raise ssl.SSLError(str(e)) if raw: - response = self.rawResponseCls() + responseCls = self.rawResponseCls + kwargs = {'connection': self} else: - response = self.responseCls(self.connection.getresponse()) + responseCls = self.responseCls + kwargs = {'connection': self, + 'response': self.connection.getresponse()} + + try: + response = responseCls(**kwargs) + finally: + # Always reset the context after the request has completed + self.reset_context() - response.connection = self return response + def morph_action_hook(self, action): + return self.request_path + action + def add_default_params(self, params): """ Adds default parameters (such as API key, version, etc.) @@ -423,11 +714,11 @@ This hook can perform a final manipulation on the params, headers and url parameters. - @type params: C{dict} - @param params: Request parameters. + :type params: ``dict`` + :param params: Request parameters. - @type headers: C{dict} - @param headers: Request headers. + :type headers: ``dict`` + :param headers: Request headers. """ return params, headers @@ -439,13 +730,239 @@ """ return data + def _add_cache_busting_to_params(self, params): + """ + Add cache busting parameter to the query parameters of a GET request. + + Parameters are only added if "cache_busting" class attribute is set to + True. + + Note: This should only be used with *naughty* providers which use + excessive caching of responses. 
+ """ + cache_busting_value = binascii.hexlify(os.urandom(8)).decode('ascii') + + if isinstance(params, dict): + params['cache-busting'] = cache_busting_value + else: + params.append(('cache-busting', cache_busting_value)) + + return params + + +class PollingConnection(Connection): + """ + Connection class which can also work with the async APIs. + + After initial requests, this class periodically polls for jobs status and + waits until the job has finished. + If job doesn't finish in timeout seconds, an Exception thrown. + """ + poll_interval = 0.5 + timeout = 200 + request_method = 'request' + + def async_request(self, action, params=None, data=None, headers=None, + method='GET', context=None): + """ + Perform an 'async' request to the specified path. Keep in mind that + this function is *blocking* and 'async' in this case means that the + hit URL only returns a job ID which is the periodically polled until + the job has completed. + + This function works like this: + + - Perform a request to the specified path. Response should contain a + 'job_id'. + + - Returned 'job_id' is then used to construct a URL which is used for + retrieving job status. Constructed URL is then periodically polled + until the response indicates that the job has completed or the + timeout of 'self.timeout' seconds has been reached. + + :type action: ``str`` + :param action: A path + + :type params: ``dict`` + :param params: Optional mapping of additional parameters to send. If + None, leave as an empty ``dict``. + + :type data: ``unicode`` + :param data: A body of data to send with the request. + + :type headers: ``dict`` + :param headers: Extra headers to add to the request + None, leave as an empty ``dict``. + + :type method: ``str`` + :param method: An HTTP method such as "GET" or "POST". + + :type context: ``dict`` + :param context: Context dictionary which is passed to the functions + which construct initial and poll URL. + + :return: An :class:`Response` instance. 
+ :rtype: :class:`Response` instance + """ + + request = getattr(self, self.request_method) + kwargs = self.get_request_kwargs(action=action, params=params, + data=data, headers=headers, + method=method, + context=context) + response = request(**kwargs) + kwargs = self.get_poll_request_kwargs(response=response, + context=context, + request_kwargs=kwargs) + + end = time.time() + self.timeout + completed = False + while time.time() < end and not completed: + response = request(**kwargs) + completed = self.has_completed(response=response) + if not completed: + time.sleep(self.poll_interval) + + if not completed: + raise LibcloudError('Job did not complete in %s seconds' % + (self.timeout)) + + return response + + def get_request_kwargs(self, action, params=None, data=None, headers=None, + method='GET', context=None): + """ + Arguments which are passed to the initial request() call inside + async_request. + """ + kwargs = {'action': action, 'params': params, 'data': data, + 'headers': headers, 'method': method} + return kwargs + + def get_poll_request_kwargs(self, response, context, request_kwargs): + """ + Return keyword arguments which are passed to the request() method when + polling for the job status. + + :param response: Response object returned by poll request. + :type response: :class:`HTTPResponse` + + :param request_kwargs: Kwargs previously used to initiate the + poll request. + :type response: ``dict`` + + :return ``dict`` Keyword arguments + """ + raise NotImplementedError('get_poll_request_kwargs not implemented') + + def has_completed(self, response): + """ + Return job completion status. + + :param response: Response object returned by poll request. + :type response: :class:`HTTPResponse` + + :return ``bool`` True if the job has completed, False otherwise. + """ + raise NotImplementedError('has_completed not implemented') + + +class ConnectionKey(Connection): + """ + Base connection class which accepts a single ``key`` argument. 
+ """ + def __init__(self, key, secure=True, host=None, port=None, url=None, + timeout=None): + """ + Initialize `user_id` and `key`; set `secure` to an ``int`` based on + passed value. + """ + super(ConnectionKey, self).__init__(secure=secure, host=host, + port=port, url=url, + timeout=timeout) + self.key = key + + class ConnectionUserAndKey(ConnectionKey): """ - Base connection which accepts a user_id and key + Base connection class which accepts a ``user_id`` and ``key`` argument. """ user_id = None - def __init__(self, user_id, key, secure=True, host=None, port=None): - super(ConnectionUserAndKey, self).__init__(key, secure, host, port) + def __init__(self, user_id, key, secure=True, + host=None, port=None, url=None, timeout=None): + super(ConnectionUserAndKey, self).__init__(key, secure=secure, + host=host, port=port, + url=url, timeout=timeout) self.user_id = user_id + + +class BaseDriver(object): + """ + Base driver class from which other classes can inherit from. + """ + + connectionCls = ConnectionKey + + def __init__(self, key, secret=None, secure=True, host=None, port=None, + api_version=None, region=None, **kwargs): + """ + :param key: API key or username to be used (required) + :type key: ``str`` + + :param secret: Secret password to be used (required) + :type secret: ``str`` + + :param secure: Weither to use HTTPS or HTTP. Note: Some providers + only support HTTPS, and it is on by default. + :type secure: ``bool`` + + :param host: Override hostname used for connections. + :type host: ``str`` + + :param port: Override port used for connections. + :type port: ``int`` + + :param api_version: Optional API version. Only used by drivers + which support multiple API versions. + :type api_version: ``str`` + + :param region: Optional driver region. Only used by drivers which + support multiple regions. 
+ :type region: ``str`` + + :rtype: ``None`` + """ + + self.key = key + self.secret = secret + self.secure = secure + args = [self.key] + + if self.secret is not None: + args.append(self.secret) + + args.append(secure) + + if host is not None: + args.append(host) + + if port is not None: + args.append(port) + + self.api_version = api_version + self.region = region + + conn_kwargs = self._ex_connection_class_kwargs() + self.connection = self.connectionCls(*args, **conn_kwargs) + + self.connection.driver = self + self.connection.connect() + + def _ex_connection_class_kwargs(self): + """ + Return extra connection keyword arguments which are passed to the + Connection class constructor. + """ + return {} diff -Nru libcloud-0.5.0/libcloud/common/brightbox.py libcloud-0.15.1/libcloud/common/brightbox.py --- libcloud-0.5.0/libcloud/common/brightbox.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/common/brightbox.py 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,101 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import base64 + +from libcloud.common.base import ConnectionUserAndKey, JsonResponse +from libcloud.compute.types import InvalidCredsError + +from libcloud.utils.py3 import b +from libcloud.utils.py3 import httplib + +try: + import simplejson as json +except ImportError: + import json + + +class BrightboxResponse(JsonResponse): + def success(self): + return self.status >= httplib.OK and self.status < httplib.BAD_REQUEST + + def parse_body(self): + if self.headers['content-type'].split(';')[0] == 'application/json': + return super(BrightboxResponse, self).parse_body() + else: + return self.body + + def parse_error(self): + response = super(BrightboxResponse, self).parse_body() + + if 'error' in response: + if response['error'] in ['invalid_client', 'unauthorized_client']: + raise InvalidCredsError(response['error']) + + return response['error'] + elif 'error_name' in response: + return '%s: %s' % (response['error_name'], response['errors'][0]) + + return self.body + + +class BrightboxConnection(ConnectionUserAndKey): + """ + Connection class for the Brightbox driver + """ + + host = 'api.gb1.brightbox.com' + responseCls = BrightboxResponse + + def _fetch_oauth_token(self): + body = json.dumps({'client_id': self.user_id, 'grant_type': 'none'}) + + authorization = 'Basic ' + str(base64.encodestring(b('%s:%s' % + (self.user_id, self.key)))).rstrip() + + self.connect() + + headers = { + 'Host': self.host, + 'User-Agent': self._user_agent(), + 'Authorization': authorization, + 'Content-Type': 'application/json', + 'Content-Length': str(len(body)) + } + + response = self.connection.request(method='POST', url='/token', + body=body, headers=headers) + + response = self.connection.getresponse() + + if response.status == httplib.OK: + return json.loads(response.read())['access_token'] + else: + responseCls = BrightboxResponse(response=response, connection=self) + message = responseCls.parse_error() + raise InvalidCredsError(message) + + def add_default_headers(self, 
headers): + try: + headers['Authorization'] = 'OAuth ' + self.token + except AttributeError: + self.token = self._fetch_oauth_token() + + headers['Authorization'] = 'OAuth ' + self.token + + return headers + + def encode_data(self, data): + return json.dumps(data) diff -Nru libcloud-0.5.0/libcloud/common/cloudsigma.py libcloud-0.15.1/libcloud/common/cloudsigma.py --- libcloud-0.5.0/libcloud/common/cloudsigma.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/common/cloudsigma.py 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,146 @@ +# -*- coding: utf-8 -*- +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+__all__ = [
+    'API_ENDPOINTS_1_0',
+    'API_ENDPOINTS_2_0',
+    'API_VERSIONS',
+    'INSTANCE_TYPES'
+]
+
+# API end-points
+API_ENDPOINTS_1_0 = {
+    'zrh': {
+        'name': 'Zurich',
+        'country': 'Switzerland',
+        'host': 'api.zrh.cloudsigma.com'
+    },
+    'lvs': {
+        'name': 'Las Vegas',
+        'country': 'United States',
+        'host': 'api.lvs.cloudsigma.com'
+    }
+}
+
+API_ENDPOINTS_2_0 = {
+    'zrh': {
+        'name': 'Zurich',
+        'country': 'Switzerland',
+        'host': 'zrh.cloudsigma.com'
+    },
+    'lvs': {
+        'name': 'Las Vegas',
+        'country': 'United States',
+        'host': 'lvs.cloudsigma.com'
+    },
+    'wdc': {
+        'name': 'Washington DC',
+        'country': 'United States',
+        'host': 'wdc.cloudsigma.com'
+    }
+
+}
+
+DEFAULT_REGION = 'zrh'
+
+# Supported API versions.
+# NOTE(review): comma added after '1.0' — without it Python's implicit
+# string-literal concatenation yields the single element '1.02.0'.
+API_VERSIONS = [
+    '1.0',  # old and deprecated
+    '2.0'
+]
+
+DEFAULT_API_VERSION = '2.0'
+
+# CloudSigma doesn't specify special instance types.
+# Basically for CPU any value between 0.5 GHz and 20.0 GHz should work,
+# 500 MB to 32000 MB for ram
+# and 1 GB to 1024 GB for hard drive size.
+# Plans in this file are based on examples listed on http://www.cloudsigma +# .com/en/pricing/price-schedules +INSTANCE_TYPES = [ + { + 'id': 'micro-regular', + 'name': 'Micro/Regular instance', + 'cpu': 1100, + 'memory': 640, + 'disk': 10 + 3, + 'bandwidth': None, + }, + { + 'id': 'micro-high-cpu', + 'name': 'Micro/High CPU instance', + 'cpu': 2200, + 'memory': 640, + 'disk': 80, + 'bandwidth': None, + }, + { + 'id': 'standard-small', + 'name': 'Standard/Small instance', + 'cpu': 1100, + 'memory': 1741, + 'disk': 50, + 'bandwidth': None, + }, + { + 'id': 'standard-large', + 'name': 'Standard/Large instance', + 'cpu': 4400, + 'memory': 7680, + 'disk': 250, + 'bandwidth': None, + }, + { + 'id': 'standard-extra-large', + 'name': 'Standard/Extra Large instance', + 'cpu': 8800, + 'memory': 15360, + 'disk': 500, + 'bandwidth': None, + }, + { + 'id': 'high-memory-extra-large', + 'name': 'High Memory/Extra Large instance', + 'cpu': 7150, + 'memory': 17510, + 'disk': 250, + 'bandwidth': None, + }, + { + 'id': 'high-memory-double-extra-large', + 'name': 'High Memory/Double Extra Large instance', + 'cpu': 14300, + 'memory': 32768, + 'disk': 500, + 'bandwidth': None, + }, + { + 'id': 'high-cpu-medium', + 'name': 'High CPU/Medium instance', + 'cpu': 5500, + 'memory': 1741, + 'disk': 150, + 'bandwidth': None, + }, + { + 'id': 'high-cpu-extra-large', + 'name': 'High CPU/Extra Large instance', + 'cpu': 20000, + 'memory': 7168, + 'disk': 500, + 'bandwidth': None, + } +] diff -Nru libcloud-0.5.0/libcloud/common/cloudstack.py libcloud-0.15.1/libcloud/common/cloudstack.py --- libcloud-0.5.0/libcloud/common/cloudstack.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/common/cloudstack.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,195 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import base64 +import hashlib +import copy +import hmac + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlencode +from libcloud.utils.py3 import urlquote +from libcloud.utils.py3 import b + +from libcloud.common.types import ProviderError +from libcloud.common.base import ConnectionUserAndKey, PollingConnection +from libcloud.common.base import JsonResponse +from libcloud.common.types import MalformedResponseError +from libcloud.compute.types import InvalidCredsError + + +class CloudStackResponse(JsonResponse): + def parse_error(self): + if self.status == httplib.UNAUTHORIZED: + raise InvalidCredsError('Invalid provider credentials') + + body = self.parse_body() + values = list(body.values())[0] + + if 'errortext' in values: + value = values['errortext'] + else: + value = self.body + + error = ProviderError(value=value, http_code=self.status, + driver=self.connection.driver) + raise error + + +class CloudStackConnection(ConnectionUserAndKey, PollingConnection): + responseCls = CloudStackResponse + poll_interval = 1 + request_method = '_sync_request' + timeout = 600 + + ASYNC_PENDING = 0 + ASYNC_SUCCESS = 1 + ASYNC_FAILURE = 2 + + def encode_data(self, data): + """ + Must of the data is sent as part of query params (eeww), + but in newer versions, userdata argument can be sent as a + urlencoded data in the request body. 
+ """ + if data: + data = urlencode(data) + + return data + + def _make_signature(self, params): + signature = [(k.lower(), v) for k, v in list(params.items())] + signature.sort(key=lambda x: x[0]) + + pairs = [] + for pair in signature: + key = urlquote(str(pair[0]), safe='[]') + value = urlquote(str(pair[1]), safe='[]') + item = '%s=%s' % (key, value) + pairs .append(item) + + signature = '&'.join(pairs) + + signature = signature.lower().replace('+', '%20') + signature = hmac.new(b(self.key), msg=b(signature), + digestmod=hashlib.sha1) + return base64.b64encode(b(signature.digest())) + + def add_default_params(self, params): + params['apiKey'] = self.user_id + params['response'] = 'json' + + return params + + def pre_connect_hook(self, params, headers): + params['signature'] = self._make_signature(params) + + return params, headers + + def _async_request(self, command, action=None, params=None, data=None, + headers=None, method='GET', context=None): + if params: + context = copy.deepcopy(params) + else: + context = {} + + # Command is specified as part of GET call + context['command'] = command + result = super(CloudStackConnection, self).async_request( + action=action, params=params, data=data, headers=headers, + method=method, context=context) + return result['jobresult'] + + def get_request_kwargs(self, action, params=None, data='', headers=None, + method='GET', context=None): + command = context['command'] + request_kwargs = {'command': command, 'action': action, + 'params': params, 'data': data, + 'headers': headers, 'method': method} + return request_kwargs + + def get_poll_request_kwargs(self, response, context, request_kwargs): + job_id = response['jobid'] + params = {'jobid': job_id} + kwargs = {'command': 'queryAsyncJobResult', 'params': params} + return kwargs + + def has_completed(self, response): + status = response.get('jobstatus', self.ASYNC_PENDING) + + if status == self.ASYNC_FAILURE: + msg = response.get('jobresult', {}).get('errortext', status) 
+ raise Exception(msg) + + return status == self.ASYNC_SUCCESS + + def _sync_request(self, command, action=None, params=None, data=None, + headers=None, method='GET'): + """ + This method handles synchronous calls which are generally fast + information retrieval requests and thus return 'quickly'. + """ + # command is always sent as part of "command" query parameter + if params: + params = copy.deepcopy(params) + else: + params = {} + + params['command'] = command + result = self.request(action=self.driver.path, params=params, + data=data, headers=headers, method=method) + + command = command.lower() + + # Work around for older verions which don't return "response" suffix + # in delete ingress rule response command name + if (command == 'revokesecuritygroupingress' and + 'revokesecuritygroupingressresponse' not in result.object): + command = command + else: + command = command + 'response' + + if command not in result.object: + raise MalformedResponseError( + "Unknown response format", + body=result.body, + driver=self.driver) + result = result.object[command] + return result + + +class CloudStackDriverMixIn(object): + host = None + path = None + + connectionCls = CloudStackConnection + + def __init__(self, key, secret=None, secure=True, host=None, port=None): + host = host or self.host + super(CloudStackDriverMixIn, self).__init__(key, secret, secure, host, + port) + + def _sync_request(self, command, action=None, params=None, data=None, + headers=None, method='GET'): + return self.connection._sync_request(command=command, action=action, + params=params, data=data, + headers=headers, method=method) + + def _async_request(self, command, action=None, params=None, data=None, + headers=None, method='GET', context=None): + return self.connection._async_request(command=command, action=action, + params=params, data=data, + headers=headers, method=method, + context=context) diff -Nru libcloud-0.5.0/libcloud/common/gandi.py libcloud-0.15.1/libcloud/common/gandi.py --- 
libcloud-0.5.0/libcloud/common/gandi.py	1970-01-01 00:00:00.000000000 +0000
+++ libcloud-0.15.1/libcloud/common/gandi.py	2013-11-29 12:35:04.000000000 +0000
@@ -0,0 +1,189 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Gandi driver base classes
+"""
+
+import time
+import hashlib
+import sys
+
+from libcloud.utils.py3 import b
+
+from libcloud.common.base import ConnectionKey
+from libcloud.common.xmlrpc import XMLRPCResponse, XMLRPCConnection
+
+# Global constants
+
+DEFAULT_TIMEOUT = 600  # operation pooling max seconds
+DEFAULT_INTERVAL = 20  # seconds between 2 operation.info
+
+
+class GandiException(Exception):
+    """
+    Exception class for Gandi driver
+    """
+    def __str__(self):
+        return '(%u) %s' % (self.args[0], self.args[1])
+
+    def __repr__(self):
+        return '<GandiException code %u "%s">' % (self.args[0], self.args[1])
+
+
+class GandiResponse(XMLRPCResponse):
+    """
+    A Base Gandi Response class to derive from.
+ """ + + +class GandiConnection(XMLRPCConnection, ConnectionKey): + """ + Connection class for the Gandi driver + """ + + responseCls = GandiResponse + host = 'rpc.gandi.net' + endpoint = '/xmlrpc/' + + def __init__(self, key, secure=True): + # Note: Method resolution order in this case is + # XMLRPCConnection -> Connection and Connection doesn't take key as the + # first argument so we specify a keyword argument instead. + # Previously it was GandiConnection -> ConnectionKey so it worked fine. + super(GandiConnection, self).__init__(key=key, secure=secure) + self.driver = BaseGandiDriver + + def request(self, method, *args): + args = (self.key, ) + args + return super(GandiConnection, self).request(method, *args) + + +class BaseGandiDriver(object): + """ + Gandi base driver + + """ + connectionCls = GandiConnection + name = 'Gandi' + + # Specific methods for gandi + def _wait_operation(self, id, timeout=DEFAULT_TIMEOUT, + check_interval=DEFAULT_INTERVAL): + """ Wait for an operation to succeed""" + + for i in range(0, timeout, check_interval): + try: + op = self.connection.request('operation.info', int(id)).object + + if op['step'] == 'DONE': + return True + if op['step'] in ['ERROR', 'CANCEL']: + return False + except (KeyError, IndexError): + pass + except Exception: + e = sys.exc_info()[1] + raise GandiException(1002, e) + + time.sleep(check_interval) + return False + + +class BaseObject(object): + """Base class for objects not conventional""" + + uuid_prefix = '' + + def __init__(self, id, state, driver): + self.id = str(id) if id else None + self.state = state + self.driver = driver + self.uuid = self.get_uuid() + + def get_uuid(self): + """Unique hash for this object + + :return: ``str`` + + The hash is a function of an SHA1 hash of prefix, the object's ID and + its driver which means that it should be unique between all + interfaces. 
+        TODO : to review
+        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
+        >>> driver = DummyNodeDriver(0)
+        >>> vif = driver.create_interface()
+        >>> vif.get_uuid()
+        'd3748461511d8b9b0e0bfa0d4d3383a619a2bb9f'
+
+        Note, for example, that this example will always produce the
+        same UUID!
+        """
+        hashstring = '%s:%s:%s' % \
+            (self.uuid_prefix, self.id, self.driver.type)
+        return hashlib.sha1(b(hashstring)).hexdigest()
+
+
+class IPAddress(BaseObject):
+    """
+    Provide a common interface for ip addresses
+    """
+
+    uuid_prefix = 'inet:'
+
+    def __init__(self, id, state, inet, driver, version=4, extra=None):
+        super(IPAddress, self).__init__(id, state, driver)
+        self.inet = inet
+        self.version = version
+        self.extra = extra or {}
+
+    def __repr__(self):
+        return (('<IPAddress: id=%s, address=%s, state=%s, driver=%s ...>')
+                % (self.id, self.inet, self.state, self.driver.name))
+
+
+class NetworkInterface(BaseObject):
+    """
+    Provide a common interface for network interfaces
+    """
+
+    uuid_prefix = 'if:'
+
+    def __init__(self, id, state, mac_address, driver,
+                 ips=None, node_id=None, extra=None):
+        super(NetworkInterface, self).__init__(id, state, driver)
+        self.mac = mac_address
+        self.ips = ips or {}
+        self.node_id = node_id
+        self.extra = extra or {}
+
+    def __repr__(self):
+        return (('<Interface: id=%s, mac=%s, state=%s, driver=%s ...>')
+                % (self.id, self.mac, self.state, self.driver.name))
+
+
+class Disk(BaseObject):
+    """
+    Gandi disk component
+    """
+    def __init__(self, id, state, name, driver, size, extra=None):
+        super(Disk, self).__init__(id, state, driver)
+        self.name = name
+        self.size = size
+        self.extra = extra or {}
+
+    def __repr__(self):
+        return (
+            ('<Disk: id=%s, name=%s, state=%s, size=%s, driver=%s ...>')
+            % (self.id, self.name, self.state, self.size, self.driver.name))
diff -Nru libcloud-0.5.0/libcloud/common/gogrid.py libcloud-0.15.1/libcloud/common/gogrid.py
--- libcloud-0.5.0/libcloud/common/gogrid.py	2011-04-26 11:30:25.000000000 +0000
+++ libcloud-0.15.1/libcloud/common/gogrid.py	2013-11-29 12:35:04.000000000 +0000
@@ -16,56 +16,53 @@
 import hashlib
 import time
 
-try:
-    import json
-except ImportError: - import simplejson as json +from libcloud.utils.py3 import b from libcloud.common.types import InvalidCredsError, LibcloudError from libcloud.common.types import MalformedResponseError -from libcloud.common.base import ConnectionUserAndKey, Response +from libcloud.common.base import ConnectionUserAndKey, JsonResponse from libcloud.compute.base import NodeLocation HOST = 'api.gogrid.com' -PORTS_BY_SECURITY = { True: 443, False: 80 } +PORTS_BY_SECURITY = {True: 443, False: 80} API_VERSION = '1.8' -__all__ = ["GoGridResponse", - "GoGridConnection", - "GoGridIpAddress", - "BaseGoGridDriver", +__all__ = [ + "GoGridResponse", + "GoGridConnection", + "GoGridIpAddress", + "BaseGoGridDriver", ] -class GoGridResponse(Response): + +class GoGridResponse(JsonResponse): def __init__(self, *args, **kwargs): - self.driver = BaseGoGridDriver - super(GoGridResponse, self).__init__(*args, **kwargs) + self.driver = BaseGoGridDriver + super(GoGridResponse, self).__init__(*args, **kwargs) def success(self): if self.status == 403: raise InvalidCredsError('Invalid credentials', self.driver) if self.status == 401: - raise InvalidCredsError('API Key has insufficient rights', self.driver) + raise InvalidCredsError('API Key has insufficient rights', + self.driver) if not self.body: return None try: - return json.loads(self.body)['status'] == 'success' + return self.parse_body()['status'] == 'success' except ValueError: raise MalformedResponseError('Malformed reply', - body=self.body, driver=self.driver) - - def parse_body(self): - if not self.body: - return None - return json.loads(self.body) + body=self.body, + driver=self.driver) def parse_error(self): try: - return json.loads(self.body)["list"][0]['message'] + return self.parse_body()["list"][0]["message"] except (ValueError, KeyError): return None + class GoGridConnection(ConnectionUserAndKey): """ Connection class for the GoGrid driver @@ -84,9 +81,15 @@ def get_signature(self, key, secret): """ create sig from md5 
of key + secret + time """ - m = hashlib.md5(key+secret+str(int(time.time()))) + m = hashlib.md5(b(key + secret + str(int(time.time())))) return m.hexdigest() + def request(self, action, params=None, data='', headers=None, method='GET', + raw=False): + return super(GoGridConnection, self).request(action, params, data, + headers, method, raw) + + class GoGridIpAddress(object): """ IP Address @@ -99,6 +102,7 @@ self.state = state self.subnet = subnet + class BaseGoGridDriver(object): """GoGrid has common object model for services they provide, like locations and IP, so keep handling of @@ -111,62 +115,63 @@ def _to_ip(self, element): ip = GoGridIpAddress(id=element['id'], - ip=element['ip'], - public=element['public'], - subnet=element['subnet'], - state=element["state"]["name"]) + ip=element['ip'], + public=element['public'], + subnet=element['subnet'], + state=element["state"]["name"]) ip.location = self._to_location(element['datacenter']) return ip def _to_ips(self, object): - return [ self._to_ip(el) - for el in object['list'] ] + return [self._to_ip(el) + for el in object['list']] def _to_location(self, element): location = NodeLocation(id=element['id'], - name=element['name'], - country="US", - driver=self.connection.driver) + name=element['name'], + country="US", + driver=self.connection.driver) return location def _to_locations(self, object): return [self._to_location(el) for el in object['list']] - def ex_list_ips(self, **kwargs): """Return list of IP addresses assigned to the account. - @keyword public: set to True to list only + :keyword public: set to True to list only public IPs or False to list only private IPs. 
Set to None or not specify at all not to filter by type - @type public: C{bool} - @keyword assigned: set to True to list only addresses + :type public: ``bool`` + + :keyword assigned: set to True to list only addresses assigned to servers, False to list unassigned addresses and set to None or don't set at all not no filter by state - @type assigned: C{bool} - @keyword location: filter IP addresses by location - @type location: L{NodeLocation} - @return: C{list} of L{GoGridIpAddress}es + :type assigned: ``bool`` + + :keyword location: filter IP addresses by location + :type location: :class:`NodeLocation` + + :rtype: ``list`` of :class:`GoGridIpAddress` """ params = {} if "public" in kwargs and kwargs["public"] is not None: params["ip.type"] = {True: "Public", - False: "Private"}[kwargs["public"]] + False: "Private"}[kwargs["public"]] if "assigned" in kwargs and kwargs["assigned"] is not None: params["ip.state"] = {True: "Assigned", - False: "Unassigned"}[kwargs["assigned"]] + False: "Unassigned"}[kwargs["assigned"]] if "location" in kwargs and kwargs['location'] is not None: params['datacenter'] = kwargs['location'].id - ips = self._to_ips( - self.connection.request('/api/grid/ip/list', - params=params).object) + response = self.connection.request('/api/grid/ip/list', params=params) + ips = self._to_ips(response.object) return ips def _get_first_ip(self, location=None): @@ -175,4 +180,4 @@ return ips[0].ip except IndexError: raise LibcloudError('No public unassigned IPs left', - self.driver) + self.driver) diff -Nru libcloud-0.5.0/libcloud/common/google.py libcloud-0.15.1/libcloud/common/google.py --- libcloud-0.5.0/libcloud/common/google.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/common/google.py 2014-06-11 14:28:05.000000000 +0000 @@ -0,0 +1,671 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Module for Google Connection and Authentication classes. + +Information about setting up your Google OAUTH2 credentials: + +For libcloud, there are two basic methods for authenticating to Google using +OAUTH2: Service Accounts and Client IDs for Installed Applications. + +Both are initially set up from the Cloud Console_ +_Console: https://cloud.google.com/console + +Setting up Service Account authentication (note that you need the PyCrypto +package installed to use this): + - Go to the Console + - Go to your project and then to "APIs & auth" on the left + - Click on "Credentials" + - Click on "Create New Client ID..." + - Select "Service account" and click on "Create Client ID" + - Download the Private Key (should happen automatically). + - The key that you download is a PKCS12 key. It needs to be converted to + the PEM format. + - Convert the key using OpenSSL (the default password is 'notasecret'): + ``openssl pkcs12 -in YOURPRIVKEY.p12 -nodes -nocerts + -passin pass:notasecret | openssl rsa -out PRIV.pem`` + - Move the .pem file to a safe location. + - To Authenticate, you will need to pass the Service Account's "Email + address" in as the user_id and the path to the .pem file as the key. 
+ +Setting up Installed Application authentication: + - Go to the Console + - Go to your project and then to "APIs & auth" on the left + - Click on "Credentials" + - Select "Installed application" and "Other" then click on + "Create Client ID" + - To Authenticate, pass in the "Client ID" as the user_id and the "Client + secret" as the key + - The first time that you do this, the libcloud will give you a URL to + visit. Copy and paste the URL into a browser. + - When you go to the URL it will ask you to log in (if you aren't already) + and ask you if you want to allow the project access to your account. + - Click on Accept and you will be given a code. + - Paste that code at the prompt given to you by the Google libcloud + connection. + - At that point, a token & refresh token will be stored in your home + directory and will be used for authentication. + +Please remember to secure your keys and access tokens. +""" +from __future__ import with_statement + +try: + import simplejson as json +except ImportError: + import json + +import base64 +import errno +import time +import datetime +import os +import socket +import sys + +from libcloud.utils.py3 import httplib, urlencode, urlparse, PY3 +from libcloud.common.base import (ConnectionUserAndKey, JsonResponse, + PollingConnection) +from libcloud.common.types import (ProviderError, + LibcloudError) + +try: + from Crypto.Hash import SHA256 + from Crypto.PublicKey import RSA + from Crypto.Signature import PKCS1_v1_5 + import Crypto.Random + Crypto.Random.atfork() +except ImportError: + # The pycrypto library is unavailable + SHA256 = None + RSA = None + PKCS1_v1_5 = None + +TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%SZ' + + +class GoogleAuthError(LibcloudError): + """Generic Error class for various authentication errors.""" + def __init__(self, value): + self.value = value + + def __repr__(self): + return repr(self.value) + + +class GoogleBaseError(ProviderError): + def __init__(self, value, http_code, code, driver=None): + 
self.code = code + super(GoogleBaseError, self).__init__(value, http_code, driver) + + +class InvalidRequestError(GoogleBaseError): + pass + + +class JsonParseError(GoogleBaseError): + pass + + +class ResourceNotFoundError(GoogleBaseError): + pass + + +class QuotaExceededError(GoogleBaseError): + pass + + +class ResourceExistsError(GoogleBaseError): + pass + + +class ResourceInUseError(GoogleBaseError): + pass + + +class GoogleResponse(JsonResponse): + """ + Google Base Response class. + """ + def success(self): + """ + Determine if the request was successful. + + For the Google response class, tag all responses as successful and + raise appropriate Exceptions from parse_body. + + :return: C{True} + """ + return True + + def _get_error(self, body): + """ + Get the error code and message from a JSON response. + + Return just the first error if there are multiple errors. + + :param body: The body of the JSON response dictionary + :type body: ``dict`` + + :return: Tuple containing error code and message + :rtype: ``tuple`` of ``str`` or ``int`` + """ + if 'errors' in body['error']: + err = body['error']['errors'][0] + else: + err = body['error'] + + if 'code' in err: + code = err.get('code') + message = err.get('message') + else: + code = None + message = body.get('error_description', err) + + return (code, message) + + def parse_body(self): + """ + Parse the JSON response body, or raise exceptions as appropriate. + + :return: JSON dictionary + :rtype: ``dict`` + """ + if len(self.body) == 0 and not self.parse_zero_length_body: + return self.body + + json_error = False + try: + body = json.loads(self.body) + except: + # If there is both a JSON parsing error and an unsuccessful http + # response (like a 404), we want to raise the http error and not + # the JSON one, so don't raise JsonParseError here. 
+ body = self.body + json_error = True + + if self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]: + if json_error: + raise JsonParseError(body, self.status, None) + elif 'error' in body: + (code, message) = self._get_error(body) + if code == 'QUOTA_EXCEEDED': + raise QuotaExceededError(message, self.status, code) + elif code == 'RESOURCE_ALREADY_EXISTS': + raise ResourceExistsError(message, self.status, code) + elif code.startswith('RESOURCE_IN_USE'): + raise ResourceInUseError(message, self.status, code) + else: + raise GoogleBaseError(message, self.status, code) + else: + return body + + elif self.status == httplib.NOT_FOUND: + if (not json_error) and ('error' in body): + (code, message) = self._get_error(body) + else: + message = body + code = None + raise ResourceNotFoundError(message, self.status, code) + + elif self.status == httplib.BAD_REQUEST: + if (not json_error) and ('error' in body): + (code, message) = self._get_error(body) + else: + message = body + code = None + raise InvalidRequestError(message, self.status, code) + + else: + if (not json_error) and ('error' in body): + (code, message) = self._get_error(body) + else: + message = body + code = None + raise GoogleBaseError(message, self.status, code) + + +class GoogleBaseDriver(object): + name = "Google API" + + +class GoogleBaseAuthConnection(ConnectionUserAndKey): + """ + Base class for Google Authentication. Should be subclassed for specific + types of authentication. + """ + driver = GoogleBaseDriver + responseCls = GoogleResponse + name = 'Google Auth' + host = 'accounts.google.com' + auth_path = '/o/oauth2/auth' + + def __init__(self, user_id, key, scopes=None, + redirect_uri='urn:ietf:wg:oauth:2.0:oob', + login_hint=None, **kwargs): + """ + :param user_id: The email address (for service accounts) or Client ID + (for installed apps) to be used for authentication. 
+ :type user_id: ``str`` + + :param key: The RSA Key (for service accounts) or file path containing + key or Client Secret (for installed apps) to be used for + authentication. + :type key: ``str`` + + :param scopes: A list of urls defining the scope of authentication + to grant. + :type scopes: ``list`` + + :keyword redirect_uri: The Redirect URI for the authentication + request. See Google OAUTH2 documentation for + more info. + :type redirect_uri: ``str`` + + :keyword login_hint: Login hint for authentication request. Useful + for Installed Application authentication. + :type login_hint: ``str`` + """ + scopes = scopes or [] + + self.scopes = " ".join(scopes) + self.redirect_uri = redirect_uri + self.login_hint = login_hint + + super(GoogleBaseAuthConnection, self).__init__(user_id, key, **kwargs) + + def _now(self): + return datetime.datetime.utcnow() + + def add_default_headers(self, headers): + headers['Content-Type'] = "application/x-www-form-urlencoded" + headers['Host'] = self.host + return headers + + def _token_request(self, request_body): + """ + Return an updated token from a token request body. + + :param request_body: A dictionary of values to send in the body of the + token request. + :type request_body: ``dict`` + + :return: A dictionary with updated token information + :rtype: ``dict`` + """ + data = urlencode(request_body) + now = self._now() + response = self.request('/o/oauth2/token', method='POST', data=data) + token_info = response.object + if 'expires_in' in token_info: + expire_time = now + datetime.timedelta( + seconds=token_info['expires_in']) + token_info['expire_time'] = expire_time.strftime(TIMESTAMP_FORMAT) + return token_info + + +class GoogleInstalledAppAuthConnection(GoogleBaseAuthConnection): + """Authentication connection for "Installed Application" authentication.""" + def get_code(self): + """ + Give the user a URL that they can visit to authenticate and obtain a + code. 
This method will ask for that code that the user can paste in. + + :return: Code supplied by the user after authenticating + :rtype: ``str`` + """ + auth_params = {'response_type': 'code', + 'client_id': self.user_id, + 'redirect_uri': self.redirect_uri, + 'scope': self.scopes, + 'state': 'Libcloud Request'} + if self.login_hint: + auth_params['login_hint'] = self.login_hint + + data = urlencode(auth_params) + + url = 'https://%s%s?%s' % (self.host, self.auth_path, data) + print('Please Go to the following URL and sign in:') + print(url) + if PY3: + code = input('Enter Code:') + else: + code = raw_input('Enter Code:') + return code + + def get_new_token(self): + """ + Get a new token. Generally used when no previous token exists or there + is no refresh token + + :return: Dictionary containing token information + :rtype: ``dict`` + """ + # Ask the user for a code + code = self.get_code() + + token_request = {'code': code, + 'client_id': self.user_id, + 'client_secret': self.key, + 'redirect_uri': self.redirect_uri, + 'grant_type': 'authorization_code'} + + return self._token_request(token_request) + + def refresh_token(self, token_info): + """ + Use the refresh token supplied in the token info to get a new token. + + :param token_info: Dictionary containing current token information + :type token_info: ``dict`` + + :return: A dictionary containing updated token information. 
+ :rtype: ``dict`` + """ + if 'refresh_token' not in token_info: + return self.get_new_token() + refresh_request = {'refresh_token': token_info['refresh_token'], + 'client_id': self.user_id, + 'client_secret': self.key, + 'grant_type': 'refresh_token'} + + new_token = self._token_request(refresh_request) + if 'refresh_token' not in new_token: + new_token['refresh_token'] = token_info['refresh_token'] + return new_token + + +class GoogleServiceAcctAuthConnection(GoogleBaseAuthConnection): + """Authentication class for "Service Account" authentication.""" + def __init__(self, user_id, key, *args, **kwargs): + """ + Check to see if PyCrypto is available, and convert key file path into a + key string if the key is in a file. + + :param user_id: Email address to be used for Service Account + authentication. + :type user_id: ``str`` + + :param key: The RSA Key or path to file containing the key. + :type key: ``str`` + """ + if SHA256 is None: + raise GoogleAuthError('PyCrypto library required for ' + 'Service Account Authentication.') + # Check to see if 'key' is a file and read the file if it is. + keypath = os.path.expanduser(key) + is_file_path = os.path.exists(keypath) and os.path.isfile(keypath) + if is_file_path: + with open(keypath, 'r') as f: + key = f.read() + super(GoogleServiceAcctAuthConnection, self).__init__( + user_id, key, *args, **kwargs) + + def get_new_token(self): + """ + Get a new token using the email address and RSA Key. 
+ + :return: Dictionary containing token information + :rtype: ``dict`` + """ + # The header is always the same + header = {'alg': 'RS256', 'typ': 'JWT'} + header_enc = base64.urlsafe_b64encode(json.dumps(header)) + + # Construct a claim set + claim_set = {'iss': self.user_id, + 'scope': self.scopes, + 'aud': 'https://accounts.google.com/o/oauth2/token', + 'exp': int(time.time()) + 3600, + 'iat': int(time.time())} + claim_set_enc = base64.urlsafe_b64encode(json.dumps(claim_set)) + + # The message contains both the header and claim set + message = '%s.%s' % (header_enc, claim_set_enc) + # Then the message is signed using the key supplied + key = RSA.importKey(self.key) + hash_func = SHA256.new(message) + signer = PKCS1_v1_5.new(key) + signature = base64.urlsafe_b64encode(signer.sign(hash_func)) + + # Finally the message and signature are sent to get a token + jwt = '%s.%s' % (message, signature) + request = {'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer', + 'assertion': jwt} + + return self._token_request(request) + + def refresh_token(self, token_info): + """ + Refresh the current token. + + Service Account authentication doesn't supply a "refresh token" so + this simply gets a new token using the email address/key. + + :param token_info: Dictionary containing token information. + (Not used, but here for compatibility) + :type token_info: ``dict`` + + :return: A dictionary containing updated token information. + :rtype: ``dict`` + """ + return self.get_new_token() + + +class GoogleBaseConnection(ConnectionUserAndKey, PollingConnection): + """Base connection class for interacting with Google APIs.""" + driver = GoogleBaseDriver + responseCls = GoogleResponse + host = 'www.googleapis.com' + poll_interval = 2.0 + timeout = 180 + + def __init__(self, user_id, key, auth_type=None, + credential_file=None, scopes=None, **kwargs): + """ + Determine authentication type, set up appropriate authentication + connection and get initial authentication information. 
+ + :param user_id: The email address (for service accounts) or Client ID + (for installed apps) to be used for authentication. + :type user_id: ``str`` + + :param key: The RSA Key (for service accounts) or file path containing + key or Client Secret (for installed apps) to be used for + authentication. + :type key: ``str`` + + :keyword auth_type: Accepted values are "SA" or "IA" + ("Service Account" or "Installed Application"). + If not supplied, auth_type will be guessed based + on value of user_id. + :type auth_type: ``str`` + + :keyword credential_file: Path to file for caching authentication + information. + :type credential_file: ``str`` + + :keyword scopes: List of OAuth2 scope URLs. The empty default sets + read/write access to Compute, Storage, and DNS. + :type scopes: ``list`` + """ + self.credential_file = credential_file or '~/.gce_libcloud_auth' + + if auth_type is None: + # Try to guess. Service accounts use an email address + # as the user id. + if '@' in user_id: + auth_type = 'SA' + else: + auth_type = 'IA' + + # Default scopes to read/write for compute, storage, and dns. 
Can + # override this when calling get_driver() or setting in secrets.py + self.scopes = scopes + if not self.scopes: + self.scopes = [ + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/devstorage.full_control', + 'https://www.googleapis.com/auth/ndev.clouddns.readwrite', + ] + self.token_info = self._get_token_info_from_file() + + if auth_type == 'SA': + self.auth_conn = GoogleServiceAcctAuthConnection( + user_id, key, self.scopes, **kwargs) + elif auth_type == 'IA': + self.auth_conn = GoogleInstalledAppAuthConnection( + user_id, key, self.scopes, **kwargs) + else: + raise GoogleAuthError('auth_type should be \'SA\' or \'IA\'') + + if self.token_info is None: + self.token_info = self.auth_conn.get_new_token() + self._write_token_info_to_file() + + self.token_expire_time = datetime.datetime.strptime( + self.token_info['expire_time'], TIMESTAMP_FORMAT) + + super(GoogleBaseConnection, self).__init__(user_id, key, **kwargs) + + python_ver = '%s.%s.%s' % (sys.version_info[0], sys.version_info[1], + sys.version_info[2]) + ver_platform = 'Python %s/%s' % (python_ver, sys.platform) + self.user_agent_append(ver_platform) + + def _now(self): + return datetime.datetime.utcnow() + + def add_default_headers(self, headers): + """ + @inherits: :class:`Connection.add_default_headers` + """ + headers['Content-Type'] = "application/json" + headers['Host'] = self.host + return headers + + def pre_connect_hook(self, params, headers): + """ + Check to make sure that token hasn't expired. If it has, get an + updated token. Also, add the token to the headers. 
+ + @inherits: :class:`Connection.pre_connect_hook` + """ + now = self._now() + if self.token_expire_time < now: + self.token_info = self.auth_conn.refresh_token(self.token_info) + self.token_expire_time = datetime.datetime.strptime( + self.token_info['expire_time'], TIMESTAMP_FORMAT) + self._write_token_info_to_file() + headers['Authorization'] = 'Bearer %s' % ( + self.token_info['access_token']) + + return params, headers + + def encode_data(self, data): + """Encode data to JSON""" + return json.dumps(data) + + def request(self, *args, **kwargs): + """ + @inherits: :class:`Connection.request` + """ + # Adds some retry logic for the occasional + # "Connection Reset by peer" error. + retries = 4 + tries = 0 + while tries < (retries - 1): + try: + return super(GoogleBaseConnection, self).request( + *args, **kwargs) + except socket.error: + e = sys.exc_info()[1] + if e.errno == errno.ECONNRESET: + tries = tries + 1 + else: + raise e + # One more time, then give up. + return super(GoogleBaseConnection, self).request(*args, **kwargs) + + def _get_token_info_from_file(self): + """ + Read credential file and return token information. + + :return: Token information dictionary, or None + :rtype: ``dict`` or ``None`` + """ + token_info = None + filename = os.path.realpath(os.path.expanduser(self.credential_file)) + + try: + with open(filename, 'r') as f: + data = f.read() + token_info = json.loads(data) + except IOError: + pass + return token_info + + def _write_token_info_to_file(self): + """ + Write token_info to credential file. + """ + filename = os.path.realpath(os.path.expanduser(self.credential_file)) + data = json.dumps(self.token_info) + with open(filename, 'w') as f: + f.write(data) + + def has_completed(self, response): + """ + Determine if operation has completed based on response. 
+ + :param response: JSON response + :type response: I{responseCls} + + :return: True if complete, False otherwise + :rtype: ``bool`` + """ + if response.object['status'] == 'DONE': + return True + else: + return False + + def get_poll_request_kwargs(self, response, context, request_kwargs): + """ + @inherits: :class:`PollingConnection.get_poll_request_kwargs` + """ + return {'action': response.object['selfLink']} + + def morph_action_hook(self, action): + """ + Update action to correct request path. + + In many places, the Google API returns a full URL to a resource. + This will strip the scheme and host off of the path and just return + the request. Otherwise, it will append the base request_path to + the action. + + :param action: The action to be called in the http request + :type action: ``str`` + + :return: The modified request based on the action + :rtype: ``str`` + """ + if action.startswith('https://'): + u = urlparse.urlsplit(action) + request = urlparse.urlunsplit(('', '', u[2], u[3], u[4])) + else: + request = self.request_path + action + return request diff -Nru libcloud-0.5.0/libcloud/common/hostvirtual.py libcloud-0.15.1/libcloud/common/hostvirtual.py --- libcloud-0.5.0/libcloud/common/hostvirtual.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/common/hostvirtual.py 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,77 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License.You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import simplejson as json +except ImportError: + import json + +from libcloud.utils.py3 import httplib +from libcloud.common.base import ConnectionKey, JsonResponse +from libcloud.compute.types import InvalidCredsError +from libcloud.common.types import LibcloudError + +API_HOST = 'vapi.vr.org' + + +class HostVirtualException(LibcloudError): + def __init__(self, code, message): + self.code = code + self.message = message + self.args = (code, message) + + def __str__(self): + return self.__repr__() + + def __repr__(self): + return '' % (self.code, self.message) + + +class HostVirtualConnection(ConnectionKey): + host = API_HOST + + allow_insecure = False + + def add_default_params(self, params): + params['key'] = self.key + return params + + +class HostVirtualResponse(JsonResponse): + valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED, + httplib.NO_CONTENT] + + def parse_body(self): + if not self.body: + return None + + data = json.loads(self.body) + return data + + def parse_error(self): + data = self.parse_body() + + if self.status == httplib.UNAUTHORIZED: + raise InvalidCredsError('%(code)s:%(message)s' % (data['error'])) + elif self.status == httplib.PRECONDITION_FAILED: + raise HostVirtualException( + data['error']['code'], data['error']['message']) + elif self.status == httplib.NOT_FOUND: + raise HostVirtualException( + data['error']['code'], data['error']['message']) + + 
return self.body + + def success(self): + return self.status in self.valid_response_codes diff -Nru libcloud-0.5.0/libcloud/common/linode.py libcloud-0.15.1/libcloud/common/linode.py --- libcloud-0.5.0/libcloud/common/linode.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/common/linode.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,176 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.common.base import ConnectionKey, JsonResponse +from libcloud.common.types import InvalidCredsError + +from libcloud.utils.py3 import PY3 +from libcloud.utils.py3 import b + +__all__ = [ + 'API_HOST', + 'API_ROOT', + 'LinodeException', + 'LinodeResponse', + 'LinodeConnection' +] + +# Endpoint for the Linode API +API_HOST = 'api.linode.com' +API_ROOT = '/' + +# Constants that map a RAM figure to a PlanID (updated 4/25/14) +LINODE_PLAN_IDS = {2048: '1', + 4096: '3', + 8192: '5', + 16384: '6', + 32768: '7', + 49152: '8', + 65536: '9', + 98304: '11'} + + +class LinodeException(Exception): + """Error originating from the Linode API + + This class wraps a Linode API error, a list of which is available in the + API documentation. All Linode API errors are a numeric code and a + human-readable description. 
+ """ + def __init__(self, code, message): + self.code = code + self.message = message + self.args = (code, message) + + def __str__(self): + return "(%u) %s" % (self.code, self.message) + + def __repr__(self): + return "" % (self.code, self.message) + + +class LinodeResponse(JsonResponse): + """Linode API response + + Wraps the HTTP response returned by the Linode API, which should be JSON in + this structure: + + { + "ERRORARRAY": [ ... ], + "DATA": [ ... ], + "ACTION": " ... " + } + + libcloud does not take advantage of batching, so a response will always + reflect the above format. A few weird quirks are caught here as well.""" + def __init__(self, response, connection): + """Instantiate a LinodeResponse from the HTTP response + + :keyword response: The raw response returned by urllib + :return: parsed :class:`LinodeResponse`""" + + self.connection = connection + + self.headers = dict(response.getheaders()) + self.error = response.reason + self.status = response.status + + self.body = self._decompress_response(body=response.read(), + headers=self.headers) + + if PY3: + self.body = b(self.body).decode('utf-8') + + self.invalid = LinodeException(0xFF, + "Invalid JSON received from server") + + # Move parse_body() to here; we can't be sure of failure until we've + # parsed the body into JSON. + self.objects, self.errors = self.parse_body() + + if not self.success(): + # Raise the first error, as there will usually only be one + raise self.errors[0] + + def parse_body(self): + """Parse the body of the response into JSON objects + + If the response chokes the parser, action and data will be returned as + None and errorarray will indicate an invalid JSON exception. 
+ + :return: ``list`` of objects and ``list`` of errors""" + js = super(LinodeResponse, self).parse_body() + + try: + if isinstance(js, dict): + # solitary response - promote to list + js = [js] + ret = [] + errs = [] + for obj in js: + if ("DATA" not in obj or "ERRORARRAY" not in obj + or "ACTION" not in obj): + ret.append(None) + errs.append(self.invalid) + continue + ret.append(obj["DATA"]) + errs.extend(self._make_excp(e) for e in obj["ERRORARRAY"]) + return (ret, errs) + except: + return (None, [self.invalid]) + + def success(self): + """Check the response for success + + The way we determine success is by the presence of an error in + ERRORARRAY. If one is there, we assume the whole request failed. + + :return: ``bool`` indicating a successful request""" + return len(self.errors) == 0 + + def _make_excp(self, error): + """Convert an API error to a LinodeException instance + + :keyword error: JSON object containing ``ERRORCODE`` and + ``ERRORMESSAGE`` + :type error: dict""" + if "ERRORCODE" not in error or "ERRORMESSAGE" not in error: + return None + if error["ERRORCODE"] == 4: + return InvalidCredsError(error["ERRORMESSAGE"]) + return LinodeException(error["ERRORCODE"], error["ERRORMESSAGE"]) + + +class LinodeConnection(ConnectionKey): + """ + A connection to the Linode API + + Wraps SSL connections to the Linode API, automagically injecting the + parameters that the API needs for each request. + """ + host = API_HOST + responseCls = LinodeResponse + + def add_default_params(self, params): + """ + Add parameters that are necessary for every request + + This method adds ``api_key`` and ``api_responseFormat`` to + the request. + """ + params["api_key"] = self.key + # Be explicit about this in case the default changes. 
+ params["api_responseFormat"] = "json" + return params diff -Nru libcloud-0.5.0/libcloud/common/openstack.py libcloud-0.15.1/libcloud/common/openstack.py --- libcloud-0.5.0/libcloud/common/openstack.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/common/openstack.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,652 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Common utilities for OpenStack +""" +import sys +import datetime + +from libcloud.utils.py3 import httplib +from libcloud.utils.iso8601 import parse_date + +from libcloud.common.base import ConnectionUserAndKey, Response +from libcloud.compute.types import (LibcloudError, InvalidCredsError, + MalformedResponseError) + +try: + import simplejson as json +except ImportError: + import json + +AUTH_API_VERSION = '1.1' + +# Auth versions which contain token expiration information. +AUTH_VERSIONS_WITH_EXPIRES = [ + '1.1', + '2.0', + '2.0_apikey', + '2.0_password' +] + +# How many seconds to substract from the auth token expiration time before +# testing if the token is still valid. +# The time is subtracted to account for the HTTP request latency and prevent +# user from getting "InvalidCredsError" if token is about to expire. 
+AUTH_TOKEN_EXPIRES_GRACE_SECONDS = 5 + +__all__ = [ + 'OpenStackBaseConnection', + 'OpenStackAuthConnection', + 'OpenStackServiceCatalog', + 'OpenStackDriverMixin', + + 'AUTH_TOKEN_EXPIRES_GRACE_SECONDS' +] + + +# @TODO: Refactor for re-use by other openstack drivers +class OpenStackAuthResponse(Response): + def success(self): + return True + + def parse_body(self): + if not self.body: + return None + + if 'content-type' in self.headers: + key = 'content-type' + elif 'Content-Type' in self.headers: + key = 'Content-Type' + else: + raise LibcloudError('Missing content-type header', + driver=OpenStackAuthConnection) + + content_type = self.headers[key] + if content_type.find(';') != -1: + content_type = content_type.split(';')[0] + + if content_type == 'application/json': + try: + data = json.loads(self.body) + except: + raise MalformedResponseError('Failed to parse JSON', + body=self.body, + driver=OpenStackAuthConnection) + elif content_type == 'text/plain': + data = self.body + else: + data = self.body + + return data + + +class OpenStackAuthConnection(ConnectionUserAndKey): + + responseCls = OpenStackAuthResponse + name = 'OpenStack Auth' + timeout = None + + def __init__(self, parent_conn, auth_url, auth_version, user_id, key, + tenant_name=None, timeout=None): + self.parent_conn = parent_conn + # enable tests to use the same mock connection classes. 
+ self.conn_classes = parent_conn.conn_classes + + super(OpenStackAuthConnection, self).__init__( + user_id, key, url=auth_url, timeout=timeout) + + self.auth_version = auth_version + self.auth_url = auth_url + self.driver = self.parent_conn.driver + self.tenant_name = tenant_name + self.timeout = timeout + + self.urls = {} + self.auth_token = None + self.auth_token_expires = None + self.auth_user_info = None + + def morph_action_hook(self, action): + (_, _, _, request_path) = self._tuple_from_url(self.auth_url) + + if request_path == '': + # No path is provided in the auth_url, use action passed to this + # method. + return action + + return request_path + + def add_default_headers(self, headers): + headers['Accept'] = 'application/json' + headers['Content-Type'] = 'application/json; charset=UTF-8' + return headers + + def authenticate(self, force=False): + """ + Authenticate against the keystone api. + + :param force: Forcefully update the token even if it's already cached + and still valid. 
+ :type force: ``bool`` + """ + if not force and self.auth_version in AUTH_VERSIONS_WITH_EXPIRES \ + and self.is_token_valid(): + # If token is still valid, there is no need to re-authenticate + return self + + if self.auth_version == "1.0": + return self.authenticate_1_0() + elif self.auth_version == "1.1": + return self.authenticate_1_1() + elif self.auth_version == "2.0" or self.auth_version == "2.0_apikey": + return self.authenticate_2_0_with_apikey() + elif self.auth_version == "2.0_password": + return self.authenticate_2_0_with_password() + else: + raise LibcloudError('Unsupported Auth Version requested') + + def authenticate_1_0(self): + headers = { + 'X-Auth-User': self.user_id, + 'X-Auth-Key': self.key, + } + + resp = self.request('/v1.0', headers=headers, method='GET') + + if resp.status == httplib.UNAUTHORIZED: + # HTTP UNAUTHORIZED (401): auth failed + raise InvalidCredsError() + elif resp.status not in [httplib.NO_CONTENT, httplib.OK]: + body = 'code: %s body:%s headers:%s' % (resp.status, + resp.body, + resp.headers) + raise MalformedResponseError('Malformed response', body=body, + driver=self.driver) + else: + headers = resp.headers + # emulate the auth 1.1 URL list + self.urls = {} + self.urls['cloudServers'] = \ + [{'publicURL': headers.get('x-server-management-url', None)}] + self.urls['cloudFilesCDN'] = \ + [{'publicURL': headers.get('x-cdn-management-url', None)}] + self.urls['cloudFiles'] = \ + [{'publicURL': headers.get('x-storage-url', None)}] + self.auth_token = headers.get('x-auth-token', None) + self.auth_user_info = None + + if not self.auth_token: + raise MalformedResponseError('Missing X-Auth-Token in \ + response headers') + + return self + + def authenticate_1_1(self): + reqbody = json.dumps({'credentials': {'username': self.user_id, + 'key': self.key}}) + resp = self.request('/v1.1/auth', data=reqbody, headers={}, + method='POST') + + if resp.status == httplib.UNAUTHORIZED: + # HTTP UNAUTHORIZED (401): auth failed + raise 
InvalidCredsError() + elif resp.status != httplib.OK: + body = 'code: %s body:%s' % (resp.status, resp.body) + raise MalformedResponseError('Malformed response', body=body, + driver=self.driver) + else: + try: + body = json.loads(resp.body) + except Exception: + e = sys.exc_info()[1] + raise MalformedResponseError('Failed to parse JSON', e) + + try: + expires = body['auth']['token']['expires'] + + self.auth_token = body['auth']['token']['id'] + self.auth_token_expires = parse_date(expires) + self.urls = body['auth']['serviceCatalog'] + self.auth_user_info = None + except KeyError: + e = sys.exc_info()[1] + raise MalformedResponseError('Auth JSON response is \ + missing required elements', e) + + return self + + def authenticate_2_0_with_apikey(self): + # API Key based authentication uses the RAX-KSKEY extension. + # http://s.apache.org/oAi + data = {'auth': + {'RAX-KSKEY:apiKeyCredentials': + {'username': self.user_id, 'apiKey': self.key}}} + if self.tenant_name: + data['auth']['tenantName'] = self.tenant_name + reqbody = json.dumps(data) + return self.authenticate_2_0_with_body(reqbody) + + def authenticate_2_0_with_password(self): + # Password based authentication is the only 'core' authentication + # method in Keystone at this time. 
+ # 'keystone' - http://s.apache.org/e8h + data = {'auth': + {'passwordCredentials': + {'username': self.user_id, 'password': self.key}}} + if self.tenant_name: + data['auth']['tenantName'] = self.tenant_name + reqbody = json.dumps(data) + return self.authenticate_2_0_with_body(reqbody) + + def authenticate_2_0_with_body(self, reqbody): + resp = self.request('/v2.0/tokens', data=reqbody, + headers={'Content-Type': 'application/json'}, + method='POST') + if resp.status == httplib.UNAUTHORIZED: + raise InvalidCredsError() + elif resp.status not in [httplib.OK, + httplib.NON_AUTHORITATIVE_INFORMATION]: + body = 'code: %s body: %s' % (resp.status, resp.body) + raise MalformedResponseError('Malformed response', body=body, + driver=self.driver) + else: + try: + body = json.loads(resp.body) + except Exception: + e = sys.exc_info()[1] + raise MalformedResponseError('Failed to parse JSON', e) + + try: + access = body['access'] + expires = access['token']['expires'] + + self.auth_token = access['token']['id'] + self.auth_token_expires = parse_date(expires) + self.urls = access['serviceCatalog'] + self.auth_user_info = access.get('user', {}) + except KeyError: + e = sys.exc_info()[1] + raise MalformedResponseError('Auth JSON response is \ + missing required elements', e) + + return self + + def is_token_valid(self): + """ + Return True if the current auth token is already cached and hasn't + expired yet. + + :return: ``True`` if the token is still valid, ``False`` otherwise. 
+ :rtype: ``bool`` + """ + if not self.auth_token: + return False + + if not self.auth_token_expires: + return False + + expires = self.auth_token_expires - \ + datetime.timedelta(seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS) + + time_tuple_expires = expires.utctimetuple() + time_tuple_now = datetime.datetime.utcnow().utctimetuple() + + if time_tuple_now < time_tuple_expires: + return True + + return False + + +class OpenStackServiceCatalog(object): + """ + http://docs.openstack.org/api/openstack-identity-service/2.0/content/ + + This class should be instanciated with the contents of the + 'serviceCatalog' in the auth response. This will do the work of figuring + out which services actually exist in the catalog as well as split them up + by type, name, and region if available + """ + + _auth_version = None + _service_catalog = None + + def __init__(self, service_catalog, ex_force_auth_version=None): + self._auth_version = ex_force_auth_version or AUTH_API_VERSION + self._service_catalog = {} + + # Check this way because there are a couple of different 2.0_* + # auth types. 
+ if '2.0' in self._auth_version: + self._parse_auth_v2(service_catalog) + elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version): + self._parse_auth_v1(service_catalog) + else: + raise LibcloudError('auth version "%s" not supported' + % (self._auth_version)) + + def get_catalog(self): + return self._service_catalog + + def get_public_urls(self, service_type=None, name=None): + endpoints = self.get_endpoints(service_type=service_type, + name=name) + + result = [] + for endpoint in endpoints: + if 'publicURL' in endpoint: + result.append(endpoint['publicURL']) + + return result + + def get_endpoints(self, service_type=None, name=None): + eps = [] + + if '2.0' in self._auth_version: + endpoints = self._service_catalog.get(service_type, {}) \ + .get(name, {}) + elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version): + endpoints = self._service_catalog.get(name, {}) + + for regionName, values in endpoints.items(): + eps.append(values[0]) + + return eps + + def get_endpoint(self, service_type=None, name=None, region=None): + if '2.0' in self._auth_version: + endpoint = self._service_catalog.get(service_type, {}) \ + .get(name, {}).get(region, []) + elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version): + endpoint = self._service_catalog.get(name, {}).get(region, []) + + # ideally an endpoint either isn't found or only one match is found. 
+ if len(endpoint) == 1: + return endpoint[0] + else: + return {} + + def _parse_auth_v1(self, service_catalog): + for service, endpoints in service_catalog.items(): + + self._service_catalog[service] = {} + + for endpoint in endpoints: + region = endpoint.get('region') + + if region not in self._service_catalog[service]: + self._service_catalog[service][region] = [] + + self._service_catalog[service][region].append(endpoint) + + def _parse_auth_v2(self, service_catalog): + for service in service_catalog: + service_type = service['type'] + service_name = service.get('name', None) + + if service_type not in self._service_catalog: + self._service_catalog[service_type] = {} + + if service_name not in self._service_catalog[service_type]: + self._service_catalog[service_type][service_name] = {} + + for endpoint in service.get('endpoints', []): + region = endpoint.get('region', None) + + catalog = self._service_catalog[service_type][service_name] + if region not in catalog: + catalog[region] = [] + + catalog[region].append(endpoint) + + +class OpenStackBaseConnection(ConnectionUserAndKey): + + """ + Base class for OpenStack connections. + + :param user_id: User name to use when authenticating + :type user_id: ``str`` + + :param key: Secret to use when authenticating. + :type key: ``str`` + + :param secure: Use HTTPS? (True by default.) + :type secure: ``bool`` + + :param ex_force_base_url: Base URL for connection requests. If + not specified, this will be determined by authenticating. + :type ex_force_base_url: ``str`` + + :param ex_force_auth_url: Base URL for authentication requests. + :type ex_force_auth_url: ``str`` + + :param ex_force_auth_version: Authentication version to use. If + not specified, defaults to AUTH_API_VERSION. + :type ex_force_auth_version: ``str`` + + :param ex_force_auth_token: Authentication token to use for + connection requests. 
If specified, the connection will not attempt + to authenticate, and the value of ex_force_base_url will be used to + determine the base request URL. If ex_force_auth_token is passed in, + ex_force_base_url must also be provided. + :type ex_force_auth_token: ``str`` + + :param ex_tenant_name: When authenticating, provide this tenant + name to the identity service. A scoped token will be returned. + Some cloud providers require the tenant name to be provided at + authentication time. Others will use a default tenant if none + is provided. + :type ex_tenant_name: ``str`` + + :param ex_force_service_type: Service type to use when selecting an + service. If not specified, a provider specific default will be used. + :type ex_force_service_type: ``str`` + + :param ex_force_service_name: Service name to use when selecting an + service. If not specified, a provider specific default will be used. + :type ex_force_service_name: ``str`` + + :param ex_force_service_region: Region to use when selecting an + service. If not specified, a provider specific default will be used. 
+ :type ex_force_service_region: ``str`` + """ + + auth_url = None + auth_token = None + auth_token_expires = None + auth_user_info = None + service_catalog = None + service_type = None + service_name = None + service_region = None + _auth_version = None + + def __init__(self, user_id, key, secure=True, + host=None, port=None, timeout=None, + ex_force_base_url=None, + ex_force_auth_url=None, + ex_force_auth_version=None, + ex_force_auth_token=None, + ex_tenant_name=None, + ex_force_service_type=None, + ex_force_service_name=None, + ex_force_service_region=None): + super(OpenStackBaseConnection, self).__init__( + user_id, key, secure=secure, timeout=timeout) + + if ex_force_auth_version: + self._auth_version = ex_force_auth_version + + self._ex_force_base_url = ex_force_base_url + self._ex_force_auth_url = ex_force_auth_url + self._ex_force_auth_token = ex_force_auth_token + self._ex_tenant_name = ex_tenant_name + self._ex_force_service_type = ex_force_service_type + self._ex_force_service_name = ex_force_service_name + self._ex_force_service_region = ex_force_service_region + + if ex_force_auth_token and not ex_force_base_url: + raise LibcloudError( + 'Must also provide ex_force_base_url when specifying ' + 'ex_force_auth_token.') + + if ex_force_auth_token: + self.auth_token = ex_force_auth_token + + if not self._auth_version: + self._auth_version = AUTH_API_VERSION + + auth_url = self._get_auth_url() + + if not auth_url: + raise LibcloudError('OpenStack instance must ' + + 'have auth_url set') + + osa = OpenStackAuthConnection(self, auth_url, self._auth_version, + self.user_id, self.key, + tenant_name=self._ex_tenant_name, + timeout=self.timeout) + self._osa = osa + + def _get_auth_url(self): + """ + Retrieve auth url for this instance using either "ex_force_auth_url" + constructor kwarg of "auth_url" class variable. 
+ """ + auth_url = self.auth_url + + if self._ex_force_auth_url is not None: + auth_url = self._ex_force_auth_url + + return auth_url + + def get_service_catalog(self): + if self.service_catalog is None: + self._populate_hosts_and_request_paths() + + return self.service_catalog + + def get_endpoint(self): + """ + Selects the endpoint to use based on provider specific values, + or overrides passed in by the user when setting up the driver. + + :returns: url of the relevant endpoint for the driver + """ + service_type = self.service_type + service_name = self.service_name + service_region = self.service_region + if self._ex_force_service_type: + service_type = self._ex_force_service_type + if self._ex_force_service_name: + service_name = self._ex_force_service_name + if self._ex_force_service_region: + service_region = self._ex_force_service_region + + ep = self.service_catalog.get_endpoint(service_type=service_type, + name=service_name, + region=service_region) + if 'publicURL' in ep: + return ep['publicURL'] + + raise LibcloudError('Could not find specified endpoint') + + def add_default_headers(self, headers): + headers['X-Auth-Token'] = self.auth_token + headers['Accept'] = self.accept_format + return headers + + def morph_action_hook(self, action): + self._populate_hosts_and_request_paths() + return super(OpenStackBaseConnection, self).morph_action_hook(action) + + def request(self, **kwargs): + return super(OpenStackBaseConnection, self).request(**kwargs) + + def _set_up_connection_info(self, url): + result = self._tuple_from_url(url) + (self.host, self.port, self.secure, self.request_path) = result + + def _populate_hosts_and_request_paths(self): + """ + OpenStack uses a separate host for API calls which is only provided + after an initial authentication request. + """ + osa = self._osa + + if self._ex_force_auth_token: + # If ex_force_auth_token is provided we always hit the api directly + # and never try to authenticate. 
+ # + # Note: When ex_force_auth_token is provided, ex_force_base_url + # must be provided as well. + self._set_up_connection_info(url=self._ex_force_base_url) + return + + if not osa.is_token_valid(): + # Token is not available or it has expired. Need to retrieve a + # new one. + osa.authenticate() # may throw InvalidCreds + + self.auth_token = osa.auth_token + self.auth_token_expires = osa.auth_token_expires + self.auth_user_info = osa.auth_user_info + + # Pull out and parse the service catalog + osc = OpenStackServiceCatalog( + osa.urls, ex_force_auth_version=self._auth_version) + self.service_catalog = osc + + url = self._ex_force_base_url or self.get_endpoint() + self._set_up_connection_info(url=url) + + +class OpenStackDriverMixin(object): + + def __init__(self, *args, **kwargs): + self._ex_force_base_url = kwargs.get('ex_force_base_url', None) + self._ex_force_auth_url = kwargs.get('ex_force_auth_url', None) + self._ex_force_auth_version = kwargs.get('ex_force_auth_version', None) + self._ex_force_auth_token = kwargs.get('ex_force_auth_token', None) + self._ex_tenant_name = kwargs.get('ex_tenant_name', None) + self._ex_force_service_type = kwargs.get('ex_force_service_type', None) + self._ex_force_service_name = kwargs.get('ex_force_service_name', None) + self._ex_force_service_region = kwargs.get('ex_force_service_region', + None) + + def openstack_connection_kwargs(self): + """ + + :rtype: ``dict`` + """ + rv = {} + if self._ex_force_base_url: + rv['ex_force_base_url'] = self._ex_force_base_url + if self._ex_force_auth_token: + rv['ex_force_auth_token'] = self._ex_force_auth_token + if self._ex_force_auth_url: + rv['ex_force_auth_url'] = self._ex_force_auth_url + if self._ex_force_auth_version: + rv['ex_force_auth_version'] = self._ex_force_auth_version + if self._ex_tenant_name: + rv['ex_tenant_name'] = self._ex_tenant_name + if self._ex_force_service_type: + rv['ex_force_service_type'] = self._ex_force_service_type + if self._ex_force_service_name: + 
rv['ex_force_service_name'] = self._ex_force_service_name + if self._ex_force_service_region: + rv['ex_force_service_region'] = self._ex_force_service_region + return rv diff -Nru libcloud-0.5.0/libcloud/common/rackspace.py libcloud-0.15.1/libcloud/common/rackspace.py --- libcloud-0.5.0/libcloud/common/rackspace.py 2011-05-09 22:32:14.000000000 +0000 +++ libcloud-0.15.1/libcloud/common/rackspace.py 2013-11-29 12:35:04.000000000 +0000 @@ -14,107 +14,11 @@ # limitations under the License. """ -Common utilities for Rackspace Cloud Servers and Cloud Files +Common settings for Rackspace Cloud Servers and Cloud Files """ -import httplib -from urllib2 import urlparse -from libcloud.common.base import ConnectionUserAndKey -from libcloud.compute.types import InvalidCredsError - -AUTH_HOST_US='auth.api.rackspacecloud.com' -AUTH_HOST_UK='lon.auth.api.rackspacecloud.com' -AUTH_API_VERSION = 'v1.0' __all__ = [ - "RackspaceBaseConnection", - "AUTH_HOST_US", - "AUTH_HOST_UK" - ] - -class RackspaceBaseConnection(ConnectionUserAndKey): - def __init__(self, user_id, key, secure): - self.cdn_management_url = None - self.storage_url = None - self.auth_token = None - self.__host = None - super(RackspaceBaseConnection, self).__init__( - user_id, key, secure=secure) - - def add_default_headers(self, headers): - headers['X-Auth-Token'] = self.auth_token - headers['Accept'] = self.accept_format - return headers - - @property - def request_path(self): - return self._get_request_path(url_key=self._url_key) - - @property - def host(self): - # Default to server_host - return self._get_host(url_key=self._url_key) - - def _get_request_path(self, url_key): - value_key = '__request_path_%s' % (url_key) - value = getattr(self, value_key, None) - - if not value: - self._populate_hosts_and_request_paths() - value = getattr(self, value_key, None) - - return value - - def _get_host(self, url_key): - value_key = '__%s' % (url_key) - value = getattr(self, value_key, None) - - if not value: - 
self._populate_hosts_and_request_paths() - value = getattr(self, value_key, None) - - return value - - def _populate_hosts_and_request_paths(self): - """ - Rackspace uses a separate host for API calls which is only provided - after an initial authentication request. If we haven't made that - request yet, do it here. Otherwise, just return the management host. - """ - if not self.auth_token: - # Initial connection used for authentication - conn = self.conn_classes[self.secure]( - self.auth_host, self.port[self.secure]) - conn.request( - method='GET', - url='/%s' % (AUTH_API_VERSION), - headers={ - 'X-Auth-User': self.user_id, - 'X-Auth-Key': self.key - } - ) - - resp = conn.getresponse() - - if resp.status != httplib.NO_CONTENT: - raise InvalidCredsError() - - headers = dict(resp.getheaders()) - - try: - self.server_url = headers['x-server-management-url'] - self.storage_url = headers['x-storage-url'] - self.cdn_management_url = headers['x-cdn-management-url'] - self.lb_url = self.server_url.replace("servers", "ord.loadbalancers") - self.auth_token = headers['x-auth-token'] - except KeyError: - raise InvalidCredsError() - - for key in ['server_url', 'storage_url', 'cdn_management_url', - 'lb_url']: - scheme, server, request_path, param, query, fragment = ( - urlparse.urlparse(getattr(self, key))) - # Set host to where we want to make further requests to - setattr(self, '__%s' % (key), server) - setattr(self, '__request_path_%s' % (key), request_path) + 'AUTH_URL' +] - conn.close() +AUTH_URL = 'https://auth.api.rackspacecloud.com' diff -Nru libcloud-0.5.0/libcloud/common/types.py libcloud-0.15.1/libcloud/common/types.py --- libcloud-0.5.0/libcloud/common/types.py 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/libcloud/common/types.py 2013-11-29 12:35:04.000000000 +0000 @@ -13,12 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from libcloud.utils.py3 import httplib + __all__ = [ "LibcloudError", "MalformedResponseError", + "ProviderError", "InvalidCredsError", - "InvalidCredsException" - ] + "InvalidCredsException", + "LazyList" +] + class LibcloudError(Exception): """The base class for other libcloud exceptions""" @@ -28,11 +33,15 @@ self.driver = driver def __str__(self): + return self.__repr__() + + def __repr__(self): return ("") + class MalformedResponseError(LibcloudError): """Exception for the cases when a provider returns a malformed response, e.g. you request JSON and provider returns @@ -44,6 +53,9 @@ self.body = body def __str__(self): + return self.__repr__() + + def __repr__(self): return (": " + repr(self.body)) -class InvalidCredsError(LibcloudError): - """Exception used when invalid credentials are used on a provider.""" - def __init__(self, value='Invalid credentials with the provider', +class ProviderError(LibcloudError): + """ + Exception used when provider gives back + error response (HTTP 4xx, 5xx) for a request. 
+ + Specific sub types can be derieved for errors like + HTTP 401 : InvalidCredsError + HTTP 404 : NodeNotFoundError, ContainerDoesNotExistError + """ + + def __init__(self, value, http_code, driver=None): self.value = value + self.http_code = http_code self.driver = driver + def __str__(self): + return self.__repr__() + + def __repr__(self): return repr(self.value) -# Deprecated alias of L{InvalidCredsError} + +class InvalidCredsError(ProviderError): + """Exception used when invalid credentials are used on a provider.""" + + def __init__(self, value='Invalid credentials with the provider', + driver=None): + super(InvalidCredsError, self).__init__(value, + http_code=httplib.UNAUTHORIZED, + driver=driver) + + +# Deprecated alias of :class:`InvalidCredsError` InvalidCredsException = InvalidCredsError + + +class LazyList(object): + + def __init__(self, get_more, value_dict=None): + self._data = [] + self._last_key = None + self._exhausted = False + self._all_loaded = False + self._get_more = get_more + self._value_dict = value_dict or {} + + def __iter__(self): + if not self._all_loaded: + self._load_all() + + data = self._data + for i in data: + yield i + + def __getitem__(self, index): + if index >= len(self._data) and not self._all_loaded: + self._load_all() + + return self._data[index] + + def __len__(self): + self._load_all() + return len(self._data) + + def __repr__(self): + self._load_all() + repr_string = ', ' .join([repr(item) for item in self._data]) + repr_string = '[%s]' % (repr_string) + return repr_string + + def _load_all(self): + while not self._exhausted: + newdata, self._last_key, self._exhausted = \ + self._get_more(last_key=self._last_key, + value_dict=self._value_dict) + self._data.extend(newdata) + self._all_loaded = True diff -Nru libcloud-0.5.0/libcloud/common/xmlrpc.py libcloud-0.15.1/libcloud/common/xmlrpc.py --- libcloud-0.5.0/libcloud/common/xmlrpc.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/common/xmlrpc.py 
2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,108 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Base classes for working with xmlrpc APIs +""" + +import sys + +from libcloud.utils.py3 import xmlrpclib +from libcloud.utils.py3 import httplib +from libcloud.common.base import Response, Connection + + +class ProtocolError(Exception): + pass + + +class ErrorCodeMixin(object): + """ + This is a helper for API's that have a well defined collection of error + codes that are easily parsed out of error messages. It acts as a factory: + it finds the right exception for the error code, fetches any parameters it + needs from the context and raises it. 
+ """ + + exceptions = {} + + def raise_exception_for_error(self, error_code, message): + exceptionCls = self.exceptions.get(error_code, None) + if exceptionCls is None: + return + context = self.connection.context + driver = self.connection.driver + params = {} + if hasattr(exceptionCls, 'kwargs'): + for key in exceptionCls.kwargs: + if key in context: + params[key] = context[key] + raise exceptionCls(value=message, driver=driver, **params) + + +class XMLRPCResponse(ErrorCodeMixin, Response): + + defaultExceptionCls = Exception + + def success(self): + return self.status == httplib.OK + + def parse_body(self): + try: + params, methodname = xmlrpclib.loads(self.body) + if len(params) == 1: + params = params[0] + return params + except xmlrpclib.Fault: + e = sys.exc_info()[1] + self.raise_exception_for_error(e.faultCode, e.faultString) + error_string = '%s: %s' % (e.faultCode, e.faultString) + raise self.defaultExceptionCls(error_string) + + def parse_error(self): + msg = 'Server returned an invalid xmlrpc response (%d)' % (self.status) + raise ProtocolError(msg) + + +class XMLRPCConnection(Connection): + """ + Connection class which can call XMLRPC based API's. + + This class uses the xmlrpclib marshalling and demarshalling code but uses + the http transports provided by libcloud giving it better certificate + validation and debugging helpers than the core client library. + """ + + responseCls = XMLRPCResponse + + def add_default_headers(self, headers): + headers['Content-Type'] = 'text/xml' + return headers + + def request(self, method_name, *args, **kwargs): + """ + Call a given `method_name`. + + :type method_name: ``str`` + :param method_name: A method exposed by the xmlrpc endpoint that you + are connecting to. + + :type args: ``tuple`` + :param args: Arguments to invoke with method with. 
+ """ + endpoint = kwargs.get('endpoint', self.endpoint) + data = xmlrpclib.dumps(args, methodname=method_name, allow_none=True) + return super(XMLRPCConnection, self).request(endpoint, + data=data, + method='POST') diff -Nru libcloud-0.5.0/libcloud/compute/base.py libcloud-0.15.1/libcloud/compute/base.py --- libcloud-0.5.0/libcloud/compute/base.py 2011-05-14 09:15:59.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/base.py 2014-07-02 20:45:28.000000000 +0000 @@ -16,40 +16,107 @@ """ Provides base classes for working with drivers """ + +from __future__ import with_statement + +import sys import time import hashlib import os import socket -import struct +import binascii + +from libcloud.utils.py3 import b +import libcloud.compute.ssh from libcloud.pricing import get_size_price from libcloud.compute.types import NodeState, DeploymentError from libcloud.compute.ssh import SSHClient +from libcloud.common.base import ConnectionKey +from libcloud.common.base import BaseDriver +from libcloud.common.types import LibcloudError +from libcloud.compute.ssh import have_paramiko + +from libcloud.utils.networking import is_private_subnet +from libcloud.utils.networking import is_valid_ip_address + +if have_paramiko: + from paramiko.ssh_exception import SSHException + SSH_TIMEOUT_EXCEPTION_CLASSES = (SSHException, IOError, socket.gaierror, + socket.error) +else: + SSH_TIMEOUT_EXCEPTION_CLASSES = (IOError, socket.gaierror, socket.error) + +# How long to wait for the node to come online after creating it +NODE_ONLINE_WAIT_TIMEOUT = 10 * 60 + +# How long to try connecting to a remote SSH server when running a deployment +# script. +SSH_CONNECT_TIMEOUT = 5 * 60 -# @@TR: are the imports below part of the public api for this -# module? They aren't used in here ... 
-from libcloud.common.base import ConnectionKey, ConnectionUserAndKey -from libcloud.httplib_ssl import LibcloudHTTPSConnection -from libcloud.common.base import LibcloudHTTPConnection __all__ = [ - "Node", - "NodeState", - "NodeSize", - "NodeImage", - "NodeLocation", - "NodeAuthSSHKey", - "NodeAuthPassword", - "NodeDriver", - - # @@TR: do the following need exporting? - "ConnectionKey", - "ConnectionUserAndKey", - "LibcloudHTTPSConnection", - "LibcloudHTTPConnection" - ] + 'Node', + 'NodeState', + 'NodeSize', + 'NodeImage', + 'NodeLocation', + 'NodeAuthSSHKey', + 'NodeAuthPassword', + 'NodeDriver', + + 'StorageVolume', + 'VolumeSnapshot', + + # Deprecated, moved to libcloud.utils.networking + 'is_private_subnet', + 'is_valid_ip_address' +] + + +class UuidMixin(object): + """ + Mixin class for get_uuid function. + """ + + def __init__(self): + self._uuid = None + + def get_uuid(self): + """ + Unique hash for a node, node image, or node size + + The hash is a function of an SHA1 hash of the node, node image, + or node size's ID and its driver which means that it should be + unique between all objects of its type. + In some subclasses (e.g. GoGridNode) there is no ID + available so the public IP address is used. This means that, + unlike a properly done system UUID, the same UUID may mean a + different system install at a different time + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> node = driver.create_node() + >>> node.get_uuid() + 'd3748461511d8b9b0e0bfa0d4d3383a619a2bb9f' + + Note, for example, that this example will always produce the + same UUID! -class Node(object): + :rtype: ``str`` + """ + if not self._uuid: + self._uuid = hashlib.sha1(b('%s:%s' % + (self.id, self.driver.type))).hexdigest() + + return self._uuid + + @property + def uuid(self): + return self.get_uuid() + + +class Node(UuidMixin): """ Provide a common interface for handling nodes of all types. 
@@ -64,7 +131,7 @@ >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> node = driver.create_node() - >>> node.public_ip[0] + >>> node.public_ips[0] '127.0.0.3' >>> node.name 'dummy-3' @@ -75,7 +142,7 @@ >>> node.name 'dummy-1' - the node keeps a reference to its own driver which means that we + The node keeps a reference to its own driver which means that we can work on nodes from different providers without having to know which is which. @@ -86,63 +153,68 @@ >>> node2.driver.creds 72 - Althrough Node objects can be subclassed, this isn't normally + Although Node objects can be subclassed, this isn't normally done. Instead, any driver specific information is stored in the - "extra" proproperty of the node. + "extra" attribute of the node. >>> node.extra {'foo': 'bar'} - """ - def __init__(self, id, name, state, public_ip, private_ip, - driver, extra=None): - self.id = str(id) if id else None - self.name = name - self.state = state - self.public_ip = public_ip - self.private_ip = private_ip - self.driver = driver - self.uuid = self.get_uuid() - if not extra: - self.extra = {} - else: - self.extra = extra + def __init__(self, id, name, state, public_ips, private_ips, + driver, size=None, image=None, extra=None): + """ + :param id: Node ID. + :type id: ``str`` - def get_uuid(self): - """Unique hash for this node + :param name: Node name. + :type name: ``str`` - @return: C{string} + :param state: Node state. + :type state: :class:`libcloud.compute.types.NodeState` - The hash is a function of an SHA1 hash of the node's ID and - its driver which means that it should be unique between all - nodes. In some subclasses (e.g. GoGrid) there is no ID - available so the public IP address is used. This means that, - unlike a properly done system UUID, the same UUID may mean a - different system install at a different time + :param public_ips: Public IP addresses associated with this node. 
+ :type public_ips: ``list`` - >>> from libcloud.compute.drivers.dummy import DummyNodeDriver - >>> driver = DummyNodeDriver(0) - >>> node = driver.create_node() - >>> node.get_uuid() - 'd3748461511d8b9b0e0bfa0d4d3383a619a2bb9f' + :param private_ips: Private IP addresses associated with this node. + :type private_ips: ``list`` + + :param driver: Driver this node belongs to. + :type driver: :class:`.NodeDriver` + + :param size: Size of this node. (optional) + :type size: :class:`.NodeSize` + + :param image: Image of this node. (optional) + :type size: :class:`.NodeImage` + + :param extra: Optional provider specific attributes associated with + this node. + :type extra: ``dict`` - Note, for example, that this example will always produce the - same UUID! """ - return hashlib.sha1("%s:%d" % (self.id,self.driver.type)).hexdigest() + self.id = str(id) if id else None + self.name = name + self.state = state + self.public_ips = public_ips if public_ips else [] + self.private_ips = private_ips if private_ips else [] + self.driver = driver + self.size = size + self.image = image + self.extra = extra or {} + UuidMixin.__init__(self) def reboot(self): - """Reboot this node + """ + Reboot this node - @return: C{bool} + :return: ``bool`` This calls the node's driver and reboots the node >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> node = driver.create_node() - >>> from libcloud.compute.types import NodeState >>> node.state == NodeState.RUNNING True >>> node.state == NodeState.REBOOTING @@ -155,9 +227,10 @@ return self.driver.reboot_node(self) def destroy(self): - """Destroy this node + """ + Destroy this node - @return: C{bool} + :return: ``bool`` This calls the node's driver and destroys the node @@ -176,13 +249,13 @@ return self.driver.destroy_node(self) def __repr__(self): - return (('') - % (self.uuid, self.name, self.state, self.public_ip, - self.driver.name)) + return (('') + % (self.uuid, self.name, self.state, 
self.public_ips, + self.private_ips, self.driver.name)) -class NodeSize(object): +class NodeSize(UuidMixin): """ A Base NodeSize class to derive from. @@ -206,7 +279,34 @@ 4 """ - def __init__(self, id, name, ram, disk, bandwidth, price, driver): + def __init__(self, id, name, ram, disk, bandwidth, price, + driver, extra=None): + """ + :param id: Size ID. + :type id: ``str`` + + :param name: Size name. + :type name: ``str`` + + :param ram: Amount of memory (in MB) provided by this size. + :type ram: ``int`` + + :param disk: Amount of disk storage (in GB) provided by this image. + :type disk: ``int`` + + :param bandwidth: Amount of bandiwdth included with this size. + :type bandwidth: ``int`` + + :param price: Price (in US dollars) of running this node for an hour. + :type price: ``float`` + + :param driver: Driver this size belongs to. + :type driver: :class:`.NodeDriver` + + :param extra: Optional provider specific attributes associated with + this size. + :type extra: ``dict`` + """ self.id = str(id) self.name = name self.ram = ram @@ -214,6 +314,8 @@ self.bandwidth = bandwidth self.price = price self.driver = driver + self.extra = extra or {} + UuidMixin.__init__(self) def __repr__(self): return (('>> node = driver.create_node(image=image) - """ def __init__(self, id, name, driver, extra=None): + """ + :param id: Image ID. + :type id: ``str`` + + :param name: Image name. + :type name: ``str`` + + :param driver: Driver this image belongs to. + :type driver: :class:`.NodeDriver` + + :param extra: Optional provided specific attributes associated with + this image. + :type extra: ``dict`` + """ self.id = str(id) self.name = name self.driver = driver - if not extra: - self.extra = {} - else: - self.extra = extra + self.extra = extra or {} + UuidMixin.__init__(self) + def __repr__(self): return (('') % (self.id, self.name, self.driver.name)) + class NodeLocation(object): """ A physical location where nodes can be. 
@@ -269,14 +384,29 @@ """ def __init__(self, id, name, country, driver): + """ + :param id: Location ID. + :type id: ``str`` + + :param name: Location name. + :type name: ``str`` + + :param country: Location country. + :type country: ``str`` + + :param driver: Driver this location belongs to. + :type driver: :class:`.NodeDriver` + """ self.id = str(id) self.name = name self.country = country self.driver = driver + def __repr__(self): return (('') % (self.id, self.name, self.country, self.driver.name)) + class NodeAuthSSHKey(object): """ An SSH key to be installed for authentication to a node. @@ -289,23 +419,198 @@ >>> k = NodeAuthSSHKey(pubkey) >>> k - """ + def __init__(self, pubkey): + """ + :param pubkey: Public key matetiral. + :type pubkey: ``str`` + """ self.pubkey = pubkey + def __repr__(self): return '' + class NodeAuthPassword(object): """ A password to be used for authentication to a node. """ - def __init__(self, password): + def __init__(self, password, generated=False): + """ + :param password: Password. + :type password: ``str`` + + :type generated: ``True`` if this password was automatically generated, + ``False`` otherwise. + """ self.password = password + self.generated = generated + def __repr__(self): return '' -class NodeDriver(object): + +class StorageVolume(UuidMixin): + """ + A base StorageVolume class to derive from. + """ + + def __init__(self, id, name, size, driver, extra=None): + """ + :param id: Storage volume ID. + :type id: ``str`` + + :param name: Storage volume name. + :type name: ``str`` + + :param size: Size of this volume (in GB). + :type size: ``int`` + + :param driver: Driver this image belongs to. + :type driver: :class:`.NodeDriver` + + :param extra: Optional provider specific attributes. 
+ :type extra: ``dict`` + """ + self.id = id + self.name = name + self.size = size + self.driver = driver + self.extra = extra + UuidMixin.__init__(self) + + def list_snapshots(self): + """ + :rtype: ``list`` of ``VolumeSnapshot`` + """ + return self.driver.list_volume_snapshots(volume=self) + + def attach(self, node, device=None): + """ + Attach this volume to a node. + + :param node: Node to attach volume to + :type node: :class:`.Node` + + :param device: Where the device is exposed, + e.g. '/dev/sdb (optional) + :type device: ``str`` + + :return: ``True`` if attach was successful, ``False`` otherwise. + :rtype: ``bool`` + """ + + return self.driver.attach_volume(node=node, volume=self, device=device) + + def detach(self): + """ + Detach this volume from its node + + :return: ``True`` if detach was successful, ``False`` otherwise. + :rtype: ``bool`` + """ + + return self.driver.detach_volume(volume=self) + + def snapshot(self, name): + """ + Creates a snapshot of this volume. + + :return: Created snapshot. + :rtype: ``VolumeSnapshot`` + """ + return self.driver.create_volume_snapshot(volume=self, name=name) + + def destroy(self): + """ + Destroy this storage volume. + + :return: ``True`` if destroy was successful, ``False`` otherwise. + :rtype: ``bool`` + """ + + return self.driver.destroy_volume(volume=self) + + def __repr__(self): + return '' % ( + self.id, self.size, self.driver.name) + + +class VolumeSnapshot(object): + """ + A base VolumeSnapshot class to derive from. + """ + def __init__(self, id, driver, size=None, extra=None): + """ + VolumeSnapshot constructor. + + :param id: Snapshot ID. + :type id: ``str`` + + :param size: A snapshot size in GB. + :type size: ``int`` + + :param extra: Provider depends parameters for snapshot. + :type extra: ``dict`` + """ + self.id = id + self.driver = driver + self.size = size + self.extra = extra or {} + + def destroy(self): + """ + Destroys this snapshot. 
+ + :rtype: ``bool`` + """ + return self.driver.destroy_volume_snapshot(snapshot=self) + + def __repr__(self): + return ('' % + (self.id, self.size, self.driver.name)) + + +class KeyPair(object): + """ + Represents a SSH key pair. + """ + + def __init__(self, name, public_key, fingerprint, driver, private_key=None, + extra=None): + """ + Constructor. + + :keyword name: Name of the key pair object. + :type name: ``str`` + + :keyword fingerprint: Key fingerprint. + :type fingerprint: ``str`` + + :keyword public_key: Public key in OpenSSH format. + :type public_key: ``str`` + + :keyword private_key: Private key in PEM format. + :type private_key: ``str`` + + :keyword extra: Provider specific attributes associated with this + key pair. (optional) + :type extra: ``dict`` + """ + self.name = name + self.fingerprint = fingerprint + self.public_key = public_key + self.private_key = private_key + self.driver = driver + self.extra = extra or {} + + def __repr__(self): + return ('' % + (self.name, self.fingerprint, self.driver.name)) + + +class NodeDriver(BaseDriver): """ A base NodeDriver class to derive from @@ -319,13 +624,15 @@ name = None type = None port = None - features = {"create_node": []} + features = {'create_node': []} + """ List of available features for a driver. - - L{create_node} - - ssh_key: Supports L{NodeAuthSSHKey} as an authentication method - for nodes. - - password: Supports L{NodeAuthPassword} as an authentication + - :meth:`libcloud.compute.base.NodeDriver.create_node` + - ssh_key: Supports :class:`.NodeAuthSSHKey` as an authentication + method for nodes. + - password: Supports :class:`.NodeAuthPassword` as an + authentication method for nodes. - generates_password: Returns a password attribute on the Node object returned from creation. 
@@ -333,280 +640,838 @@ NODE_STATE_MAP = {} - def __init__(self, key, secret=None, secure=True, host=None, port=None): + def __init__(self, key, secret=None, secure=True, host=None, port=None, + api_version=None, **kwargs): + super(NodeDriver, self).__init__(key=key, secret=secret, secure=secure, + host=host, port=port, + api_version=api_version, **kwargs) + + def list_nodes(self): """ - @keyword key: API key or username to used - @type key: str + List all nodes. - @keyword secret: Secret password to be used - @type secret: str + :return: list of node objects + :rtype: ``list`` of :class:`.Node` + """ + raise NotImplementedError( + 'list_nodes not implemented for this driver') - @keyword secure: Weither to use HTTPS or HTTP. Note: Some providers - only support HTTPS, and it is on by default. - @type secure: bool + def list_sizes(self, location=None): + """ + List sizes on a provider - @keyword host: Override hostname used for connections. - @type host: str + :param location: The location at which to list sizes + :type location: :class:`.NodeLocation` - @keyword port: Override port used for connections. - @type port: int + :return: list of node size objects + :rtype: ``list`` of :class:`.NodeSize` """ - self.key = key - self.secret = secret - self.secure = secure - args = [self.key] + raise NotImplementedError( + 'list_sizes not implemented for this driver') - if self.secret != None: - args.append(self.secret) + def list_locations(self): + """ + List data centers for a provider - args.append(secure) + :return: list of node location objects + :rtype: ``list`` of :class:`.NodeLocation` + """ + raise NotImplementedError( + 'list_locations not implemented for this driver') - if host != None: - args.append(host) + def create_node(self, **kwargs): + """ + Create a new node instance. This instance will be started + automatically. 
- if port != None: - args.append(port) + Not all hosting API's are created equal and to allow libcloud to + support as many as possible there are some standard supported + variations of ``create_node``. These are declared using a + ``features`` API. + You can inspect ``driver.features['create_node']`` to see what + variation of the API you are dealing with: + + ``ssh_key`` + You can inject a public key into a new node allows key based SSH + authentication. + ``password`` + You can inject a password into a new node for SSH authentication. + If no password is provided libcloud will generated a password. + The password will be available as + ``return_value.extra['password']``. + ``generates_password`` + The hosting provider will generate a password. It will be returned + to you via ``return_value.extra['password']``. + + Some drivers allow you to set how you will authenticate with the + instance that is created. You can inject this initial authentication + information via the ``auth`` parameter. 
+ + If a driver supports the ``ssh_key`` feature flag for ``created_node`` + you can upload a public key into the new instance:: + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> auth = NodeAuthSSHKey('pubkey data here') + >>> node = driver.create_node("test_node", auth=auth) + + If a driver supports the ``password`` feature flag for ``create_node`` + you can set a password:: + + >>> driver = DummyNodeDriver(0) + >>> auth = NodeAuthPassword('mysecretpassword') + >>> node = driver.create_node("test_node", auth=auth) + + If a driver supports the ``password`` feature and you don't provide the + ``auth`` argument libcloud will assign a password:: + + >>> driver = DummyNodeDriver(0) + >>> node = driver.create_node("test_node") + >>> password = node.extra['password'] + + A password will also be returned in this way for drivers that declare + the ``generates_password`` feature, though in that case the password is + actually provided to the driver API by the hosting provider rather than + generated by libcloud. + + You can only pass a :class:`.NodeAuthPassword` or + :class:`.NodeAuthSSHKey` to ``create_node`` via the auth parameter if + has the corresponding feature flag. - self.connection = self.connectionCls(*args) + :param name: String with a name for this new node (required) + :type name: ``str`` - self.connection.driver = self - self.connection.connect() + :param size: The size of resources allocated to this node. + (required) + :type size: :class:`.NodeSize` - def create_node(self, **kwargs): - """Create a new node instance. + :param image: OS Image to boot on node. (required) + :type image: :class:`.NodeImage` - @keyword name: String with a name for this new node (required) - @type name: str + :param location: Which data center to create a node in. If empty, + undefined behavior will be selected. (optional) + :type location: :class:`.NodeLocation` - @keyword size: The size of resources allocated to this node. 
- (required) - @type size: L{NodeSize} + :param auth: Initial authentication information for the node + (optional) + :type auth: :class:`.NodeAuthSSHKey` or :class:`NodeAuthPassword` - @keyword image: OS Image to boot on node. (required) - @type image: L{NodeImage} + :return: The newly created node. + :rtype: :class:`.Node` + """ + raise NotImplementedError( + 'create_node not implemented for this driver') - @keyword location: Which data center to create a node in. If empty, - undefined behavoir will be selected. (optional) - @type location: L{NodeLocation} + def deploy_node(self, **kwargs): + """ + Create a new node, and start deployment. + + In order to be able to SSH into a created node access credentials are + required. + + A user can pass either a :class:`.NodeAuthPassword` or + :class:`.NodeAuthSSHKey` to the ``auth`` argument. If the + ``create_node`` implementation supports that kind if credential (as + declared in ``self.features['create_node']``) then it is passed on to + ``create_node``. Otherwise it is not passed on to ``create_node`` and + it is only used for authentication. + + If the ``auth`` parameter is not supplied but the driver declares it + supports ``generates_password`` then the password returned by + ``create_node`` will be used to SSH into the server. + + Finally, if the ``ssh_key_file`` is supplied that key will be used to + SSH into the server. + + This function may raise a :class:`DeploymentException`, if a + create_node call was successful, but there is a later error (like SSH + failing or timing out). This exception includes a Node object which + you may want to destroy if incomplete deployments are not desirable. 
+ + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> from libcloud.compute.deployment import ScriptDeployment + >>> from libcloud.compute.deployment import MultiStepDeployment + >>> from libcloud.compute.base import NodeAuthSSHKey + >>> driver = DummyNodeDriver(0) + >>> key = NodeAuthSSHKey('...') # read from file + >>> script = ScriptDeployment("yum -y install emacs strace tcpdump") + >>> msd = MultiStepDeployment([key, script]) + >>> def d(): + ... try: + ... driver.deploy_node(deploy=msd) + ... except NotImplementedError: + ... print ("not implemented for dummy driver") + >>> d() + not implemented for dummy driver + + Deploy node is typically not overridden in subclasses. The + existing implementation should be able to handle most such. + + :param deploy: Deployment to run once machine is online and + available to SSH. + :type deploy: :class:`Deployment` + + :param ssh_username: Optional name of the account which is used + when connecting to + SSH server (default is root) + :type ssh_username: ``str`` + + :param ssh_alternate_usernames: Optional list of ssh usernames to + try to connect with if using the + default one fails + :type ssh_alternate_usernames: ``list`` + + :param ssh_port: Optional SSH server port (default is 22) + :type ssh_port: ``int`` + + :param ssh_timeout: Optional SSH connection timeout in seconds + (default is 10) + :type ssh_timeout: ``float`` - @keyword auth: Initial authentication information for the node + :param auth: Initial authentication information for the node (optional) - @type auth: L{NodeAuthSSHKey} or L{NodeAuthPassword} + :type auth: :class:`.NodeAuthSSHKey` or :class:`NodeAuthPassword` - @return: The newly created L{Node}. + :param ssh_key: A path (or paths) to an SSH private key with which + to attempt to authenticate. (optional) + :type ssh_key: ``str`` or ``list`` of ``str`` + + :param timeout: How many seconds to wait before timing out. 
+ (default is 600) + :type timeout: ``int`` + + :param max_tries: How many times to retry if a deployment fails + before giving up (default is 3) + :type max_tries: ``int`` + + :param ssh_interface: The interface to wait for. Default is + 'public_ips', other option is 'private_ips'. + :type ssh_interface: ``str`` """ - raise NotImplementedError, \ - 'create_node not implemented for this driver' + if not libcloud.compute.ssh.have_paramiko: + raise RuntimeError('paramiko is not installed. You can install ' + + 'it using pip: pip install paramiko') + + if 'auth' in kwargs: + auth = kwargs['auth'] + if not isinstance(auth, (NodeAuthSSHKey, NodeAuthPassword)): + raise NotImplementedError( + 'If providing auth, only NodeAuthSSHKey or' + 'NodeAuthPassword is supported') + elif 'ssh_key' in kwargs: + # If an ssh_key is provided we can try deploy_node + pass + elif 'create_node' in self.features: + f = self.features['create_node'] + if 'generates_password' not in f and "password" not in f: + raise NotImplementedError( + 'deploy_node not implemented for this driver') + else: + raise NotImplementedError( + 'deploy_node not implemented for this driver') + + node = self.create_node(**kwargs) + max_tries = kwargs.get('max_tries', 3) + + password = None + if 'auth' in kwargs: + if isinstance(kwargs['auth'], NodeAuthPassword): + password = kwargs['auth'].password + elif 'password' in node.extra: + password = node.extra['password'] + + ssh_interface = kwargs.get('ssh_interface', 'public_ips') + + # Wait until node is up and running and has IP assigned + try: + node, ip_addresses = self.wait_until_running( + nodes=[node], + wait_period=3, + timeout=kwargs.get('timeout', NODE_ONLINE_WAIT_TIMEOUT), + ssh_interface=ssh_interface)[0] + except Exception: + e = sys.exc_info()[1] + raise DeploymentError(node=node, original_exception=e, driver=self) + + ssh_username = kwargs.get('ssh_username', 'root') + ssh_alternate_usernames = kwargs.get('ssh_alternate_usernames', []) + ssh_port = 
kwargs.get('ssh_port', 22) + ssh_timeout = kwargs.get('ssh_timeout', 10) + ssh_key_file = kwargs.get('ssh_key', None) + timeout = kwargs.get('timeout', SSH_CONNECT_TIMEOUT) + + deploy_error = None + + for username in ([ssh_username] + ssh_alternate_usernames): + try: + self._connect_and_run_deployment_script( + task=kwargs['deploy'], node=node, + ssh_hostname=ip_addresses[0], ssh_port=ssh_port, + ssh_username=username, ssh_password=password, + ssh_key_file=ssh_key_file, ssh_timeout=ssh_timeout, + timeout=timeout, max_tries=max_tries) + except Exception: + # Try alternate username + # Todo: Need to fix paramiko so we can catch a more specific + # exception + e = sys.exc_info()[1] + deploy_error = e + else: + # Script successfully executed, don't try alternate username + deploy_error = None + break + + if deploy_error is not None: + raise DeploymentError(node=node, original_exception=deploy_error, + driver=self) + + return node + + def reboot_node(self, node): + """ + Reboot a node. + + :param node: The node to be rebooted + :type node: :class:`.Node` + + :return: True if the reboot was successful, otherwise False + :rtype: ``bool`` + """ + raise NotImplementedError( + 'reboot_node not implemented for this driver') def destroy_node(self, node): - """Destroy a node. + """ + Destroy a node. Depending upon the provider, this may destroy all data associated with the node, including backups. - @return: C{bool} True if the destroy was successful, otherwise False + :param node: The node to be destroyed + :type node: :class:`.Node` + + :return: True if the destroy was successful, False otherwise. + :rtype: ``bool`` """ - raise NotImplementedError, \ - 'destroy_node not implemented for this driver' + raise NotImplementedError( + 'destroy_node not implemented for this driver') - def reboot_node(self, node): + ## + # Volume and snapshot management methods + ## + + def list_volumes(self): """ - Reboot a node. 
- @return: C{bool} True if the reboot was successful, otherwise False + List storage volumes. + + :rtype: ``list`` of :class:`.StorageVolume` """ - raise NotImplementedError, \ - 'reboot_node not implemented for this driver' + raise NotImplementedError( + 'list_volumes not implemented for this driver') - def list_nodes(self): + def list_volume_snapshots(self, volume): """ - List all nodes - @return: C{list} of L{Node} objects + List snapshots for a storage volume. + + :rtype: ``list`` of :class:`VolumeSnapshot` """ - raise NotImplementedError, \ - 'list_nodes not implemented for this driver' + raise NotImplementedError( + 'list_volume_snapshots not implemented for this driver') + + def create_volume(self, size, name, location=None, snapshot=None): + """ + Create a new volume. + + :param size: Size of volume in gigabytes (required) + :type size: ``int`` + + :param name: Name of the volume to be created + :type name: ``str`` + + :param location: Which data center to create a volume in. If + empty, undefined behavior will be selected. + (optional) + :type location: :class:`.NodeLocation` + + :param snapshot: Name of snapshot from which to create the new + volume. (optional) + :type snapshot: ``str`` + + :return: The newly created volume. + :rtype: :class:`StorageVolume` + """ + raise NotImplementedError( + 'create_volume not implemented for this driver') + + def create_volume_snapshot(self, volume, name): + """ + Creates a snapshot of the storage volume. + + :rtype: :class:`VolumeSnapshot` + """ + raise NotImplementedError( + 'create_volume_snapshot not implemented for this driver') + + def attach_volume(self, node, volume, device=None): + """ + Attaches volume to node. + + :param node: Node to attach volume to. + :type node: :class:`.Node` + + :param volume: Volume to attach. + :type volume: :class:`.StorageVolume` + + :param device: Where the device is exposed, e.g. 
'/dev/sdb' + :type device: ``str`` + + :rytpe: ``bool`` + """ + raise NotImplementedError('attach not implemented for this driver') + + def detach_volume(self, volume): + """ + Detaches a volume from a node. + + :param volume: Volume to be detached + :type volume: :class:`.StorageVolume` + + :rtype: ``bool`` + """ + + raise NotImplementedError('detach not implemented for this driver') + + def destroy_volume(self, volume): + """ + Destroys a storage volume. + + :param volume: Volume to be destroyed + :type volume: :class:`StorageVolume` + + :rtype: ``bool`` + """ + + raise NotImplementedError( + 'destroy_volume not implemented for this driver') + + def destroy_volume_snapshot(self, snapshot): + """ + Destroys a snapshot. + + :rtype: :class:`bool` + """ + raise NotImplementedError( + 'destroy_volume_snapshot not implemented for this driver') + + ## + # Image management methods + ## def list_images(self, location=None): """ - List images on a provider - @return: C{list} of L{NodeImage} objects + List images on a provider. + + :param location: The location at which to list images. + :type location: :class:`.NodeLocation` + + :return: list of node image objects. + :rtype: ``list`` of :class:`.NodeImage` """ - raise NotImplementedError, \ - 'list_images not implemented for this driver' + raise NotImplementedError( + 'list_images not implemented for this driver') - def list_sizes(self, location=None): + def create_image(self, node, name, description=None): """ - List sizes on a provider - @return: C{list} of L{NodeSize} objects + Creates an image from a node object. + + :param node: Node to run the task on. + :type node: :class:`.Node` + + :param name: name for new image. + :type name: ``str`` + + :param description: description for new image. + :type name: ``description`` + + :rtype: :class:`.NodeImage`: + :return: NodeImage instance on success. 
+ """ - raise NotImplementedError, \ - 'list_sizes not implemented for this driver' + raise NotImplementedError( + 'create_image not implemented for this driver') - def list_locations(self): + def delete_image(self, node_image): """ - List data centers for a provider - @return: C{list} of L{NodeLocation} objects + Deletes a node image from a provider. + + :param node_image: Node image object. + :type node_image: :class:`.NodeImage` + + :return: ``True`` if delete_image was successful, ``False`` otherwise. + :rtype: ``bool`` """ - raise NotImplementedError, \ - 'list_locations not implemented for this driver' - def deploy_node(self, **kwargs): + raise NotImplementedError( + 'delete_image not implemented for this driver') + + def get_image(self, image_id): """ - Create a new node, and start deployment. + Returns a single node image from a provider. - Depends on a Provider Driver supporting either using a specific password - or returning a generated password. + :param image_id: Node to run the task on. + :type image_id: ``str`` - This function may raise a L{DeploymentException}, if a create_node - call was successful, but there is a later error (like SSH failing or - timing out). This exception includes a Node object which you may want - to destroy if incomplete deployments are not desirable. + :rtype :class:`.NodeImage`: + :return: NodeImage instance on success. + """ + raise NotImplementedError( + 'get_image not implemented for this driver') - @keyword deploy: Deployment to run once machine is online and availble to SSH. - @type deploy: L{Deployment} + def copy_image(self, source_region, node_image, name, description=None): + """ + Copies an image from a source region to the current region. - @keyword ssh_username: Optional name of the account which is used when connecting to - SSH server (default is root) - @type ssh_username: C{str} + :param source_region: Region to copy the node from. 
+ :type source_region: ``str`` - @keyword ssh_port: Optional SSH server port (default is 22) - @type ssh_port: C{int} + :param node_image: NodeImage to copy. + :type node_image: :class`.NodeImage`: - @keyword ssh_timeout: Optional SSH connection timeout in seconds - (default is None) - @type ssh_timeout: C{float} + :param name: name for new image. + :type name: ``str`` - See L{NodeDriver.create_node} for more keyword args. + :param description: description for new image. + :type name: ``str`` - >>> from libcloud.compute.drivers.dummy import DummyNodeDriver - >>> from libcloud.deployment import ScriptDeployment, MultiStepDeployment - >>> from libcloud.compute.base import NodeAuthSSHKey - >>> driver = DummyNodeDriver(0) - >>> key = NodeAuthSSHKey('...') # read from file - >>> script = ScriptDeployment("yum -y install emacs strace tcpdump") - >>> msd = MultiStepDeployment([key, script]) - >>> def d(): - ... try: - ... node = driver.deploy_node(deploy=msd) - ... except NotImplementedError: - ... print "not implemented for dummy driver" - >>> d() - not implemented for dummy driver + :rtype: :class:`.NodeImage`: + :return: NodeImage instance on success. + """ + raise NotImplementedError( + 'copy_image not implemented for this driver') - Deploy node is typically not overridden in subclasses. The - existing implementation should be able to handle most such. + ## + # SSH key pair management methods + ## + + def list_key_pairs(self): """ - # TODO: support ssh keys - # FIX: this method is too long and complicated - WAIT_PERIOD=3 - password = None + List all the available key pair objects. 
- if 'generates_password' not in self.features["create_node"]: - if 'password' not in self.features["create_node"]: - raise NotImplementedError, \ - 'deploy_node not implemented for this driver' + :rtype: ``list`` of :class:`.KeyPair` objects + """ + raise NotImplementedError( + 'list_key_pairs not implemented for this driver') - if not kwargs.has_key('auth'): - kwargs['auth'] = NodeAuthPassword(os.urandom(16).encode('hex')) + def get_key_pair(self, name): + """ + Retrieve a single key pair. - password = kwargs['auth'].password - node = self.create_node(**kwargs) - try: - if 'generates_password' in self.features["create_node"]: - password = node.extra.get('password') - start = time.time() - end = start + (60 * 15)# FIX: this should be soft-coded - while time.time() < end: - # need to wait until we get a public IP address. - # TODO: there must be a better way of doing this - time.sleep(WAIT_PERIOD) - nodes = self.list_nodes() - nodes = filter(lambda n: n.uuid == node.uuid, nodes) - if len(nodes) == 0: - raise DeploymentError( - node, - ("Booted node[%s] " % node - + "is missing from list_nodes.")) - if len(nodes) > 1: - raise DeploymentError( - node, - ("Booted single node[%s], " % node - + "but multiple nodes have same UUID")) - - node = nodes[0] - - if (node.public_ip is not None - and node.public_ip != "" - and node.state == NodeState.RUNNING): - break - - ssh_username = kwargs.get('ssh_username', 'root') - ssh_port = kwargs.get('ssh_port', 22) - ssh_timeout = kwargs.get('ssh_timeout', 20) - - client = SSHClient(hostname=node.public_ip[0], - port=ssh_port, username=ssh_username, - password=password, - timeout=ssh_timeout) - - while time.time() < end: - try: - client.connect() - except (IOError, socket.gaierror, socket.error), e: - # Retry if a connection is refused or timeout - # occured - client.close() - time.sleep(WAIT_PERIOD) - continue - - max_tries, tries = 3, 0 - while tries < max_tries: - try: - n = kwargs["deploy"].run(node, client) - client.close() - 
raise - except Exception, e: - tries += 1 - if tries >= max_tries: - raise DeploymentError(node, - 'Failed after %d tries' % (max_tries)) - - except DeploymentError: - raise - except Exception, e: - raise DeploymentError(node, e) - return n + :param name: Name of the key pair to retrieve. + :type name: ``str`` - def _get_size_price(self, size_id): - return get_size_price(driver_type='compute', - driver_name=self.api_name, - size_id=size_id) + :rtype: :class:`.KeyPair` + """ + raise NotImplementedError( + 'get_key_pair not implemented for this driver') + def create_key_pair(self, name): + """ + Create a new key pair object. -def is_private_subnet(ip): - """ - Utility function to check if an IP address is inside a private subnet. + :param name: Key pair name. + :type name: ``str`` + """ + raise NotImplementedError( + 'create_key_pair not implemented for this driver') - @type ip: C{str} - @keyword ip: IP address to check + def import_key_pair_from_string(self, name, key_material): + """ + Import a new public key from string. - @return: C{bool} if the specified IP address is private. - """ - priv_subnets = [ {'subnet': '10.0.0.0', 'mask': '255.0.0.0'}, - {'subnet': '172.16.0.0', 'mask': '255.240.0.0'}, - {'subnet': '192.168.0.0', 'mask': '255.255.0.0'} ] + :param name: Key pair name. + :type name: ``str`` + + :param key_material: Public key material. + :type key_material: ``str`` + + :rtype: :class:`.KeyPair` object + """ + raise NotImplementedError( + 'import_key_pair_from_string not implemented for this driver') + + def import_key_pair_from_file(self, name, key_file_path): + """ + Import a new public key from string. + + :param name: Key pair name. + :type name: ``str`` + + :param key_file_path: Path to the public key file. 
+ :type key_file_path: ``str`` + + :rtype: :class:`.KeyPair` object + """ + key_file_path = os.path.expanduser(key_file_path) - ip = struct.unpack('I',socket.inet_aton(ip))[0] + with open(key_file_path, 'r') as fp: + key_material = fp.read() - for network in priv_subnets: - subnet = struct.unpack('I',socket.inet_aton(network['subnet']))[0] - mask = struct.unpack('I',socket.inet_aton(network['mask']))[0] + return self.import_key_pair_from_string(name=name, + key_material=key_material) - if (ip & mask) == (subnet & mask): + def delete_key_pair(self, key_pair): + """ + Delete an existing key pair. + + :param key_pair: Key pair object. + :type key_pair: :class`.KeyPair` + """ + raise NotImplementedError( + 'delete_key_pair not implemented for this driver') + + def wait_until_running(self, nodes, wait_period=3, timeout=600, + ssh_interface='public_ips', force_ipv4=True): + """ + Block until the provided nodes are considered running. + + Node is considered running when it's state is "running" and when it has + at least one IP address assigned. + + :param nodes: List of nodes to wait for. + :type nodes: ``list`` of :class:`.Node` + + :param wait_period: How many seconds to wait between each loop + iteration. (default is 3) + :type wait_period: ``int`` + + :param timeout: How many seconds to wait before giving up. + (default is 600) + :type timeout: ``int`` + + :param ssh_interface: Which attribute on the node to use to obtain + an IP address. Valid options: public_ips, + private_ips. Default is public_ips. + :type ssh_interface: ``str`` + + :param force_ipv4: Ignore IPv6 addresses (default is True). + :type force_ipv4: ``bool`` + + :return: ``[(Node, ip_addresses)]`` list of tuple of Node instance and + list of ip_address on success. + :rtype: ``list`` of ``tuple`` + """ + def is_supported(address): + """ + Return True for supported address. 
+ """ + if force_ipv4 and not is_valid_ip_address(address=address, + family=socket.AF_INET): + return False return True - return False + def filter_addresses(addresses): + """ + Return list of supported addresses. + """ + return [address for address in addresses if is_supported(address)] + + if ssh_interface not in ['public_ips', 'private_ips']: + raise ValueError('ssh_interface argument must either be' + + 'public_ips or private_ips') + + start = time.time() + end = start + timeout + + uuids = set([node.uuid for node in nodes]) + + while time.time() < end: + all_nodes = self.list_nodes() + matching_nodes = list([node for node in all_nodes + if node.uuid in uuids]) + + if len(matching_nodes) > len(uuids): + found_uuids = [node.uuid for node in matching_nodes] + msg = ('Unable to match specified uuids ' + + '(%s) with existing nodes. Found ' % (uuids) + + 'multiple nodes with same uuid: (%s)' % (found_uuids)) + raise LibcloudError(value=msg, driver=self) + + running_nodes = [node for node in matching_nodes + if node.state == NodeState.RUNNING] + addresses = [filter_addresses(getattr(node, ssh_interface)) + for node in running_nodes] + + if len(running_nodes) == len(uuids) == len(addresses): + return list(zip(running_nodes, addresses)) + else: + time.sleep(wait_period) + continue + + raise LibcloudError(value='Timed out after %s seconds' % (timeout), + driver=self) + + def _get_and_check_auth(self, auth): + """ + Helper function for providers supporting :class:`.NodeAuthPassword` or + :class:`.NodeAuthSSHKey` + + Validates that only a supported object type is passed to the auth + parameter and raises an exception if it is not. + + If no :class:`.NodeAuthPassword` object is provided but one is expected + then a password is automatically generated. 
+ """ + + if isinstance(auth, NodeAuthPassword): + if 'password' in self.features['create_node']: + return auth + raise LibcloudError( + 'Password provided as authentication information, but password' + 'not supported', driver=self) + + if isinstance(auth, NodeAuthSSHKey): + if 'ssh_key' in self.features['create_node']: + return auth + raise LibcloudError( + 'SSH Key provided as authentication information, but SSH Key' + 'not supported', driver=self) + + if 'password' in self.features['create_node']: + value = os.urandom(16) + value = binascii.hexlify(value).decode('ascii') + return NodeAuthPassword(value, generated=True) + + if auth: + raise LibcloudError( + '"auth" argument provided, but it was not a NodeAuthPassword' + 'or NodeAuthSSHKey object', driver=self) + + def _wait_until_running(self, node, wait_period=3, timeout=600, + ssh_interface='public_ips', force_ipv4=True): + # This is here for backward compatibility and will be removed in the + # next major release + return self.wait_until_running(nodes=[node], wait_period=wait_period, + timeout=timeout, + ssh_interface=ssh_interface, + force_ipv4=force_ipv4) + + def _ssh_client_connect(self, ssh_client, wait_period=1.5, timeout=300): + """ + Try to connect to the remote SSH server. If a connection times out or + is refused it is retried up to timeout number of seconds. + + :param ssh_client: A configured SSHClient instance + :type ssh_client: ``SSHClient`` + + :param wait_period: How many seconds to wait between each loop + iteration. (default is 1.5) + :type wait_period: ``int`` + + :param timeout: How many seconds to wait before giving up. 
+ (default is 300) + :type timeout: ``int`` + + :return: ``SSHClient`` on success + """ + start = time.time() + end = start + timeout + + while time.time() < end: + try: + ssh_client.connect() + except SSH_TIMEOUT_EXCEPTION_CLASSES: + e = sys.exc_info()[1] + message = str(e).lower() + expected_msg = 'no such file or directory' + + if isinstance(e, IOError) and expected_msg in message: + # Propagate (key) file doesn't exist errors + raise e + + # Retry if a connection is refused, timeout occurred, + # or the connection fails due to failed authentication. + ssh_client.close() + time.sleep(wait_period) + continue + else: + return ssh_client + + raise LibcloudError(value='Could not connect to the remote SSH ' + + 'server. Giving up.', driver=self) + + def _connect_and_run_deployment_script(self, task, node, ssh_hostname, + ssh_port, ssh_username, + ssh_password, ssh_key_file, + ssh_timeout, timeout, max_tries): + """ + Establish an SSH connection to the node and run the provided deployment + task. + + :rtype: :class:`.Node`: + :return: Node instance on success. + """ + ssh_client = SSHClient(hostname=ssh_hostname, + port=ssh_port, username=ssh_username, + password=ssh_password, + key_files=ssh_key_file, + timeout=ssh_timeout) + + ssh_client = self._ssh_client_connect(ssh_client=ssh_client, + timeout=timeout) + + # Execute the deployment task + node = self._run_deployment_script(task=task, node=node, + ssh_client=ssh_client, + max_tries=max_tries) + return node + + def _run_deployment_script(self, task, node, ssh_client, max_tries=3): + """ + Run the deployment script on the provided node. At this point it is + assumed that SSH connection has already been established. + + :param task: Deployment task to run. + :type task: :class:`Deployment` + + :param node: Node to run the task on. + :type node: ``Node`` + + :param ssh_client: A configured and connected SSHClient instance. 
+ :type ssh_client: :class:`SSHClient` + + :param max_tries: How many times to retry if a deployment fails + before giving up. (default is 3) + :type max_tries: ``int`` + + :rtype: :class:`.Node` + :return: ``Node`` Node instance on success. + """ + tries = 0 + + while tries < max_tries: + try: + node = task.run(node, ssh_client) + except Exception: + tries += 1 + + if tries >= max_tries: + e = sys.exc_info()[1] + raise LibcloudError(value='Failed after %d tries: %s' + % (max_tries, str(e)), driver=self) + else: + # Deployment succeeded + ssh_client.close() + return node + + def _get_size_price(self, size_id): + """ + Return pricing information for the provided size id. + """ + return get_size_price(driver_type='compute', + driver_name=self.api_name, + size_id=size_id) -if __name__ == "__main__": +if __name__ == '__main__': import doctest doctest.testmod() diff -Nru libcloud-0.5.0/libcloud/compute/deployment.py libcloud-0.15.1/libcloud/compute/deployment.py --- libcloud-0.5.0/libcloud/compute/deployment.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/deployment.py 2014-06-11 14:27:59.000000000 +0000 @@ -16,7 +16,14 @@ """ Provides generic deployment steps for machines post boot. """ + +from __future__ import with_statement + import os +import binascii + +from libcloud.utils.py3 import basestring, PY3 + class Deployment(object): """ @@ -25,95 +32,221 @@ def run(self, node, client): """ - Runs this deployment task on C{node} using the C{client} provided. + Runs this deployment task on node using the client provided. - @type node: L{Node} - @keyword node: Node to operate one + :type node: :class:`Node` + :keyword node: Node to operate one - @type client: L{BaseSSHClient} - @keyword client: Connected SSH client to use. + :type client: :class:`BaseSSHClient` + :keyword client: Connected SSH client to use. 
- @return: L{Node} + :return: :class:`Node` """ - raise NotImplementedError, \ - 'run not implemented for this deployment' + raise NotImplementedError( + 'run not implemented for this deployment') + + def _get_string_value(self, argument_name, argument_value): + if not isinstance(argument_value, basestring) and \ + not hasattr(argument_value, 'read'): + raise TypeError('%s argument must be a string or a file-like ' + 'object' % (argument_name)) + + if hasattr(argument_value, 'read'): + argument_value = argument_value.read() + + return argument_value class SSHKeyDeployment(Deployment): """ - Installs a public SSH Key onto a host. + Installs a public SSH Key onto a server. """ def __init__(self, key): """ - @type key: C{str} - @keyword key: Contents of the public key write + :type key: ``str`` or :class:`File` object + :keyword key: Contents of the public key write or a file object which + can be read. + """ + self.key = self._get_string_value(argument_name='key', + argument_value=key) + + def run(self, node, client): + """ + Installs SSH key into ``.ssh/authorized_keys`` + + See also :class:`Deployment.run` + """ + client.put(".ssh/authorized_keys", contents=self.key, mode='a') + return node + + +class FileDeployment(Deployment): + """ + Installs a file on the server. + """ + + def __init__(self, source, target): + """ + :type source: ``str`` + :keyword source: Local path of file to be installed + + :type target: ``str`` + :keyword target: Path to install file on node """ - self.key = key + self.source = source + self.target = target def run(self, node, client): """ - Installs SSH key into C{.ssh/authorized_keys} + Upload the file, retaining permissions. 
- See also L{Deployment.run} + See also :class:`Deployment.run` """ - client.put(".ssh/authorized_keys", contents=self.key) + perms = int(oct(os.stat(self.source).st_mode)[4:], 8) + + with open(self.source, 'rb') as fp: + content = fp.read() + + client.put(path=self.target, chmod=perms, + contents=content) return node + class ScriptDeployment(Deployment): """ - Runs an arbitrary Shell Script task. + Runs an arbitrary shell script on the server. + + This step works by first writing the content of the shell script (script + argument) in a \*.sh file on a remote server and then running that file. + + If you are running a non-shell script, make sure to put the appropriate + shebang to the top of the script. You are also advised to do that even if + you are running a plan shell script. """ - def __init__(self, script, name=None, delete=False): + def __init__(self, script, args=None, name=None, delete=False): """ - @type script: C{str} - @keyword script: Contents of the script to run + :type script: ``str`` + :keyword script: Contents of the script to run. + + :type args: ``list`` + :keyword args: Optional command line arguments which get passed to the + deployment script file. - @type name: C{str} - @keyword name: Name of the script to upload it as, if not specified, a random name will be choosen. + :type name: ``str`` + :keyword name: Name of the script to upload it as, if not specified, + a random name will be chosen. - @type delete: C{bool} - @keyword delete: Whether to delete the script on completion. + :type delete: ``bool`` + :keyword delete: Whether to delete the script on completion. 
""" + script = self._get_string_value(argument_name='script', + argument_value=script) + self.script = script + self.args = args or [] self.stdout = None self.stderr = None self.exit_status = None self.delete = delete self.name = name + if self.name is None: - self.name = "/root/deployment_%s.sh" % (os.urandom(4).encode('hex')) + # File is put under user's home directory + # (~/libcloud_deployment_.sh) + random_string = binascii.hexlify(os.urandom(4)) + random_string = random_string.decode('ascii') + self.name = 'libcloud_deployment_%s.sh' % (random_string) def run(self, node, client): """ Uploads the shell script and then executes it. - See also L{Deployment.run} + See also :class:`Deployment.run` """ - client.put(path=self.name, chmod=755, contents=self.script) - self.stdout, self.stderr, self.exit_status = client.run(self.name) + file_path = client.put(path=self.name, chmod=int('755', 8), + contents=self.script) + + # Pre-pend cwd if user specified a relative path + if self.name[0] != '/': + base_path = os.path.dirname(file_path) + name = os.path.join(base_path, self.name) + else: + name = self.name + + cmd = name + + if self.args: + # Append arguments to the command + cmd = '%s %s' % (name, ' '.join(self.args)) + else: + cmd = name + + self.stdout, self.stderr, self.exit_status = client.run(cmd) + if self.delete: client.delete(self.name) + return node + +class ScriptFileDeployment(ScriptDeployment): + """ + Runs an arbitrary shell script from a local file on the server. Same as + ScriptDeployment, except that you can pass in a path to the file instead of + the script content. + """ + + def __init__(self, script_file, args=None, name=None, delete=False): + """ + :type script_file: ``str`` + :keyword script_file: Path to a file containing the script to run. + + :type args: ``list`` + :keyword args: Optional command line arguments which get passed to the + deployment script file. 
+ + + :type name: ``str`` + :keyword name: Name of the script to upload it as, if not specified, + a random name will be chosen. + + :type delete: ``bool`` + :keyword delete: Whether to delete the script on completion. + """ + with open(script_file, 'rb') as fp: + content = fp.read() + + if PY3: + content = content.decode('utf-8') + + super(ScriptFileDeployment, self).__init__(script=content, + args=args, + name=name, + delete=delete) + + class MultiStepDeployment(Deployment): """ Runs a chain of Deployment steps. """ - def __init__(self, add = None): + def __init__(self, add=None): """ - @type add: C{list} - @keyword add: Deployment steps to add. + :type add: ``list`` + :keyword add: Deployment steps to add. """ self.steps = [] self.add(add) def add(self, add): - """Add a deployment to this chain. + """ + Add a deployment to this chain. - @type add: Single L{Deployment} or a C{list} of L{Deployment} - @keyword add: Adds this deployment to the others already in this object. + :type add: Single :class:`Deployment` or a ``list`` of + :class:`Deployment` + :keyword add: Adds this deployment to the others already in this + object. """ if add is not None: add = add if isinstance(add, (list, tuple)) else [add] @@ -123,7 +256,7 @@ """ Run each deployment that has been added. - See also L{Deployment.run} + See also :class:`Deployment.run` """ for s in self.steps: node = s.run(node, client) diff -Nru libcloud-0.5.0/libcloud/compute/drivers/abiquo.py libcloud-0.15.1/libcloud/compute/drivers/abiquo.py --- libcloud-0.5.0/libcloud/compute/drivers/abiquo.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/abiquo.py 2014-07-02 18:47:55.000000000 +0000 @@ -0,0 +1,759 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Abiquo Compute Driver + +The driver implements the compute Abiquo functionality for the Abiquo API. +This version is compatible with the following versions of Abiquo: + + * Abiquo 2.0 (http://wiki.abiquo.com/display/ABI20/The+Abiquo+API) + * Abiquo 2.2 (http://wiki.abiquo.com/display/ABI22/The+Abiquo+API) +""" +import xml.etree.ElementTree as ET + +from libcloud.compute.base import NodeDriver, NodeSize +from libcloud.compute.types import Provider, LibcloudError +from libcloud.common.abiquo import (AbiquoConnection, get_href, + AbiquoResponse) +from libcloud.compute.base import NodeLocation, NodeImage, Node +from libcloud.utils.py3 import tostring + + +class AbiquoNodeDriver(NodeDriver): + """ + Implements the :class:`NodeDriver`'s for the Abiquo Compute Provider + """ + + type = Provider.ABIQUO + name = 'Abiquo' + website = 'http://www.abiquo.com/' + connectionCls = AbiquoConnection + timeout = 2000 # some images take a lot of time! 
+ + # Media Types + NODES_MIME_TYPE = 'application/vnd.abiquo.virtualmachineswithnode+xml' + NODE_MIME_TYPE = 'application/vnd.abiquo.virtualmachinewithnode+xml' + VAPP_MIME_TYPE = 'application/vnd.abiquo.virtualappliance+xml' + VM_TASK_MIME_TYPE = 'application/vnd.abiquo.virtualmachinetask+xml' + + # Others constants + GIGABYTE = 1073741824 + + def __init__(self, user_id, secret, endpoint, **kwargs): + """ + Initializes Abiquo Driver + + Initializes the :class:`NodeDriver` object and populate the cache. + + :param user_id: identifier of Abiquo user (required) + :type user_id: ``str`` + :param secret: password of the Abiquo user (required) + :type secret: ``str`` + :param endpoint: Abiquo API endpoint (required) + :type endpoint: ``str`` that can be parsed as URL + """ + self.endpoint = endpoint + super(AbiquoNodeDriver, self).__init__(key=user_id, secret=secret, + secure=False, host=None, + port=None, **kwargs) + self.ex_populate_cache() + + def create_node(self, **kwargs): + """ + Create a new node instance in Abiquo + + All the :class:`Node`s need to be defined inside a VirtualAppliance + (called :class:`NodeGroup` here). If there is no group name defined, + 'libcloud' name will be used instead. + + This method wraps these Abiquo actions: + + 1. Create a group if it does not exist. + 2. Register a new node in the group. + 3. Deploy the node and boot it. + 4. Retrieves it again to get schedule-time attributes (such as ips + and vnc ports). + + The rest of the driver methods has been created in a way that, if any + of these actions fail, the user can not reach an inconsistent state + + :keyword name: The name for this new node (required) + :type name: ``str`` + + :keyword size: The size of resources allocated to this node. + :type size: :class:`NodeSize` + + :keyword image: OS Image to boot on node. (required) + :type image: :class:`NodeImage` + + :keyword location: Which data center to create a node in. If empty, + undefined behavior will be selected. 
(optional) + :type location: :class:`NodeLocation` + + :keyword group_name: Which group this node belongs to. If empty, + it will be created into 'libcloud' group. If + it does not found any group in the target + location (random location if you have not set + the parameter), then it will create a new + group with this name. + :type group_name: c{str} + + :return: The newly created node. + :rtype: :class:`Node` + """ + # Define the location + # To be clear: + # 'xml_loc' is the xml element we navigate into (we need links) + # 'loc' is the :class:`NodeLocation` entity + xml_loc, loc = self._define_create_node_location(**kwargs) + + # Define the Group + group = self._define_create_node_group(xml_loc, loc, **kwargs) + + # Register the Node + vm = self._define_create_node_node(group, **kwargs) + + # Execute the 'create' in hypervisor action + self._deploy_remote(vm) + + # Retrieve it again, to get some schedule-time defined values + edit_vm = get_href(vm, 'edit') + headers = {'Accept': self.NODE_MIME_TYPE} + vm = self.connection.request(edit_vm, headers=headers).object + return self._to_node(vm, self) + + def destroy_node(self, node): + """ + Destroy a node + + Depending on the provider, this may destroy all data associated with + the node, including backups. 
+ + :param node: The node to be destroyed + :type node: :class:`Node` + + :return: True if the destroy was successful, otherwise False + :rtype: ``bool`` + """ + + # Refresh node state + e_vm = self.connection.request(node.extra['uri_id']).object + state = e_vm.findtext('state') + + if state in ['ALLOCATED', 'CONFIGURED', 'LOCKED', 'UNKNOWN']: + raise LibcloudError('Invalid Node state', self) + + if state != 'NOT_ALLOCATED': + # prepare the element that forces the undeploy + vm_task = ET.Element('virtualmachinetask') + force_undeploy = ET.SubElement(vm_task, 'forceUndeploy') + force_undeploy.text = 'True' + # Set the URI + destroy_uri = node.extra['uri_id'] + '/action/undeploy' + # Prepare the headers + headers = {'Content-type': self.VM_TASK_MIME_TYPE} + res = self.connection.async_request(action=destroy_uri, + method='POST', + data=tostring(vm_task), + headers=headers) + + if state == 'NOT_ALLOCATED' or res.async_success(): + self.connection.request(action=node.extra['uri_id'], + method='DELETE') + return True + else: + return False + + def ex_run_node(self, node): + """ + Runs a node + + Here there is a bit difference between Abiquo states and libcloud + states, so this method is created to have better compatibility. In + libcloud, if the node is not running, then it does not exist (avoiding + UNKNOWN and temporal states). In Abiquo, you can define a node, and + then deploy it. + + If the node is in :class:`NodeState.TERMINATED` libcloud's state and in + 'NOT_DEPLOYED' Abiquo state, there is a way to run and recover it + for libcloud using this method. There is no way to reach this state + if you are using only libcloud, but you may have used another Abiquo + client and now you want to recover your node to be used by libcloud. 
+ + :param node: The node to run + :type node: :class:`Node` + + :return: The node itself, but with the new state + :rtype: :class:`Node` + """ + # Refresh node state + e_vm = self.connection.request(node.extra['uri_id']).object + state = e_vm.findtext('state') + + if state != 'NOT_ALLOCATED': + raise LibcloudError('Invalid Node state', self) + + # -------------------------------------------------------- + # Deploy the Node + # -------------------------------------------------------- + self._deploy_remote(e_vm) + + # -------------------------------------------------------- + # Retrieve it again, to get some schedule-defined + # values. + # -------------------------------------------------------- + edit_vm = get_href(e_vm, 'edit') + headers = {'Accept': self.NODE_MIME_TYPE} + e_vm = self.connection.request(edit_vm, headers=headers).object + return self._to_node(e_vm, self) + + def ex_populate_cache(self): + """ + Populate the cache. + + For each connection, it is good to store some objects that will be + useful for further requests, such as the 'user' and the 'enterprise' + objects. + + Executes the 'login' resource after setting the connection parameters + and, if the execution is successful, it sets the 'user' object into + cache. After that, it also requests for the 'enterprise' and + 'locations' data. + + List of locations should remain the same for a single libcloud + connection. However, this method is public and you are able to + refresh the list of locations any time. 
+ """ + user = self.connection.request('/login').object + self.connection.cache['user'] = user + e_ent = get_href(self.connection.cache['user'], + 'enterprise') + ent = self.connection.request(e_ent).object + self.connection.cache['enterprise'] = ent + + uri_vdcs = '/cloud/virtualdatacenters' + e_vdcs = self.connection.request(uri_vdcs).object + + # Set a dict for the datacenter and its href for a further search + params = {"idEnterprise": self._get_enterprise_id()} + e_dcs = self.connection.request('/admin/datacenters', + params=params).object + dc_dict = {} + for dc in e_dcs.findall('datacenter'): + key = get_href(dc, 'edit') + dc_dict[key] = dc + + # Populate locations cache + self.connection.cache['locations'] = {} + for e_vdc in e_vdcs.findall('virtualDatacenter'): + dc_link = get_href(e_vdc, 'datacenter') + loc = self._to_location(e_vdc, dc_dict[dc_link], self) + + # Save into cache the link to the itself because we will need + # it in the future, but we save here to don't extend the class + # :class:`NodeLocation`. + # So here we have the dict: :class:`NodeLocation` -> + # link_datacenter + self.connection.cache['locations'][loc] = get_href(e_vdc, 'edit') + + def ex_create_group(self, name, location=None): + """ + Create an empty group. + + You can specify the location as well. 
+ + :param group: name of the group (required) + :type group: ``str`` + + :param location: location were to create the group + :type location: :class:`NodeLocation` + + :returns: the created group + :rtype: :class:`NodeGroup` + """ + # prepare the element + vapp = ET.Element('virtualAppliance') + vapp_name = ET.SubElement(vapp, 'name') + vapp_name.text = name + + if location is None: + location = self.list_locations()[0] + elif location not in self.list_locations(): + raise LibcloudError('Location does not exist') + + link_vdc = self.connection.cache['locations'][location] + e_vdc = self.connection.request(link_vdc).object + + creation_link = get_href(e_vdc, 'virtualappliances') + headers = {'Content-type': self.VAPP_MIME_TYPE} + vapp = self.connection.request(creation_link, data=tostring(vapp), + headers=headers, method='POST').object + + uri_vapp = get_href(vapp, 'edit') + + return NodeGroup(self, vapp.findtext('name'), + uri=uri_vapp) + + def ex_destroy_group(self, group): + """ + Destroy a group. + + Be careful! Destroying a group means destroying all the :class:`Node`s + there and the group itself! + + If there is currently any action over any :class:`Node` of the + :class:`NodeGroup`, then the method will raise an exception. 
+ + :param name: The group (required) + :type name: :class:`NodeGroup` + + :return: If the group was destroyed successfully + :rtype: ``bool`` + """ + # Refresh group state + e_group = self.connection.request(group.uri).object + state = e_group.findtext('state') + + if state not in ['NOT_DEPLOYED', 'DEPLOYED']: + error = 'Can not destroy group because of current state' + raise LibcloudError(error, self) + + if state == 'DEPLOYED': + # prepare the element that forces the undeploy + vm_task = ET.Element('virtualmachinetask') + force_undeploy = ET.SubElement(vm_task, 'forceUndeploy') + force_undeploy.text = 'True' + + # Set the URI + undeploy_uri = group.uri + '/action/undeploy' + + # Prepare the headers + headers = {'Content-type': self.VM_TASK_MIME_TYPE} + res = self.connection.async_request(action=undeploy_uri, + method='POST', + data=tostring(vm_task), + headers=headers) + + if state == 'NOT_DEPLOYED' or res.async_success(): + # The node is no longer deployed. Unregister it. + self.connection.request(action=group.uri, + method='DELETE') + return True + else: + return False + + def ex_list_groups(self, location=None): + """ + List all groups. + + :param location: filter the groups by location (optional) + :type location: a :class:`NodeLocation` instance. 
+ + :return: the list of :class:`NodeGroup` + """ + groups = [] + for vdc in self._get_locations(location): + link_vdc = self.connection.cache['locations'][vdc] + e_vdc = self.connection.request(link_vdc).object + apps_link = get_href(e_vdc, 'virtualappliances') + vapps = self.connection.request(apps_link).object + for vapp in vapps.findall('virtualAppliance'): + nodes = [] + vms_link = get_href(vapp, 'virtualmachines') + headers = {'Accept': self.NODES_MIME_TYPE} + vms = self.connection.request(vms_link, headers=headers).object + for vm in vms.findall('virtualmachinewithnode'): + nodes.append(self._to_node(vm, self)) + + group = NodeGroup(self, vapp.findtext('name'), + nodes, get_href(vapp, 'edit')) + groups.append(group) + + return groups + + def list_images(self, location=None): + """ + List images on Abiquo Repositories + + :keyword location: The location to list images for. + :type location: :class:`NodeLocation` + + :return: list of node image objects + :rtype: ``list`` of :class:`NodeImage` + """ + enterprise_id = self._get_enterprise_id() + uri = '/admin/enterprises/%s/datacenterrepositories/' % (enterprise_id) + repos = self.connection.request(uri).object + + images = [] + for repo in repos.findall('datacenterRepository'): + # filter by location. 
Skips when the name of the location + # is different from the 'datacenterRepository' element + for vdc in self._get_locations(location): + # Check if the virtual datacenter belongs to this repo + link_vdc = self.connection.cache['locations'][vdc] + e_vdc = self.connection.request(link_vdc).object + dc_link_vdc = get_href(e_vdc, 'datacenter') + dc_link_repo = get_href(repo, 'datacenter') + + if dc_link_vdc == dc_link_repo: + # Filter the template in case we don't have it yet + url_templates = get_href(repo, 'virtualmachinetemplates') + hypervisor_type = e_vdc.findtext('hypervisorType') + params = {'hypervisorTypeName': hypervisor_type} + templates = self.connection.request(url_templates, + params).object + for templ in templates.findall('virtualMachineTemplate'): + # Avoid duplicated templates + id_template = templ.findtext('id') + ids = [image.id for image in images] + if id_template not in ids: + images.append(self._to_nodeimage(templ, self, + get_href(repo, + 'edit'))) + + return images + + def list_locations(self): + """ + Return list of locations where the user has access to. + + :return: the list of :class:`NodeLocation` available for the current + user + :rtype: ``list`` of :class:`NodeLocation` + """ + return list(self.connection.cache['locations'].keys()) + + def list_nodes(self, location=None): + """ + List all nodes. + + :param location: Filter the groups by location (optional) + :type location: a :class:`NodeLocation` instance. + + :return: List of node objects + :rtype: ``list`` of :class:`Node` + """ + nodes = [] + + for group in self.ex_list_groups(location): + nodes.extend(group.nodes) + + return nodes + + def list_sizes(self, location=None): + """ + List sizes on a provider. + + Abiquo does not work with sizes. However, this method + returns a list of predefined ones (copied from :class:`DummyNodeDriver` + but without price neither bandwidth) to help the users to create their + own. 
+ + If you call the method :class:`AbiquoNodeDriver.create_node` with the + size informed, it will just override the 'ram' value of the 'image' + template. So it is no too much usefull work with sizes... + + :return: The list of sizes + :rtype: ``list`` of :class:`NodeSizes` + """ + return [ + NodeSize(id=1, + name='Small', + ram=128, + disk=4, + bandwidth=None, + price=None, + driver=self), + NodeSize(id=2, + name='Medium', + ram=512, + disk=16, + bandwidth=None, + price=None, + driver=self), + NodeSize(id=3, + name='Big', + ram=4096, + disk=32, + bandwidth=None, + price=None, + driver=self), + NodeSize(id=4, + name="XXL Big", + ram=4096 * 2, + disk=32 * 4, + bandwidth=None, + price=None, + driver=self) + ] + + def reboot_node(self, node): + """ + Reboot a node. + + :param node: The node to be rebooted + :type node: :class:`Node` + + :return: True if the reboot was successful, otherwise False + :rtype: ``bool`` + """ + reboot_uri = node.extra['uri_id'] + '/action/reset' + res = self.connection.async_request(action=reboot_uri, method='POST') + return res.async_success() + + # ------------------------- + # Extenstion methods + # ------------------------- + + def _ex_connection_class_kwargs(self): + """ + Set the endpoint as an extra :class:`AbiquoConnection` argument. + + According to Connection code, the "url" argument should be + parsed properly to connection. + + :return: ``dict`` of :class:`AbiquoConnection` input arguments + """ + + return {'url': self.endpoint} + + def _deploy_remote(self, e_vm): + """ + Asynchronous call to create the node. 
+ """ + # -------------------------------------------------------- + # Deploy the Node + # -------------------------------------------------------- + # prepare the element that forces the deploy + vm_task = ET.Element('virtualmachinetask') + force_deploy = ET.SubElement(vm_task, 'forceEnterpriseSoftLimits') + force_deploy.text = 'True' + + # Prepare the headers + headers = {'Content-type': self.VM_TASK_MIME_TYPE} + link_deploy = get_href(e_vm, 'deploy') + res = self.connection.async_request(action=link_deploy, method='POST', + data=tostring(vm_task), + headers=headers) + if not res.async_success(): + raise LibcloudError('Could not run the node', self) + + def _to_location(self, vdc, dc, driver): + """ + Generates the :class:`NodeLocation` class. + """ + identifier = vdc.findtext('id') + name = vdc.findtext('name') + country = dc.findtext('name') + return NodeLocation(identifier, name, country, driver) + + def _to_node(self, vm, driver): + """ + Generates the :class:`Node` class. + """ + identifier = vm.findtext('id') + name = vm.findtext('nodeName') + state = AbiquoResponse.NODE_STATE_MAP[vm.findtext('state')] + + link_image = get_href(vm, 'virtualmachinetemplate') + image_element = self.connection.request(link_image).object + repo_link = get_href(image_element, 'datacenterrepository') + image = self._to_nodeimage(image_element, self, repo_link) + + # Fill the 'ips' data + private_ips = [] + public_ips = [] + nics_element = self.connection.request(get_href(vm, 'nics')).object + for nic in nics_element.findall('nic'): + ip = nic.findtext('ip') + for link in nic.findall('link'): + rel = link.attrib['rel'] + if rel == 'privatenetwork': + private_ips.append(ip) + elif rel in ['publicnetwork', 'externalnetwork', + 'unmanagednetwork']: + public_ips.append(ip) + + extra = {'uri_id': get_href(vm, 'edit')} + + if vm.find('vdrpIp') is not None: + extra['vdrp_ip'] = vm.findtext('vdrpIP') + extra['vdrp_port'] = vm.findtext('vdrpPort') + + return Node(identifier, name, state, 
public_ips, private_ips, + driver, image=image, extra=extra) + + def _to_nodeimage(self, template, driver, repo): + """ + Generates the :class:`NodeImage` class. + """ + identifier = template.findtext('id') + name = template.findtext('name') + url = get_href(template, 'edit') + extra = {'repo': repo, 'url': url} + return NodeImage(identifier, name, driver, extra) + + def _get_locations(self, location=None): + """ + Returns the locations as a generator. + """ + if location is not None: + yield location + else: + for loc in self.list_locations(): + yield loc + + def _get_enterprise_id(self): + """ + Returns the identifier of the logged user's enterprise. + """ + return self.connection.cache['enterprise'].findtext('id') + + def _define_create_node_location(self, **kwargs): + """ + Search for a location where to create the node. + + Based on 'create_node' **kwargs argument, decide in which + location will be created. + """ + # First, get image location + if 'image' not in kwargs: + error = "'image' parameter is mandatory" + raise LibcloudError(error, self) + + image = kwargs['image'] + + # Get the location argument + location = None + if 'location' in kwargs: + location = kwargs['location'] + if location not in self.list_locations(): + raise LibcloudError('Location does not exist') + + # Check if the image is compatible with any of the locations or + # the input location + loc = None + target_loc = None + for candidate_loc in self._get_locations(location): + link_vdc = self.connection.cache['locations'][candidate_loc] + e_vdc = self.connection.request(link_vdc).object + # url_location = get_href(e_vdc, 'datacenter') + for img in self.list_images(candidate_loc): + if img.id == image.id: + loc = e_vdc + target_loc = candidate_loc + break + + if loc is None: + error = 'The image can not be used in any location' + raise LibcloudError(error, self) + + return loc, target_loc + + def _define_create_node_group(self, xml_loc, loc, **kwargs): + """ + Search for a group where to 
create the node. + + If we can not find any group, create it into argument 'location' + """ + if 'group_name' not in kwargs: + group_name = NodeGroup.DEFAULT_GROUP_NAME + else: + group_name = kwargs['group_name'] + + # We search if the group is already defined into the location + groups_link = get_href(xml_loc, 'virtualappliances') + vapps_element = self.connection.request(groups_link).object + target_group = None + for vapp in vapps_element.findall('virtualAppliance'): + if vapp.findtext('name') == group_name: + uri_vapp = get_href(vapp, 'edit') + return NodeGroup(self, vapp.findtext('name'), uri=uri_vapp) + + # target group not found: create it. Since it is an extension of + # the basic 'libcloud' functionality, we try to be as flexible as + # possible. + if target_group is None: + return self.ex_create_group(group_name, loc) + + def _define_create_node_node(self, group, **kwargs): + """ + Defines the node before to create. + + In Abiquo, you first need to 'register' or 'define' the node in + the API before to create it into the target hypervisor. 
+ """ + vm = ET.Element('virtualmachinewithnode') + if 'name' in kwargs: + vmname = ET.SubElement(vm, 'nodeName') + vmname.text = kwargs['name'] + attrib = {'type': 'application/vnd.abiquo/virtualmachinetemplate+xml', + 'rel': 'virtualmachinetemplate', + 'href': kwargs['image'].extra['url']} + ET.SubElement(vm, 'link', attrib=attrib) + headers = {'Content-type': self.NODE_MIME_TYPE} + + if 'size' in kwargs: + # Override the 'NodeSize' data + ram = ET.SubElement(vm, 'ram') + ram.text = str(kwargs['size'].ram) + hd = ET.SubElement(vm, 'hdInBytes') + hd.text = str(int(kwargs['size'].disk) * self.GIGABYTE) + + # Create the virtual machine + nodes_link = group.uri + '/virtualmachines' + vm = self.connection.request(nodes_link, data=tostring(vm), + headers=headers, method='POST').object + edit_vm = get_href(vm, 'edit') + headers = {'Accept': self.NODE_MIME_TYPE} + + return self.connection.request(edit_vm, headers=headers).object + + +class NodeGroup(object): + """ + Group of virtual machines that can be managed together + + All :class:`Node`s in Abiquo must be defined inside a Virtual Appliance. + We offer a way to handle virtual appliances (called NodeGroup to + maintain some kind of name conventions here) inside the + :class:`AbiquoNodeDriver` without breaking compatibility of the rest of + libcloud API. + + If the user does not want to handle groups, all the virtual machines + will be created inside a group named 'libcloud' + """ + DEFAULT_GROUP_NAME = 'libcloud' + + def __init__(self, driver, name=DEFAULT_GROUP_NAME, nodes=[], uri=''): + """ + Initialize a new group object. + """ + self.driver = driver + self.name = name + self.nodes = nodes + self.uri = uri + + def __repr__(self): + return (('') + % (self.name, ",".join(map(str, self.nodes)))) + + def destroy(self): + """ + Destroys the group delegating the execution to + :class:`AbiquoNodeDriver`. 
+ """ + return self.driver.ex_destroy_group(self) diff -Nru libcloud-0.5.0/libcloud/compute/drivers/bluebox.py libcloud-0.15.1/libcloud/compute/drivers/bluebox.py --- libcloud-0.5.0/libcloud/compute/drivers/bluebox.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/bluebox.py 2013-11-29 12:35:04.000000000 +0000 @@ -19,19 +19,17 @@ This driver implements all libcloud functionality for the Blue Box Blocks API. Blue Box home page http://bluebox.net -Blue Box API documentation https://boxpanel.bluebox.net/public/the_vault/index.php/Blocks_API +Blue Box API documentation https://boxpanel.bluebox +.net/public/the_vault/index.php/Blocks_API """ import copy -import urllib import base64 -try: - import json -except: - import simplejson as json +from libcloud.utils.py3 import urlencode +from libcloud.utils.py3 import b -from libcloud.common.base import Response, ConnectionUserAndKey +from libcloud.common.base import JsonResponse, ConnectionUserAndKey from libcloud.compute.providers import Provider from libcloud.compute.types import NodeState, InvalidCredsError from libcloud.compute.base import Node, NodeDriver @@ -45,52 +43,46 @@ # so we simply list what's available right now, along with all of the various # attributes that are needed by libcloud. 
BLUEBOX_INSTANCE_TYPES = { - '1gb': { - 'id': '94fd37a7-2606-47f7-84d5-9000deda52ae', - 'name': 'Block 1GB Virtual Server', - 'ram': 1024, - 'disk': 20, - 'cpu': 0.5 - }, - '2gb': { - 'id': 'b412f354-5056-4bf0-a42f-6ddd998aa092', - 'name': 'Block 2GB Virtual Server', - 'ram': 2048, - 'disk': 25, - 'cpu': 1 - }, - '4gb': { - 'id': '0cd183d3-0287-4b1a-8288-b3ea8302ed58', - 'name': 'Block 4GB Virtual Server', - 'ram': 4096, - 'disk': 50, - 'cpu': 2 - }, - '8gb': { - 'id': 'b9b87a5b-2885-4a2e-b434-44a163ca6251', - 'name': 'Block 8GB Virtual Server', - 'ram': 8192, - 'disk': 100, - 'cpu': 4 - } + '1gb': { + 'id': '94fd37a7-2606-47f7-84d5-9000deda52ae', + 'name': 'Block 1GB Virtual Server', + 'ram': 1024, + 'disk': 20, + 'cpu': 0.5 + }, + '2gb': { + 'id': 'b412f354-5056-4bf0-a42f-6ddd998aa092', + 'name': 'Block 2GB Virtual Server', + 'ram': 2048, + 'disk': 25, + 'cpu': 1 + }, + '4gb': { + 'id': '0cd183d3-0287-4b1a-8288-b3ea8302ed58', + 'name': 'Block 4GB Virtual Server', + 'ram': 4096, + 'disk': 50, + 'cpu': 2 + }, + '8gb': { + 'id': 'b9b87a5b-2885-4a2e-b434-44a163ca6251', + 'name': 'Block 8GB Virtual Server', + 'ram': 8192, + 'disk': 100, + 'cpu': 4 + } } RAM_PER_CPU = 2048 -NODE_STATE_MAP = { 'queued': NodeState.PENDING, - 'building': NodeState.PENDING, - 'running': NodeState.RUNNING, - 'error': NodeState.TERMINATED, - 'unknown': NodeState.UNKNOWN } - -class BlueboxResponse(Response): - def parse_body(self): - try: - js = json.loads(self.body) - return js - except ValueError: - return self.body +NODE_STATE_MAP = {'queued': NodeState.PENDING, + 'building': NodeState.PENDING, + 'running': NodeState.RUNNING, + 'error': NodeState.TERMINATED, + 'unknown': NodeState.UNKNOWN} + +class BlueboxResponse(JsonResponse): def parse_error(self): if int(self.status) == 401: if not self.body: @@ -99,6 +91,7 @@ raise InvalidCredsError(self.body) return self.body + class BlueboxNodeSize(NodeSize): def __init__(self, id, name, cpu, ram, disk, price, driver): self.id = id @@ -110,8 +103,12 
@@ self.driver = driver def __repr__(self): - return (('') - % (self.id, self.name, self.cpu, self.ram, self.disk, self.price, self.driver.name)) + return (( + '') + % (self.id, self.name, self.cpu, self.ram, self.disk, + self.price, self.driver.name)) + class BlueboxConnection(ConnectionUserAndKey): """ @@ -122,11 +119,14 @@ secure = True responseCls = BlueboxResponse + allow_insecure = False + def add_default_headers(self, headers): - user_b64 = base64.b64encode('%s:%s' % (self.user_id, self.key)) + user_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key))) headers['Authorization'] = 'Basic %s' % (user_b64) return headers + class BlueboxNodeDriver(NodeDriver): """ Bluebox Blocks node driver @@ -136,6 +136,8 @@ type = Provider.BLUEBOX api_name = 'bluebox' name = 'Bluebox Blocks' + website = 'http://bluebox.net' + features = {'create_node': ['ssh_key', 'password']} def list_nodes(self): result = self.connection.request('/api/blocks.json') @@ -143,9 +145,9 @@ def list_sizes(self, location=None): sizes = [] - for key, values in BLUEBOX_INSTANCE_TYPES.iteritems(): + for key, values in list(BLUEBOX_INSTANCE_TYPES.items()): attributes = copy.deepcopy(values) - attributes.update({ 'price': self._get_size_price(size_id=key) }) + attributes.update({'price': self._get_size_price(size_id=key)}) sizes.append(BlueboxNodeSize(driver=self.connection.driver, **attributes)) @@ -155,22 +157,19 @@ result = self.connection.request('/api/block_templates.json') images = [] for image in result.object: - images.extend([self._to_image(image)]) + images.extend([self._to_image(image)]) return images def create_node(self, **kwargs): - headers = { 'Content-Type': 'application/x-www-form-urlencoded' } + headers = {'Content-Type': 'application/x-www-form-urlencoded'} size = kwargs["size"] name = kwargs['name'] image = kwargs['image'] size = kwargs['size'] - try: - auth = kwargs['auth'] - except Exception: - raise Exception("SSH public key or password required.") + auth = 
self._get_and_check_auth(kwargs.get('auth')) data = { 'hostname': name, @@ -194,15 +193,17 @@ if not ssh and not password: raise Exception("SSH public key or password required.") - params = urllib.urlencode(data) - result = self.connection.request('/api/blocks.json', headers=headers, data=params, method='POST') + params = urlencode(data) + result = self.connection.request('/api/blocks.json', headers=headers, + data=params, method='POST') node = self._to_node(result.object) + + if getattr(auth, "generated", False): + node.extra['password'] = auth.password + return node def destroy_node(self, node): - """ - Destroy node by passing in the node object - """ url = '/api/blocks/%s.json' % (node.id) result = self.connection.request(url, method='DELETE') @@ -221,9 +222,9 @@ n = Node(id=vm['id'], name=vm['hostname'], state=state, - public_ip=[ ip['address'] for ip in vm['ips'] ], - private_ip=[], - extra={'storage':vm['storage'], 'cpu':vm['cpu']}, + public_ips=[ip['address'] for ip in vm['ips']], + private_ips=[], + extra={'storage': vm['storage'], 'cpu': vm['cpu']}, driver=self.connection.driver) return n diff -Nru libcloud-0.5.0/libcloud/compute/drivers/brightbox.py libcloud-0.15.1/libcloud/compute/drivers/brightbox.py --- libcloud-0.5.0/libcloud/compute/drivers/brightbox.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/brightbox.py 2014-07-02 18:47:55.000000000 +0000 @@ -15,80 +15,23 @@ """ Brightbox Driver """ -import httplib -import base64 -from libcloud.common.base import ConnectionUserAndKey, Response -from libcloud.compute.types import Provider, NodeState, InvalidCredsError +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import b + +from libcloud.common.brightbox import BrightboxConnection +from libcloud.compute.types import Provider, NodeState from libcloud.compute.base import NodeDriver from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation -try: - import json -except ImportError: - import 
simplejson as json - -API_VERSION = '1.0' - - -class BrightboxResponse(Response): - def success(self): - return self.status >= 200 and self.status < 400 - - def parse_body(self): - if self.headers['content-type'].split('; ')[0] == 'application/json' and len(self.body) > 0: - return json.loads(self.body) - else: - return self.body - - def parse_error(self): - return json.loads(self.body)['error'] - - -class BrightboxConnection(ConnectionUserAndKey): - """ - Connection class for the Brightbox driver - """ - - host = 'api.gb1.brightbox.com' - responseCls = BrightboxResponse - - def _fetch_oauth_token(self): - body = json.dumps({'client_id': self.user_id, 'grant_type': 'none'}) - - authorization = 'Basic ' + base64.encodestring('%s:%s' % (self.user_id, self.key)).rstrip() - - self.connect() - - response = self.connection.request(method='POST', url='/token', body=body, headers={ - 'Host': self.host, - 'User-Agent': self._user_agent(), - 'Authorization': authorization, - 'Content-Type': 'application/json', - 'Content-Length': str(len(body)) - }) - - response = self.connection.getresponse() - - if response.status == 200: - return json.loads(response.read())['access_token'] - else: - message = '%s (%s)' % (json.loads(response.read())['error'], response.status) - - raise InvalidCredsError, message +import base64 - def add_default_headers(self, headers): - try: - headers['Authorization'] = 'OAuth ' + self.token - except AttributeError: - self.token = self._fetch_oauth_token() - headers['Authorization'] = 'OAuth ' + self.token +API_VERSION = '1.0' - return headers - def encode_data(self, data): - return json.dumps(data) +def _extract(d, keys): + return dict((k, d[k]) for k in keys if k in d and d[k] is not None) class BrightboxNodeDriver(NodeDriver): @@ -100,122 +43,264 @@ type = Provider.BRIGHTBOX name = 'Brightbox' + website = 'http://www.brightbox.co.uk/' - NODE_STATE_MAP = { 'creating': NodeState.PENDING, - 'active': NodeState.RUNNING, - 'inactive': NodeState.UNKNOWN, - 
'deleting': NodeState.UNKNOWN, - 'deleted': NodeState.TERMINATED, - 'failed': NodeState.UNKNOWN } + NODE_STATE_MAP = {'creating': NodeState.PENDING, + 'active': NodeState.RUNNING, + 'inactive': NodeState.UNKNOWN, + 'deleting': NodeState.UNKNOWN, + 'deleted': NodeState.TERMINATED, + 'failed': NodeState.UNKNOWN, + 'unavailable': NodeState.UNKNOWN} + + def __init__(self, key, secret=None, secure=True, host=None, port=None, + api_version=API_VERSION, **kwargs): + super(BrightboxNodeDriver, self).__init__(key=key, secret=secret, + secure=secure, + host=host, port=port, + api_version=api_version, + **kwargs) def _to_node(self, data): + extra_data = _extract(data, ['fqdn', 'user_data', 'status', + 'interfaces', 'snapshots', + 'server_groups', 'hostname', + 'started_at', 'created_at', + 'deleted_at']) + extra_data['zone'] = self._to_location(data['zone']) + + ipv6_addresses = [interface['ipv6_address'] for interface + in data['interfaces'] if 'ipv6_address' in interface] + + private_ips = [interface['ipv4_address'] + for interface in data['interfaces'] + if 'ipv4_address' in interface] + + public_ips = [cloud_ip['public_ip'] for cloud_ip in data['cloud_ips']] + public_ips += ipv6_addresses + return Node( - id = data['id'], - name = data['name'], - state = self.NODE_STATE_MAP[data['status']], - public_ip = map(lambda cloud_ip: cloud_ip['public_ip'], data['cloud_ips']), - private_ip = map(lambda interface: interface['ipv4_address'], data['interfaces']), - driver = self.connection.driver, - extra = { - 'status': data['status'], - 'interfaces': data['interfaces'] - } + id=data['id'], + name=data['name'], + state=self.NODE_STATE_MAP[data['status']], + private_ips=private_ips, + public_ips=public_ips, + driver=self.connection.driver, + size=self._to_size(data['server_type']), + image=self._to_image(data['image']), + extra=extra_data ) def _to_image(self, data): + extra_data = _extract(data, ['arch', 'compatibility_mode', + 'created_at', 'description', + 'disk_size', 'min_ram', 
'official', + 'owner', 'public', 'source', + 'source_type', 'status', 'username', + 'virtual_size', 'licence_name']) + + if data.get('ancestor', None): + extra_data['ancestor'] = self._to_image(data['ancestor']) + return NodeImage( - id = data['id'], - name = data['name'], - driver = self, - extra = { - 'description': data['description'], - 'arch': data['arch'] - } + id=data['id'], + name=data['name'], + driver=self, + extra=extra_data ) def _to_size(self, data): return NodeSize( - id = data['id'], - name = data['name'], - ram = data['ram'], - disk = data['disk_size'], - bandwidth = 0, - price = '', - driver = self + id=data['id'], + name=data['name'], + ram=data['ram'], + disk=data['disk_size'], + bandwidth=0, + price=0, + driver=self ) def _to_location(self, data): - return NodeLocation( - id = data['id'], - name = data['handle'], - country = 'GB', - driver = self - ) + if data: + return NodeLocation( + id=data['id'], + name=data['handle'], + country='GB', + driver=self + ) + else: + return None def _post(self, path, data={}): headers = {'Content-Type': 'application/json'} + return self.connection.request(path, data=data, headers=headers, + method='POST') - return self.connection.request(path, data=data, headers=headers, method='POST') + def _put(self, path, data={}): + headers = {'Content-Type': 'application/json'} + return self.connection.request(path, data=data, headers=headers, + method='PUT') def create_node(self, **kwargs): + """Create a new Brightbox node + + Reference: https://api.gb1.brightbox.com/1.0/#server_create_server + + @inherits: :class:`NodeDriver.create_node` + + :keyword ex_userdata: User data + :type ex_userdata: ``str`` + + :keyword ex_servergroup: Name or list of server group ids to + add server to + :type ex_servergroup: ``str`` or ``list`` of ``str`` + """ data = { 'name': kwargs['name'], 'server_type': kwargs['size'].id, 'image': kwargs['image'].id, - 'user_data': '' } - if kwargs.has_key('location'): + if 'ex_userdata' in kwargs: + 
data['user_data'] = base64.b64encode(b(kwargs['ex_userdata'])) \ + .decode('ascii') + + if 'location' in kwargs: data['zone'] = kwargs['location'].id - else: - data['zone'] = '' - data = self._post('/%s/servers' % API_VERSION, data).object + if 'ex_servergroup' in kwargs: + if not isinstance(kwargs['ex_servergroup'], list): + kwargs['ex_servergroup'] = [kwargs['ex_servergroup']] + data['server_groups'] = kwargs['ex_servergroup'] + data = self._post('/%s/servers' % self.api_version, data).object return self._to_node(data) def destroy_node(self, node): - response = self.connection.request('/%s/servers/%s' % (API_VERSION, node.id), method='DELETE') - + response = self.connection.request( + '/%s/servers/%s' % (self.api_version, node.id), + method='DELETE') return response.status == httplib.ACCEPTED def list_nodes(self): - data = self.connection.request('/%s/servers' % API_VERSION).object + data = self.connection.request('/%s/servers' % self.api_version).object + return list(map(self._to_node, data)) - return map(self._to_node, data) + def list_images(self, location=None): + data = self.connection.request('/%s/images' % self.api_version).object + return list(map(self._to_image, data)) - def list_images(self): - data = self.connection.request('/%s/images' % API_VERSION).object + def list_sizes(self): + data = self.connection.request('/%s/server_types' % self.api_version) \ + .object + return list(map(self._to_size, data)) - return map(self._to_image, data) + def list_locations(self): + data = self.connection.request('/%s/zones' % self.api_version).object + return list(map(self._to_location, data)) - def list_sizes(self): - data = self.connection.request('/%s/server_types' % API_VERSION).object + def ex_list_cloud_ips(self): + """ + List Cloud IPs - return map(self._to_size, data) + @note: This is an API extension for use on Brightbox - def list_locations(self): - data = self.connection.request('/%s/zones' % API_VERSION).object + :rtype: ``list`` of ``dict`` + """ + 
return self.connection.request('/%s/cloud_ips' % self.api_version) \ + .object - return map(self._to_location, data) + def ex_create_cloud_ip(self, reverse_dns=None): + """ + Requests a new cloud IP address for the account - def ex_list_cloud_ips(self): - return self.connection.request('/%s/cloud_ips' % API_VERSION).object + @note: This is an API extension for use on Brightbox - def ex_create_cloud_ip(self): - return self._post('/%s/cloud_ips' % API_VERSION).object + :param reverse_dns: Reverse DNS hostname + :type reverse_dns: ``str`` - def ex_map_cloud_ip(self, cloud_ip_id, interface_id): - response = self._post('/%s/cloud_ips/%s/map' % (API_VERSION, cloud_ip_id), {'interface': interface_id}) + :rtype: ``dict`` + """ + params = {} + + if reverse_dns: + params['reverse_dns'] = reverse_dns + + return self._post('/%s/cloud_ips' % self.api_version, params).object + + def ex_update_cloud_ip(self, cloud_ip_id, reverse_dns): + """ + Update some details of the cloud IP address + + @note: This is an API extension for use on Brightbox + + :param cloud_ip_id: The id of the cloud ip. + :type cloud_ip_id: ``str`` + :param reverse_dns: Reverse DNS hostname + :type reverse_dns: ``str`` + + :rtype: ``dict`` + """ + response = self._put('/%s/cloud_ips/%s' % (self.api_version, + cloud_ip_id), + {'reverse_dns': reverse_dns}) + return response.status == httplib.OK + + def ex_map_cloud_ip(self, cloud_ip_id, interface_id): + """ + Maps (or points) a cloud IP address at a server's interface + or a load balancer to allow them to respond to public requests + + @note: This is an API extension for use on Brightbox + + :param cloud_ip_id: The id of the cloud ip. + :type cloud_ip_id: ``str`` + + :param interface_id: The Interface ID or LoadBalancer ID to + which this Cloud IP should be mapped to + :type interface_id: ``str`` + + :return: True if the mapping was successful. 
+ :rtype: ``bool`` + """ + response = self._post('/%s/cloud_ips/%s/map' % (self.api_version, + cloud_ip_id), + {'destination': interface_id}) return response.status == httplib.ACCEPTED def ex_unmap_cloud_ip(self, cloud_ip_id): - response = self._post('/%s/cloud_ips/%s/unmap' % (API_VERSION, cloud_ip_id)) - + """ + Unmaps a cloud IP address from its current destination making + it available to remap. This remains in the account's pool + of addresses + + @note: This is an API extension for use on Brightbox + + :param cloud_ip_id: The id of the cloud ip. + :type cloud_ip_id: ``str`` + + :return: True if the unmap was successful. + :rtype: ``bool`` + """ + response = self._post('/%s/cloud_ips/%s/unmap' % (self.api_version, + cloud_ip_id)) return response.status == httplib.ACCEPTED def ex_destroy_cloud_ip(self, cloud_ip_id): - response = self.connection.request('/%s/cloud_ips/%s' % (API_VERSION, cloud_ip_id), method='DELETE') + """ + Release the cloud IP address from the account's ownership + + @note: This is an API extension for use on Brightbox + + :param cloud_ip_id: The id of the cloud ip. + :type cloud_ip_id: ``str`` + :return: True if the unmap was successful. + :rtype: ``bool`` + """ + response = self.connection.request( + '/%s/cloud_ips/%s' % (self.api_version, + cloud_ip_id), + method='DELETE') return response.status == httplib.OK diff -Nru libcloud-0.5.0/libcloud/compute/drivers/cloudframes.py libcloud-0.15.1/libcloud/compute/drivers/cloudframes.py --- libcloud-0.5.0/libcloud/compute/drivers/cloudframes.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/cloudframes.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,431 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +CloudFrames Driver + +""" + +# (name, ram, disk, bandwidth, price, vcpus) +SIZES = [ + ('512mb_1core_10gb', 512, 10, 512, 0.025, 1), + ('1024mb_1core_20gb', 1024, 20, 512, 0.05, 1), + ('2048mb_2core_50gb', 2048, 50, 1024, 0.10, 2), + ('4096mb_2core_100gb', 4096, 100, 2048, 0.20, 2), + ('8192mb_4core_200gb', 8192, 200, 2048, 0.40, 4), + ('16384mb_4core_400gb', 16384, 400, 4096, 0.80, 4), +] + +import base64 +import random + +from libcloud.utils.py3 import urlparse, b +from libcloud.common.base import ConnectionKey +from libcloud.common.xmlrpc import XMLRPCResponse, XMLRPCConnection +from libcloud.common.types import ProviderError +from libcloud.compute.base import NodeImage, NodeSize, Node, NodeLocation +from libcloud.compute.base import NodeDriver +from libcloud.compute.types import Provider, NodeState + + +class CloudFramesException(ProviderError): + pass + + +class CloudFramesComponent(object): + """ + Represents a node in the cloudapi path. 
+ """ + + def __init__(self, cloudFramesConnection, name): + self.cloudFramesConnection = cloudFramesConnection + self.name = name + + def __getattr__(self, key): + return self.method(key) + + def method(self, methodname): + def foo(*args, **kwargs): + async = kwargs.get('async', False) + args = list(args) + args.append('') # jobguid + args.append({'wait': False} if async else {}) # executionparams + response = self.cloudFramesConnection.request( + 'cloud_api_%s.%s' % (self.name, methodname), *args) + if not response.success(): + response.parse_error() + if async: + return response.parse_body()['jobguid'] + else: + return response.parse_body()['result'] + return foo + + +class CloudFramesNodeSize(NodeSize): + + def __init__(self, id, name, ram, disk, bandwidth, price, driver, + vcpus=None): + super(CloudFramesNodeSize, self).__init__( + id, name, ram, disk, bandwidth, price, driver) + self.vcpus = vcpus + + +class CloudFramesNode(Node): + + def list_snapshots(self): + return self.driver.ex_list_snapshots(self) + + def snapshot(self, label='', description=''): + return self.driver.ex_snapshot_node(self, label, description) + + def rollback(self, snapshot): + return self.driver.ex_rollback_node(self, snapshot) + + +class CloudFramesSnapshot(object): + + def __init__(self, id, timestamp, label, description, driver): + self.id = id + self.timestamp = timestamp + self.label = label + self.description = description + self.driver = driver + + def destroy(self): + self.driver.ex_destroy_snapshot(self) + + +class CloudFramesConnection(XMLRPCConnection, ConnectionKey): + """ + Cloudapi connection class + """ + + repsonseCls = XMLRPCResponse + base_url = None + + def __init__(self, key=None, secret=None, secure=True, + host=None, port=None, url=None, timeout=None): + """ + :param key: The username to connect with to the cloudapi + :type key: ``str`` + + :param secret: The password to connect with to the cloudapi + :type secret: ``str`` + + :param secure: Should always be 
false at the moment + :type secure: ``bool`` + + :param host: The hostname of the cloudapi + :type host: ``str`` + + :param port: The port on which to connect to the cloudapi + :type port: ``int`` + + :param url: Url to the cloudapi (can replace all above) + :type url: ``str`` + """ + + super(CloudFramesConnection, self).__init__(key=key, secure=secure, + host=host, port=port, + url=url, timeout=timeout) + self._auth = base64.b64encode( + b('%s:%s' % (key, secret))).decode('utf-8') + self.endpoint = url + + def __getattr__(self, key): + return CloudFramesComponent(self, key) + + def add_default_headers(self, headers): + headers['Authorization'] = 'Basic %s' % self._auth + return headers + + +class CloudFramesNodeDriver(NodeDriver): + """ + CloudFrames node driver + """ + + connectionCls = CloudFramesConnection + + name = 'CloudFrames' + api_name = 'cloudframes' + website = 'http://www.cloudframes.net/' + type = Provider.CLOUDFRAMES + + NODE_STATE_MAP = { + 'CONFIGURED': NodeState.PENDING, + 'CREATED': NodeState.PENDING, + 'DELETING': NodeState.PENDING, + 'HALTED': NodeState.TERMINATED, + 'IMAGEONLY': NodeState.UNKNOWN, + 'ISCSIEXPOSED': NodeState.PENDING, + 'MOVING': NodeState.PENDING, + 'OVERLOADED': NodeState.UNKNOWN, + 'PAUSED': NodeState.TERMINATED, + 'RUNNING': NodeState.RUNNING, + 'STARTING': NodeState.PENDING, + 'STOPPING': NodeState.PENDING, + 'SYNCING': NodeState.PENDING, + 'TODELETE': NodeState.PENDING, + } + + # subclassed internal methods + def __init__(self, key=None, secret=None, secure=True, + host=None, port=None, url=None, **kwargs): + if not port: + port = 443 if secure else 80 + if url: + if not url.endswith('/'): + url += '/' + scheme, netloc, _, _, _, _ = urlparse.urlparse(url) + secure = (scheme == 'https') + if '@' in netloc: + auth, hostport = netloc.rsplit('@', 1) + if ':' in auth: + key, secret = auth.split(':', 1) + else: + key = auth + else: + hostport = netloc + if ':' in hostport: + host, port = hostport.split(':') + else: + host = 
hostport + hostport = '%s:%s' % (host, port) + url = url.replace(netloc, hostport) + else: + url = '%s://%s:%s/appserver/xmlrpc/' % ( + 'https' if secure else 'http', host, port) + + if secure: + raise NotImplementedError( + 'The cloudapi only supports unsecure connections') + + if key is None or secret is None: + raise NotImplementedError( + 'Unauthenticated support to the cloudapi is not supported') + + # connection url + self._url = url + + # cached attributes + self.__cloudspaceguid = None + self.__languid = None + self.__locations = [] + + super(CloudFramesNodeDriver, self).__init__( + key, secret, secure, host, port, **kwargs) + + def _ex_connection_class_kwargs(self): + return {'url': self._url} + + # internal methods + @property + def _cloudspaceguid(self): + if not self.__cloudspaceguid: + self.__cloudspaceguid = self.connection.cloudspace.find( + '', '', 'cloud', '')[0] + return self.__cloudspaceguid + + @property + def _languid(self): + if not self.__languid: + self.__languid = self.connection.lan.find( + '', '', 'public_virtual', '', '', '', '', '', '', '', '', '', + '', '', '', '', '')[0] + return self.__languid + + def _get_machine_data(self, guid): + """ + Looks up some basic data related to the given machine guid. 
+ """ + try: + d = self.connection.machine.list('', '', '', guid, '')[0] + except IndexError: + raise CloudFramesException('VM no longer exists', 404, self) + d['public_ips'] = [] + d['private_ips'] = [] + d['size'] = None + d['image'] = None + return d + + def _machine_find(self, template=False, machinetype=None, + machinerole=None): + # the cloudframes xmlrpc api requires you to pass all args and kwargs + # as positional arguments, you can't use keywords arguments + if not machinetype: + guids = [] + for machinetype in ['VIRTUALSERVER', 'VIRTUALDESKTOP']: + guids += self.connection.machine.find( + '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', + '', '', machinetype, template, '', '', '', '', '', '', '', + '', '', '', '', '', '', '') + else: + guids = self.connection.machine.find( + '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', + '', '', machinetype, '', '', '', '', '', '', '', '', + machinerole, '', '', '', '', '', '') + return guids + + def _to_image(self, image_dict): + return NodeImage(id=image_dict['guid'], + name=image_dict['name'], + driver=self.connection.driver) + + def _to_size(self, id, name, ram, disk, bandwidth, price, vcpus): + return CloudFramesNodeSize( + id, name, ram, disk, bandwidth, price, self, vcpus) + + def _to_location(self, location_dict): + return NodeLocation(id=location_dict['guid'], + name=location_dict['name'], + country=None, + driver=self) + + def _to_node(self, node_dict): + # only return nodes which can be worked with + # (ignore cloudframes internal autotests and deleted nodes) + if node_dict['status'] == 'CONFIGURED': + return None + return CloudFramesNode(id=node_dict['guid'], + name=node_dict['name'], + state=self.NODE_STATE_MAP.get( + node_dict['status'], NodeState.UNKNOWN), + public_ips=node_dict['public_ips'], + private_ips=node_dict['private_ips'], + driver=self.connection.driver, + size=node_dict['size'], + image=node_dict['image'], + extra={}) + + def _to_snapshot(self, snapshot_dict): + return 
CloudFramesSnapshot(id=snapshot_dict['guid'], + timestamp=snapshot_dict['timestamp'], + label=snapshot_dict['backuplabel'], + description=snapshot_dict['description'], + driver=self) + + # subclassed public methods, and provider specific public methods + def list_images(self, location=None): + image_ids = self._machine_find(template=True) + image_list = [] + for image_id in image_ids: + image_list.append(self._to_image(self._get_machine_data(image_id))) + return image_list + + def list_sizes(self, location=None): + sizes = [] + for id in range(len(SIZES)): + sizes.append(self._to_size(id, *SIZES[id])) + return sizes + + def list_locations(self, ex_use_cached=True): + if not self.__locations or not ex_use_cached: + self.__locations = [] + for location_id in self._machine_find(machinetype='PHYSICAL', + machinerole='COMPUTENODE'): + self.__locations.append( + self._to_location(self._get_machine_data(location_id))) + return self.__locations + + def list_nodes(self): + node_ids = self._machine_find() + node_list = [] + for node_id in node_ids: + node = self._to_node(self._get_machine_data(node_id)) + if node: + node_list.append(node) + return node_list + + def create_node(self, **kwargs): + """ + Creates a new node, by cloning the template provided. + + If no location object is passed, a random location will be used. + + + :param image: The template to be cloned (required) + :type image: ``list`` of :class:`NodeImage` + + :param name: The name for the new node (required) + :type name: ``str`` + + :param size: The size of the new node (required) + :type size: ``list`` of :class:`NodeSize` + + :param location: The location to create the new node + :type location: ``list`` of :class:`NodeLocation` + + :param default_gateway: The default gateway to be used + :type default_gateway: ``str`` + + :param extra: Additional requirements (extra disks fi.) 
+ :type extra: ``dict`` + + + :returns: ``list`` of :class:`Node` -- The newly created Node object + + :raises: CloudFramesException + """ + + additionalinfo = kwargs.get('extra', {}) + additionalinfo.update({ + 'memory': kwargs['size'].ram, + 'cpu': kwargs['size'].vcpus, + }) + guid = self.connection.machine.createFromTemplate( + self._cloudspaceguid, kwargs['image'].id, kwargs['name'], + [{'languid': self._languid}], kwargs['name'], + kwargs.get('location', random.choice(self.list_locations())).id, + kwargs.get('default_gateway', ''), None, additionalinfo) + if not self.connection.machine.start(guid): + raise CloudFramesException( + 'failed to start machine after creation', 500, self) + return self._to_node(self._get_machine_data(guid)) + + def destroy_node(self, node): + return self.connection.machine.delete(node.id, False) + + def reboot_node(self, node, ex_clean=True): + return self.connection.machine.reboot(node.id, ex_clean) + + def ex_snapshot_node(self, node, label='', description=''): + guid = self.connection.machine.snapshot( + node.id, label, description, False, False, 'PAUSED') + for snapshot in self.ex_list_snapshots(node): + if snapshot.id == guid: + return snapshot + else: + raise CloudFramesException('Snapshot creation failed', 500, self) + + def ex_rollback_node(self, node, snapshot): + if not node.state == NodeState.TERMINATED: + self.connection.machine.stop(node.id, False, 930) + success = self.connection.machine.rollback(node.id, snapshot.id) + self.connection.machine.start(node.id) + return success + + def ex_list_snapshots(self, node): + return [self._to_snapshot(snapshot_dict) for snapshot_dict in + self.connection.machine.listSnapshots(node.id, False, '', '')] + + def ex_destroy_snapshot(self, node, snapshot): + return self.connection.machine.delete(snapshot.id, False) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff -Nru libcloud-0.5.0/libcloud/compute/drivers/cloudsigma.py 
libcloud-0.15.1/libcloud/compute/drivers/cloudsigma.py --- libcloud-0.5.0/libcloud/compute/drivers/cloudsigma.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/cloudsigma.py 2014-07-02 18:47:55.000000000 +0000 @@ -13,120 +13,69 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + """ -CloudSigma Driver +Drivers for CloudSigma API v1.0 and v2.0. """ + import re import time +import copy import base64 -from libcloud.utils import str2dicts, str2list, dict2str -from libcloud.common.base import ConnectionUserAndKey, Response -from libcloud.common.types import InvalidCredsError +try: + import simplejson as json +except: + import json + +from libcloud.utils.py3 import b +from libcloud.utils.py3 import httplib + +from libcloud.utils.misc import str2dicts, str2list, dict2str +from libcloud.common.base import ConnectionUserAndKey, JsonResponse, Response +from libcloud.common.types import InvalidCredsError, ProviderError +from libcloud.common.cloudsigma import INSTANCE_TYPES +from libcloud.common.cloudsigma import API_ENDPOINTS_1_0 +from libcloud.common.cloudsigma import API_ENDPOINTS_2_0 +from libcloud.common.cloudsigma import DEFAULT_API_VERSION, DEFAULT_REGION from libcloud.compute.types import NodeState, Provider from libcloud.compute.base import NodeDriver, NodeSize, Node from libcloud.compute.base import NodeImage +from libcloud.compute.base import is_private_subnet +from libcloud.utils.iso8601 import parse_date +from libcloud.utils.misc import get_secure_random_string + +__all__ = [ + 'CloudSigmaNodeDriver', + 'CloudSigma_1_0_NodeDriver', + 'CloudSigma_2_0_NodeDriver', + 'CloudSigmaError', + + 'CloudSigmaNodeSize', + 'CloudSigmaDrive', + 'CloudSigmaTag', + 'CloudSigmaSubscription', + 'CloudSigmaFirewallPolicy', + 'CloudSigmaFirewallPolicyRule' +] -# API end-points -API_ENDPOINTS = { - 'zrh': { - 'name': 
'Zurich', - 'country': 'Switzerland', - 'host': 'api.cloudsigma.com' - }, -} - -# Default API end-point for the base connection clase. -DEFAULT_ENDPOINT = 'zrh' - -# CloudSigma doesn't specify special instance types. -# Basically for CPU any value between 0.5 GHz and 20.0 GHz should work, 500 MB to 32000 MB for ram -# and 1 GB to 1024 GB for hard drive size. -# Plans in this file are based on examples listed on http://www.cloudsigma.com/en/pricing/price-schedules -INSTANCE_TYPES = { - 'micro-regular': { - 'id': 'micro-regular', - 'name': 'Micro/Regular instance', - 'cpu': 1100, - 'memory': 640, - 'disk': 50, - 'bandwidth': None, - }, - 'micro-high-cpu': { - 'id': 'micro-high-cpu', - 'name': 'Micro/High CPU instance', - 'cpu': 2200, - 'memory': 640, - 'disk': 80, - 'bandwidth': None, - }, - 'standard-small': { - 'id': 'standard-small', - 'name': 'Standard/Small instance', - 'cpu': 1100, - 'memory': 1741, - 'disk': 50, - 'bandwidth': None, - }, - 'standard-large': { - 'id': 'standard-large', - 'name': 'Standard/Large instance', - 'cpu': 4400, - 'memory': 7680, - 'disk': 250, - 'bandwidth': None, - }, - 'standard-extra-large': { - 'id': 'standard-extra-large', - 'name': 'Standard/Extra Large instance', - 'cpu': 8800, - 'memory': 15360, - 'disk': 500, - 'bandwidth': None, - }, - 'high-memory-extra-large': { - 'id': 'high-memory-extra-large', - 'name': 'High Memory/Extra Large instance', - 'cpu': 7150, - 'memory': 17510, - 'disk': 250, - 'bandwidth': None, - }, - 'high-memory-double-extra-large': { - 'id': 'high-memory-double-extra-large', - 'name': 'High Memory/Double Extra Large instance', - 'cpu': 14300, - 'memory': 32768, - 'disk': 500, - 'bandwidth': None, - }, - 'high-cpu-medium': { - 'id': 'high-cpu-medium', - 'name': 'High CPU/Medium instance', - 'cpu': 5500, - 'memory': 1741, - 'disk': 150, - 'bandwidth': None, - }, - 'high-cpu-extra-large': { - 'id': 'high-cpu-extra-large', - 'name': 'High CPU/Extra Large instance', - 'cpu': 20000, - 'memory': 7168, - 'disk': 
500, - 'bandwidth': None, - } -} -NODE_STATE_MAP = { - 'active': NodeState.RUNNING, - 'stopped': NodeState.TERMINATED, - 'dead': NodeState.TERMINATED, - 'dumped': NodeState.TERMINATED, -} +class CloudSigmaNodeDriver(NodeDriver): + name = 'CloudSigma' + website = 'http://www.cloudsigma.com/' + + def __new__(cls, key, secret=None, secure=True, host=None, port=None, + api_version=DEFAULT_API_VERSION, **kwargs): + if cls is CloudSigmaNodeDriver: + if api_version == '1.0': + cls = CloudSigma_1_0_NodeDriver + elif api_version == '2.0': + cls = CloudSigma_2_0_NodeDriver + else: + raise NotImplementedError('Unsupported API version: %s' % + (api_version)) + return super(CloudSigmaNodeDriver, cls).__new__(cls) -# Default timeout (in seconds) for the drive imaging process -IMAGING_TIMEOUT = 20 * 60 class CloudSigmaException(Exception): def __str__(self): @@ -135,13 +84,33 @@ def __repr__(self): return "" % (self.args[0]) + class CloudSigmaInsufficientFundsException(Exception): def __repr__(self): return "" % (self.args[0]) -class CloudSigmaResponse(Response): + +class CloudSigmaNodeSize(NodeSize): + def __init__(self, id, name, cpu, ram, disk, bandwidth, price, driver): + self.id = id + self.name = name + self.cpu = cpu + self.ram = ram + self.disk = disk + self.bandwidth = bandwidth + self.price = price + self.driver = driver + + def __repr__(self): + return (('') + % (self.id, self.name, self.cpu, self.ram, self.disk, + self.bandwidth, self.price, self.driver.name)) + + +class CloudSigma_1_0_Response(Response): def success(self): - if self.status == 401: + if self.status == httplib.UNAUTHORIZED: raise InvalidCredsError() return self.status >= 200 and self.status <= 299 @@ -155,45 +124,58 @@ def parse_error(self): return 'Error: %s' % (self.body.replace('errors:', '').strip()) -class CloudSigmaNodeSize(NodeSize): - def __init__(self, id, name, cpu, ram, disk, bandwidth, price, driver): - self.id = id - self.name = name - self.cpu = cpu - self.ram = ram - self.disk = disk - 
self.bandwidth = bandwidth - self.price = price - self.driver = driver - def __repr__(self): - return (('') - % (self.id, self.name, self.cpu, self.ram, self.disk, self.bandwidth, - self.price, self.driver.name)) - -class CloudSigmaBaseConnection(ConnectionUserAndKey): - host = API_ENDPOINTS[DEFAULT_ENDPOINT]['host'] - responseCls = CloudSigmaResponse +class CloudSigma_1_0_Connection(ConnectionUserAndKey): + host = API_ENDPOINTS_1_0[DEFAULT_REGION]['host'] + responseCls = CloudSigma_1_0_Response def add_default_headers(self, headers): headers['Accept'] = 'application/json' headers['Content-Type'] = 'application/json' - headers['Authorization'] = 'Basic %s' % (base64.b64encode('%s:%s' % (self.user_id, self.key))) - + headers['Authorization'] = 'Basic %s' % (base64.b64encode( + b('%s:%s' % (self.user_id, self.key))).decode('utf-8')) return headers -class CloudSigmaBaseNodeDriver(NodeDriver): + +class CloudSigma_1_0_NodeDriver(CloudSigmaNodeDriver): type = Provider.CLOUDSIGMA - name = 'CloudSigma' - connectionCls = CloudSigmaBaseConnection + name = 'CloudSigma (API v1.0)' + website = 'http://www.cloudsigma.com/' + connectionCls = CloudSigma_1_0_Connection + + IMAGING_TIMEOUT = 20 * 60 # Default timeout (in seconds) for the drive + # imaging process + + NODE_STATE_MAP = { + 'active': NodeState.RUNNING, + 'stopped': NodeState.TERMINATED, + 'dead': NodeState.TERMINATED, + 'dumped': NodeState.TERMINATED, + } + + def __init__(self, key, secret=None, secure=True, host=None, port=None, + region=DEFAULT_REGION, **kwargs): + if region not in API_ENDPOINTS_1_0: + raise ValueError('Invalid region: %s' % (region)) + + self._host_argument_set = host is not None + self.api_name = 'cloudsigma_%s' % (region) + super(CloudSigma_1_0_NodeDriver, self).__init__(key=key, secret=secret, + secure=secure, + host=host, + port=port, + region=region, + **kwargs) def reboot_node(self, node): """ Reboot a node. 
- Because Cloudsigma API does not provide native reboot call, it's emulated using stop and start. + Because Cloudsigma API does not provide native reboot call, + it's emulated using stop and start. + + @inherits: :class:`NodeDriver.reboot_node` """ node = self._get_node(node.id) state = node.state @@ -204,7 +186,8 @@ stopped = True if not stopped: - raise CloudSigmaException('Could not stop node with id %s' % (node.id)) + raise CloudSigmaException( + 'Could not stop node with id %s' % (node.id)) success = self.ex_start_node(node) @@ -215,6 +198,8 @@ Destroy a node (all the drives associated with it are NOT destroyed). If a node is still running, it's stopped before it's destroyed. + + @inherits: :class:`NodeDriver.destroy_node` """ node = self._get_node(node.id) state = node.state @@ -226,48 +211,51 @@ stopped = True if not stopped: - raise CloudSigmaException('Could not stop node with id %s' % (node.id)) + raise CloudSigmaException( + 'Could not stop node with id %s' % (node.id)) - response = self.connection.request(action = '/servers/%s/destroy' % (node.id), - method = 'POST') + response = self.connection.request( + action='/servers/%s/destroy' % (node.id), + method='POST') return response.status == 204 def list_images(self, location=None): """ - Return a list of available standard images (this call might take up to 15 seconds to return). + Return a list of available standard images (this call might take up + to 15 seconds to return). 
+ + @inherits: :class:`NodeDriver.list_images` """ - response = self.connection.request(action = '/drives/standard/info').object + response = self.connection.request( + action='/drives/standard/info').object images = [] for value in response: if value.get('type'): if value['type'] == 'disk': - image = NodeImage(id = value['drive'], name = value['name'], driver = self.connection.driver, - extra = {'size': value['size']}) + image = NodeImage(id=value['drive'], name=value['name'], + driver=self.connection.driver, + extra={'size': value['size']}) images.append(image) return images - def list_sizes(self, location = None): - """ - Return a list of available node sizes. - """ + def list_sizes(self, location=None): sizes = [] - for key, value in INSTANCE_TYPES.iteritems(): - size = CloudSigmaNodeSize(id = value['id'], name = value['name'], - cpu = value['cpu'], ram = value['memory'], - disk = value['disk'], bandwidth = value['bandwidth'], - price = self._get_size_price(size_id=key), - driver = self.connection.driver) + for value in INSTANCE_TYPES: + key = value['id'] + size = CloudSigmaNodeSize(id=value['id'], name=value['name'], + cpu=value['cpu'], ram=value['memory'], + disk=value['disk'], + bandwidth=value['bandwidth'], + price=self._get_size_price(size_id=key), + driver=self.connection.driver) sizes.append(size) return sizes def list_nodes(self): - """ - Return a list of nodes. - """ - response = self.connection.request(action = '/servers/info').object + response = self.connection.request(action='/servers/info').object nodes = [] for data in response: @@ -280,68 +268,88 @@ """ Creates a CloudSigma instance - See L{NodeDriver.create_node} for more keyword args. 
+ @inherits: :class:`NodeDriver.create_node` - @keyword name: String with a name for this new node (required) - @type name: C{string} + :keyword name: String with a name for this new node (required) + :type name: ``str`` - @keyword smp: Number of virtual processors or None to calculate based on the cpu speed - @type smp: C{int} + :keyword smp: Number of virtual processors or None to calculate + based on the cpu speed + :type smp: ``int`` - @keyword nic_model: e1000, rtl8139 or virtio (is not specified, e1000 is used) - @type nic_model: C{string} + :keyword nic_model: e1000, rtl8139 or virtio (is not specified, + e1000 is used) + :type nic_model: ``str`` - @keyword vnc_password: If not set, VNC access is disabled. - @type vnc_password: C{bool} + :keyword vnc_password: If not set, VNC access is disabled. + :type vnc_password: ``bool`` + + :keyword drive_type: Drive type (ssd|hdd). Defaults to hdd. + :type drive_type: ``str`` """ size = kwargs['size'] image = kwargs['image'] smp = kwargs.get('smp', 'auto') nic_model = kwargs.get('nic_model', 'e1000') vnc_password = kwargs.get('vnc_password', None) + drive_type = kwargs.get('drive_type', 'hdd') if nic_model not in ['e1000', 'rtl8139', 'virtio']: raise CloudSigmaException('Invalid NIC model specified') - drive_data = {} - drive_data.update({'name': kwargs['name'], 'size': '%sG' % (kwargs['size'].disk)}) + if drive_type not in ['hdd', 'ssd']: + raise CloudSigmaException('Invalid drive type "%s". 
Valid types' + ' are: hdd, ssd' % (drive_type)) - response = self.connection.request(action = '/drives/%s/clone' % image.id, data = dict2str(drive_data), - method = 'POST').object + drive_data = {} + drive_data.update({'name': kwargs['name'], + 'size': '%sG' % (kwargs['size'].disk), + 'driveType': drive_type}) + + response = self.connection.request( + action='/drives/%s/clone' % image.id, + data=dict2str(drive_data), + method='POST').object if not response: raise CloudSigmaException('Drive creation failed') drive_uuid = response[0]['drive'] - response = self.connection.request(action = '/drives/%s/info' % (drive_uuid)).object + response = self.connection.request( + action='/drives/%s/info' % (drive_uuid)).object imaging_start = time.time() - while response[0].has_key('imaging'): - response = self.connection.request(action = '/drives/%s/info' % (drive_uuid)).object + while 'imaging' in response[0]: + response = self.connection.request( + action='/drives/%s/info' % (drive_uuid)).object elapsed_time = time.time() - imaging_start - if response[0].has_key('imaging') and elapsed_time >= IMAGING_TIMEOUT: + timed_out = elapsed_time >= self.IMAGING_TIMEOUT + if 'imaging' in response[0] and timed_out: raise CloudSigmaException('Drive imaging timed out') time.sleep(1) node_data = {} - node_data.update({'name': kwargs['name'], 'cpu': size.cpu, 'mem': size.ram, 'ide:0:0': drive_uuid, - 'boot': 'ide:0:0', 'smp': smp}) + node_data.update( + {'name': kwargs['name'], 'cpu': size.cpu, 'mem': size.ram, + 'ide:0:0': drive_uuid, 'boot': 'ide:0:0', 'smp': smp}) node_data.update({'nic:0:model': nic_model, 'nic:0:dhcp': 'auto'}) if vnc_password: node_data.update({'vnc:ip': 'auto', 'vnc:password': vnc_password}) - response = self.connection.request(action = '/servers/create', data = dict2str(node_data), - method = 'POST').object + response = self.connection.request(action='/servers/create', + data=dict2str(node_data), + method='POST').object if not isinstance(response, list): - response = 
[ response ] + response = [response] node = self._to_node(response[0]) if node is None: # Insufficient funds, destroy created drive self.ex_drive_destroy(drive_uuid) - raise CloudSigmaInsufficientFundsException('Insufficient funds, node creation failed') + raise CloudSigmaInsufficientFundsException( + 'Insufficient funds, node creation failed') # Start the node after it has been created started = self.ex_start_node(node) @@ -354,13 +362,20 @@ def ex_destroy_node_and_drives(self, node): """ Destroy a node and all the drives associated with it. + + :param node: Node which should be used + :type node: :class:`libcloud.compute.base.Node` + + :rtype: ``bool`` """ node = self._get_node_info(node) drive_uuids = [] - for key, value in node.iteritems(): - if (key.startswith('ide:') or key.startswith('scsi') or key.startswith('block')) and \ - not (key.endswith(':bytes') or key.endswith(':requests') or key.endswith('media')): + for key, value in node.items(): + if (key.startswith('ide:') or key.startswith( + 'scsi') or key.startswith('block')) and\ + not (key.endswith(':bytes') or + key.endswith(':requests') or key.endswith('media')): drive_uuids.append(value) node_destroyed = self.destroy_node(self._to_node(node)) @@ -376,8 +391,11 @@ def ex_static_ip_list(self): """ Return a list of available static IP addresses. + + :rtype: ``list`` of ``str`` """ - response = self.connection.request(action = '/resources/ip/list', method = 'GET') + response = self.connection.request(action='/resources/ip/list', + method='GET') if response.status != 200: raise CloudSigmaException('Could not retrieve IP list') @@ -388,17 +406,22 @@ def ex_drives_list(self): """ Return a list of all the available drives. 
+ + :rtype: ``list`` of ``dict`` """ - response = self.connection.request(action = '/drives/info', method = 'GET') + response = self.connection.request(action='/drives/info', method='GET') result = str2dicts(response.body) return result def ex_static_ip_create(self): """ - Create a new static IP address. + Create a new static IP address.p + + :rtype: ``list`` of ``dict`` """ - response = self.connection.request(action = '/resources/ip/create', method = 'GET') + response = self.connection.request(action='/resources/ip/create', + method='GET') result = str2dicts(response.body) return result @@ -406,8 +429,14 @@ def ex_static_ip_destroy(self, ip_address): """ Destroy a static IP address. + + :param ip_address: IP address which should be used + :type ip_address: ``str`` + + :rtype: ``bool`` """ - response = self.connection.request(action = '/resources/ip/%s/destroy' % (ip_address), method = 'GET') + response = self.connection.request( + action='/resources/ip/%s/destroy' % (ip_address), method='GET') return response.status == 204 @@ -415,23 +444,40 @@ """ Destroy a drive with a specified uuid. If the drive is currently mounted an exception is thrown. + + :param drive_uuid: Drive uuid which should be used + :type drive_uuid: ``str`` + + :rtype: ``bool`` """ - response = self.connection.request(action = '/drives/%s/destroy' % (drive_uuid), method = 'POST') + response = self.connection.request( + action='/drives/%s/destroy' % (drive_uuid), method='POST') return response.status == 204 - def ex_set_node_configuration(self, node, **kwargs): """ Update a node configuration. Changing most of the parameters requires node to be stopped. 
+ + :param node: Node which should be used + :type node: :class:`libcloud.compute.base.Node` + + :param kwargs: keyword arguments + :type kwargs: ``dict`` + + :rtype: ``bool`` """ - valid_keys = ('^name$', '^parent$', '^cpu$', '^smp$', '^mem$', '^boot$', '^nic:0:model$', '^nic:0:dhcp', - '^nic:1:model$', '^nic:1:vlan$', '^nic:1:mac$', '^vnc:ip$', '^vnc:password$', '^vnc:tls', - '^ide:[0-1]:[0-1](:media)?$', '^scsi:0:[0-7](:media)?$', '^block:[0-7](:media)?$') + valid_keys = ('^name$', '^parent$', '^cpu$', '^smp$', '^mem$', + '^boot$', '^nic:0:model$', '^nic:0:dhcp', + '^nic:1:model$', '^nic:1:vlan$', '^nic:1:mac$', + '^vnc:ip$', '^vnc:password$', '^vnc:tls', + '^ide:[0-1]:[0-1](:media)?$', '^scsi:0:[0-7](:media)?$', + '^block:[0-7](:media)?$') invalid_keys = [] - for key in kwargs.keys(): + keys = list(kwargs.keys()) + for key in keys: matches = False for regex in valid_keys: if re.match(regex, key): @@ -441,48 +487,82 @@ invalid_keys.append(key) if invalid_keys: - raise CloudSigmaException('Invalid configuration key specified: %s' % (',' .join(invalid_keys))) - - response = self.connection.request(action = '/servers/%s/set' % (node.id), data = dict2str(kwargs), - method = 'POST') + raise CloudSigmaException( + 'Invalid configuration key specified: %s' % + (',' .join(invalid_keys))) + + response = self.connection.request( + action='/servers/%s/set' % (node.id), + data=dict2str(kwargs), + method='POST') return (response.status == 200 and response.body != '') def ex_start_node(self, node): """ Start a node. + + :param node: Node which should be used + :type node: :class:`libcloud.compute.base.Node` + + :rtype: ``bool`` """ - response = self.connection.request(action = '/servers/%s/start' % (node.id), - method = 'POST') + response = self.connection.request( + action='/servers/%s/start' % (node.id), + method='POST') return response.status == 200 def ex_stop_node(self, node): """ Stop (shutdown) a node. 
+ + :param node: Node which should be used + :type node: :class:`libcloud.compute.base.Node` + + :rtype: ``bool`` """ - response = self.connection.request(action = '/servers/%s/stop' % (node.id), - method = 'POST') + response = self.connection.request( + action='/servers/%s/stop' % (node.id), + method='POST') return response.status == 204 def ex_shutdown_node(self, node): """ Stop (shutdown) a node. + + @inherits: :class:`CloudSigmaBaseNodeDriver.ex_stop_node` """ return self.ex_stop_node(node) def ex_destroy_drive(self, drive_uuid): """ Destroy a drive. + + :param drive_uuid: Drive uuid which should be used + :type drive_uuid: ``str`` + + :rtype: ``bool`` """ - response = self.connection.request(action = '/drives/%s/destroy' % (drive_uuid), - method = 'POST') + response = self.connection.request( + action='/drives/%s/destroy' % (drive_uuid), + method='POST') return response.status == 204 + def _ex_connection_class_kwargs(self): + """ + Return the host value based on the user supplied region. + """ + kwargs = {} + if not self._host_argument_set: + kwargs['host'] = API_ENDPOINTS_1_0[self.region]['host'] + + return kwargs + def _to_node(self, data): if data: try: - state = NODE_STATE_MAP[data['status']] + state = self.NODE_STATE_MAP[data['status']] except KeyError: state = NodeState.UNKNOWN @@ -491,17 +571,18 @@ # creation failed because of insufficient funds. 
return None - public_ip = [] - if data.has_key('nic:0:dhcp'): + public_ips = [] + if 'nic:0:dhcp' in data: if isinstance(data['nic:0:dhcp'], list): - public_ip = data['nic:0:dhcp'] + public_ips = data['nic:0:dhcp'] else: - public_ip = [data['nic:0:dhcp']] + public_ips = [data['nic:0:dhcp']] extra = {} - extra_keys = [ ('cpu', 'int'), ('smp', 'auto'), ('mem', 'int'), ('status', 'str') ] + extra_keys = [('cpu', 'int'), ('smp', 'auto'), ('mem', 'int'), + ('status', 'str')] for key, value_type in extra_keys: - if data.has_key(key): + if key in data: value = data[key] if value_type == 'int': @@ -514,12 +595,14 @@ extra.update({key: value}) - if data.has_key('vnc:ip') and data.has_key('vnc:password'): - extra.update({'vnc_ip': data['vnc:ip'], 'vnc_password': data['vnc:password']}) - - node = Node(id = data['server'], name = data['name'], state = state, - public_ip = public_ip, private_ip = None, driver = self.connection.driver, - extra = extra) + if 'vnc:ip' in data and 'vnc:password' in data: + extra.update({'vnc_ip': data['vnc:ip'], + 'vnc_password': data['vnc:password']}) + + node = Node(id=data['server'], name=data['name'], state=state, + public_ips=public_ips, private_ips=None, + driver=self.connection.driver, + extra=extra) return node return None @@ -529,25 +612,1482 @@ node = [node for node in nodes if node.id == node.id] if not node: - raise CloudSigmaException('Node with id %s does not exist' % (node_id)) + raise CloudSigmaException( + 'Node with id %s does not exist' % (node_id)) return node[0] def _get_node_info(self, node): - response = self.connection.request(action = '/servers/%s/info' % (node.id)) + response = self.connection.request( + action='/servers/%s/info' % (node.id)) result = str2dicts(response.body) return result[0] -class CloudSigmaZrhConnection(CloudSigmaBaseConnection): + +class CloudSigmaZrhConnection(CloudSigma_1_0_Connection): """ Connection class for the CloudSigma driver for the Zurich end-point """ - host = 
API_ENDPOINTS[DEFAULT_ENDPOINT]['host'] + host = API_ENDPOINTS_1_0['zrh']['host'] -class CloudSigmaZrhNodeDriver(CloudSigmaBaseNodeDriver): + +class CloudSigmaZrhNodeDriver(CloudSigma_1_0_NodeDriver): """ CloudSigma node driver for the Zurich end-point """ connectionCls = CloudSigmaZrhConnection api_name = 'cloudsigma_zrh' + + +class CloudSigmaLvsConnection(CloudSigma_1_0_Connection): + """ + Connection class for the CloudSigma driver for the Las Vegas end-point + """ + host = API_ENDPOINTS_1_0['lvs']['host'] + + +class CloudSigmaLvsNodeDriver(CloudSigma_1_0_NodeDriver): + """ + CloudSigma node driver for the Las Vegas end-point + """ + connectionCls = CloudSigmaLvsConnection + api_name = 'cloudsigma_lvs' + + +class CloudSigmaError(ProviderError): + """ + Represents CloudSigma API error. + """ + + def __init__(self, http_code, error_type, error_msg, error_point, driver): + """ + :param http_code: HTTP status code. + :type http_code: ``int`` + + :param error_type: Type of error (validation / notexist / backend / + permissions database / concurrency / billing / + payment) + :type error_type: ``str`` + + :param error_msg: A description of the error that occurred. + :type error_msg: ``str`` + + :param error_point: Point at which the error occurred. Can be None. + :type error_point: ``str`` or ``None`` + """ + super(CloudSigmaError, self).__init__(http_code=http_code, + value=error_msg, driver=driver) + self.error_type = error_type + self.error_msg = error_msg + self.error_point = error_point + + +class CloudSigmaSubscription(object): + """ + Represents CloudSigma subscription. + """ + + def __init__(self, id, resource, amount, period, status, price, start_time, + end_time, auto_renew, subscribed_object=None): + """ + :param id: Subscription ID. + :type id: ``str`` + + :param resource: Resource (e.g vlan, ip, etc.). + :type resource: ``str`` + + :param period: Subscription period. + :type period: ``str`` + + :param status: Subscription status (active / inactive). 
+ :type status: ``str`` + + :param price: Subscription price. + :type price: ``str`` + + :param start_time: Start time for this subscription. + :type start_time: ``datetime.datetime`` + + :param end_time: End time for this subscription. + :type end_time: ``datetime.datetime`` + + :param auto_renew: True if the subscription is auto renewed. + :type auto_renew: ``bool`` + + :param subscribed_object: Optional UUID of the subscribed object. + :type subscribed_object: ``str`` + """ + self.id = id + self.resource = resource + self.amount = amount + self.period = period + self.status = status + self.price = price + self.start_time = start_time + self.end_time = end_time + self.auto_renew = auto_renew + self.subscribed_object = subscribed_object + + def __str__(self): + return self.__repr__() + + def __repr__(self): + return ('' % + (self.id, self.resource, self.amount, self.period, + self.subscribed_object)) + + +class CloudSigmaTag(object): + """ + Represents a CloudSigma tag object. + """ + + def __init__(self, id, name, resources=None): + """ + :param id: Tag ID. + :type id: ``str`` + + :param name: Tag name. + :type name: ``str`` + + :param resource: IDs of resources which are associated with this tag. + :type resources: ``list`` of ``str`` + """ + self.id = id + self.name = name + self.resources = resources if resources else [] + + def __str__(self): + return self.__repr__() + + def __repr__(self): + return ('' % + (self.id, self.name, repr(self.resources))) + + +class CloudSigmaDrive(NodeImage): + """ + Represents a CloudSigma drive. + """ + + def __init__(self, id, name, size, media, status, driver, extra=None): + """ + :param id: Drive ID. + :type id: ``str`` + + :param name: Drive name. + :type name: ``str`` + + :param size: Drive size (in bytes). + :type size: ``int`` + + :param media: Drive media (cdrom / disk). + :type media: ``str`` + + :param status: Drive status (unmounted / mounted). 
+ :type status: ``str`` + """ + super(CloudSigmaDrive, self).__init__(id=id, name=name, driver=driver, + extra=extra) + self.size = size + self.media = media + self.status = status + + def __str__(self): + return self.__repr__() + + def __repr__(self): + return (('') % + (self.id, self.name, self.size, self.media, self.status)) + + +class CloudSigmaFirewallPolicy(object): + """ + Represents a CloudSigma firewall policy. + """ + + def __init__(self, id, name, rules): + """ + :param id: Policy ID. + :type id: ``str`` + + :param name: Policy name. + :type name: ``str`` + + :param rules: Rules associated with this policy. + :type rules: ``list`` of :class:`.CloudSigmaFirewallPolicyRule` objects + """ + self.id = id + self.name = name + self.rules = rules if rules else [] + + def __str__(self): + return self.__repr__() + + def __repr__(self): + return (('') % + (self.id, self.name, repr(self.rules))) + + +class CloudSigmaFirewallPolicyRule(object): + """ + Represents a CloudSigma firewall policy rule. + """ + + def __init__(self, action, direction, ip_proto=None, src_ip=None, + src_port=None, dst_ip=None, dst_port=None, comment=None): + """ + :param action: Action (drop / accept). + :type action: ``str`` + + :param direction: Rule direction (in / out / both)> + :type direction: ``str`` + + :param ip_proto: IP protocol (tcp / udp). + :type ip_proto: ``str``. + + :param src_ip: Source IP in CIDR notation. + :type src_ip: ``str`` + + :param src_port: Source port or a port range. + :type src_port: ``str`` + + :param dst_ip: Destination IP in CIDR notation. + :type dst_ip: ``str`` + + :param src_port: Destination port or a port range. + :type src_port: ``str`` + + :param comment: Comment associated with the policy. 
+ :type comment: ``str`` + """ + self.action = action + self.direction = direction + self.ip_proto = ip_proto + self.src_ip = src_ip + self.src_port = src_port + self.dst_ip = dst_ip + self.dst_port = dst_port + self.comment = comment + + def __str__(self): + return self.__repr__() + + def __repr__(self): + return (('') % + (self.action, self.direction)) + + +class CloudSigma_2_0_Response(JsonResponse): + success_status_codes = [ + httplib.OK, + httplib.ACCEPTED, + httplib.NO_CONTENT, + httplib.CREATED + ] + + def success(self): + return self.status in self.success_status_codes + + def parse_error(self): + if int(self.status) == httplib.UNAUTHORIZED: + raise InvalidCredsError('Invalid credentials') + + body = self.parse_body() + errors = self._parse_errors_from_body(body=body) + + if errors: + # Throw first error + raise errors[0] + + return body + + def _parse_errors_from_body(self, body): + """ + Parse errors from the response body. + + :return: List of error objects. + :rtype: ``list`` of :class:`.CloudSigmaError` objects + """ + errors = [] + + if not isinstance(body, list): + return None + + for item in body: + if 'error_type' not in item: + # Unrecognized error + continue + + error = CloudSigmaError(http_code=self.status, + error_type=item['error_type'], + error_msg=item['error_message'], + error_point=item['error_point'], + driver=self.connection.driver) + errors.append(error) + + return errors + + +class CloudSigma_2_0_Connection(ConnectionUserAndKey): + host = API_ENDPOINTS_2_0[DEFAULT_REGION]['host'] + responseCls = CloudSigma_2_0_Response + api_prefix = '/api/2.0' + + def add_default_headers(self, headers): + headers['Accept'] = 'application/json' + headers['Content-Type'] = 'application/json' + + headers['Authorization'] = 'Basic %s' % (base64.b64encode( + b('%s:%s' % (self.user_id, self.key))).decode('utf-8')) + return headers + + def encode_data(self, data): + data = json.dumps(data) + return data + + def request(self, action, params=None, data=None, 
headers=None, + method='GET', raw=False): + params = params or {} + action = self.api_prefix + action + + if method == 'GET': + params['limit'] = 0 # we want all the items back + + return super(CloudSigma_2_0_Connection, self).request(action=action, + params=params, + data=data, + headers=headers, + method=method, + raw=raw) + + +class CloudSigma_2_0_NodeDriver(CloudSigmaNodeDriver): + """ + Driver for CloudSigma API v2.0. + """ + name = 'CloudSigma (API v2.0)' + api_name = 'cloudsigma_zrh' + website = 'http://www.cloudsigma.com/' + connectionCls = CloudSigma_2_0_Connection + + # Default drive transition timeout in seconds + DRIVE_TRANSITION_TIMEOUT = 500 + + # How long to sleep between different polling periods while waiting for + # drive transition + DRIVE_TRANSITION_SLEEP_INTERVAL = 5 + + NODE_STATE_MAP = { + 'starting': NodeState.PENDING, + 'stopping': NodeState.PENDING, + 'unavailable': NodeState.PENDING, + 'running': NodeState.RUNNING, + 'stopped': NodeState.STOPPED, + 'paused': NodeState.STOPPED + } + + def __init__(self, key, secret, secure=True, host=None, port=None, + region=DEFAULT_REGION, **kwargs): + if region not in API_ENDPOINTS_2_0: + raise ValueError('Invalid region: %s' % (region)) + + if not secure: + # CloudSigma drive uses Basic Auth authentication and we don't want + # to allow user to accidentally send credentials over the wire in + # plain-text + raise ValueError('CloudSigma driver only supports a ' + 'secure connection') + + self._host_argument_set = host is not None + super(CloudSigma_2_0_NodeDriver, self).__init__(key=key, secret=secret, + secure=secure, + host=host, port=port, + region=region, + **kwargs) + + def list_nodes(self, ex_tag=None): + """ + List available nodes. + + :param ex_tag: If specified, only return servers tagged with the + provided tag. 
+ :type ex_tag: :class:`CloudSigmaTag` + """ + if ex_tag: + action = '/tags/%s/servers/detail/' % (ex_tag.id) + else: + action = '/servers/detail/' + + response = self.connection.request(action=action, method='GET').object + nodes = [self._to_node(data=item) for item in response['objects']] + return nodes + + def list_sizes(self): + """ + List available sizes. + """ + sizes = [] + for value in INSTANCE_TYPES: + key = value['id'] + size = CloudSigmaNodeSize(id=value['id'], name=value['name'], + cpu=value['cpu'], ram=value['memory'], + disk=value['disk'], + bandwidth=value['bandwidth'], + price=self._get_size_price(size_id=key), + driver=self.connection.driver) + sizes.append(size) + + return sizes + + def list_images(self): + """ + Return a list of available pre-installed library drives. + + Note: If you want to list all the available library drives (both + pre-installed and installation CDs), use :meth:`ex_list_library_drives` + method. + """ + response = self.connection.request(action='/libdrives/').object + images = [self._to_image(data=item) for item in response['objects']] + + # We filter out non pre-installed library drives by default because + # they can't be used directly following a default Libcloud server + # creation flow. + images = [image for image in images if + image.extra['image_type'] == 'preinst'] + return images + + def create_node(self, name, size, image, ex_metadata=None, + ex_vnc_password=None, ex_avoid=None, ex_vlan=None): + """ + Create a new server. + + Server creation consists multiple steps depending on the type of the + image used. + + 1. Installation CD: + + 1. Create a server and attach installation cd + 2. Start a server + + 2. Pre-installed image: + + 1. Clone provided library drive so we can use it + 2. Resize cloned drive to the desired size + 3. Create a server and attach cloned drive + 4. Start a server + + :param ex_metadata: Key / value pairs to associate with the + created node. 
(optional) + :type ex_metadata: ``dict`` + + :param ex_vnc_password: Password to use for VNC access. If not + provided, random password is generated. + :type ex_vnc_password: ``str`` + + :param ex_avoid: A list of server UUIDs to avoid when starting this + node. (optional) + :type ex_avoid: ``list`` + + :param ex_vlan: Optional UUID of a VLAN network to use. If specified, + server will have two nics assigned - 1 with a public ip + and 1 with the provided VLAN. + :type ex_vlan: ``str`` + """ + is_installation_cd = self._is_installation_cd(image=image) + + if ex_vnc_password: + vnc_password = ex_vnc_password + else: + # VNC password is not provided, generate a random one. + vnc_password = get_secure_random_string(size=12) + + drive_name = '%s-drive' % (name) + + # size is specified in GB + drive_size = (size.disk * 1024 * 1024 * 1024) + + if not is_installation_cd: + # 1. Clone library drive so we can use it + drive = self.ex_clone_drive(drive=image, name=drive_name) + + # Wait for drive clone to finish + drive = self._wait_for_drive_state_transition(drive=drive, + state='unmounted') + + # 2. Resize drive to the desired disk size if the desired disk size + # is larger than the cloned drive size. + if drive_size > drive.size: + drive = self.ex_resize_drive(drive=drive, size=drive_size) + + # Wait for drive resize to finish + drive = self._wait_for_drive_state_transition(drive=drive, + state='unmounted') + else: + # No need to clone installation CDs + drive = image + + # 3. 
Create server and attach cloned drive + # ide 0:0 + data = {} + data['name'] = name + data['cpu'] = size.cpu + data['mem'] = (size.ram * 1024 * 1024) + data['vnc_password'] = vnc_password + + if ex_metadata: + data['meta'] = ex_metadata + + # Assign 1 public interface (DHCP) to the node + nic = { + 'boot_order': None, + 'ip_v4_conf': { + 'conf': 'dhcp', + }, + 'ip_v6_conf': None + } + + nics = [nic] + + if ex_vlan: + # Assign another interface for VLAN + nic = { + 'boot_order': None, + 'ip_v4_conf': None, + 'ip_v6_conf': None, + 'vlan': ex_vlan + } + nics.append(nic) + + # Need to use IDE for installation CDs + if is_installation_cd: + device_type = 'ide' + else: + device_type = 'virtio' + + drive = { + 'boot_order': 1, + 'dev_channel': '0:0', + 'device': device_type, + 'drive': drive.id + } + + drives = [drive] + + data['nics'] = nics + data['drives'] = drives + + action = '/servers/' + response = self.connection.request(action=action, method='POST', + data=data) + node = self._to_node(response.object['objects'][0]) + + # 4. Start server + self.ex_start_node(node=node, ex_avoid=ex_avoid) + + return node + + def destroy_node(self, node): + """ + Destroy the node and all the associated drives. + + :return: ``True`` on success, ``False`` otherwise. + :rtype: ``bool`` + """ + action = '/servers/%s/' % (node.id) + params = {'recurse': 'all_drives'} + response = self.connection.request(action=action, method='DELETE', + params=params) + return response.status == httplib.NO_CONTENT + + # Server extension methods + + def ex_edit_node(self, node, params): + """ + Edit a node. + + :param node: Node to edit. + :type node: :class:`libcloud.compute.base.Node` + + :param params: Node parameters to update. + :type params: ``dict`` + + :return Edited node. 
+ :rtype: :class:`libcloud.compute.base.Node` + """ + data = {} + + # name, cpu, mem and vnc_password attributes must always be present so + # we just copy them from the to-be-edited node + data['name'] = node.name + data['cpu'] = node.extra['cpu'] + data['mem'] = node.extra['mem'] + data['vnc_password'] = node.extra['vnc_password'] + + nics = copy.deepcopy(node.extra.get('nics', [])) + + data['nics'] = nics + + data.update(params) + + action = '/servers/%s/' % (node.id) + response = self.connection.request(action=action, method='PUT', + data=data).object + node = self._to_node(data=response) + return node + + def ex_start_node(self, node, ex_avoid=None): + """ + Start a node. + + :param node: Node to start. + :type node: :class:`libcloud.compute.base.Node` + + :param ex_avoid: A list of other server uuids to avoid when + starting this node. If provided, node will + attempt to be started on a different + physical infrastructure from other servers + specified using this argument. (optional) + :type ex_avoid: ``list`` + """ + params = {} + + if ex_avoid: + params['avoid'] = ','.join(ex_avoid) + + path = '/servers/%s/action/' % (node.id) + response = self._perform_action(path=path, action='start', + params=params, + method='POST') + return response.status == httplib.ACCEPTED + + def ex_stop_node(self, node): + """ + Stop a node. + """ + path = '/servers/%s/action/' % (node.id) + response = self._perform_action(path=path, action='stop', + method='POST') + return response.status == httplib.ACCEPTED + + def ex_clone_node(self, node, name=None, random_vnc_password=None): + """ + Clone the provided node. + + :param name: Optional name for the cloned node. + :type name: ``str`` + :param random_vnc_password: If True, a new random VNC password will be + generated for the cloned node. Otherwise + password from the cloned node will be + reused. + :type random_vnc_password: ``bool`` + + :return: Cloned node. 
+ :rtype: :class:`libcloud.compute.base.Node` + """ + data = {} + + data['name'] = name + data['random_vnc_password'] = random_vnc_password + + path = '/servers/%s/action/' % (node.id) + response = self._perform_action(path=path, action='clone', + method='POST', data=data).object + node = self._to_node(data=response) + return node + + def ex_open_vnc_tunnel(self, node): + """ + Open a VNC tunnel to the provided node and return the VNC url. + + :param node: Node to open the VNC tunnel to. + :type node: :class:`libcloud.compute.base.Node` + + :return: URL of the opened VNC tunnel. + :rtype: ``str`` + """ + path = '/servers/%s/action/' % (node.id) + response = self._perform_action(path=path, action='open_vnc', + method='POST').object + vnc_url = response['vnc_url'] + return vnc_url + + def ex_close_vnc_tunnel(self, node): + """ + Close a VNC server to the provided node. + + :param node: Node to close the VNC tunnel to. + :type node: :class:`libcloud.compute.base.Node` + + :return: ``True`` on success, ``False`` otherwise. + :rtype: ``bool`` + """ + path = '/servers/%s/action/' % (node.id) + response = self._perform_action(path=path, action='close_vnc', + method='POST') + return response.status == httplib.ACCEPTED + + # Drive extension methods + + def ex_list_library_drives(self): + """ + Return a list of all the available library drives (pre-installed and + installation CDs). + + :rtype: ``list`` of :class:`.CloudSigmaDrive` objects + """ + response = self.connection.request(action='/libdrives/').object + drives = [self._to_drive(data=item) for item in response['objects']] + return drives + + def ex_list_user_drives(self): + """ + Return a list of all the available user's drives. 
+ + :rtype: ``list`` of :class:`.CloudSigmaDrive` objects + """ + response = self.connection.request(action='/drives/detail/').object + drives = [self._to_drive(data=item) for item in response['objects']] + return drives + + def ex_create_drive(self, name, size, media='disk', ex_avoid=None): + """ + Create a new drive. + + :param name: Drive name. + :type name: ``str`` + + :param size: Drive size in bytes. + :type size: ``int`` + + :param media: Drive media type (cdrom, disk). + :type media: ``str`` + + :param ex_avoid: A list of other drive uuids to avoid when + creating this drive. If provided, drive will + attempt to be created on a different + physical infrastructure from other drives + specified using this argument. (optional) + :type ex_avoid: ``list`` + + :return: Created drive object. + :rtype: :class:`.CloudSigmaDrive` + """ + params = {} + data = { + 'name': name, + 'size': size, + 'media': media + } + + if ex_avoid: + params['avoid'] = ','.join(ex_avoid) + + action = '/drives/' + response = self.connection.request(action=action, method='POST', + params=params, data=data).object + drive = self._to_drive(data=response['objects'][0]) + return drive + + def ex_clone_drive(self, drive, name=None, ex_avoid=None): + """ + Clone a library or a standard drive. + + :param drive: Drive to clone. + :type drive: :class:`libcloud.compute.base.NodeImage` or + :class:`.CloudSigmaDrive` + + :param name: Optional name for the cloned drive. + :type name: ``str`` + + :param ex_avoid: A list of other drive uuids to avoid when + creating this drive. If provided, drive will + attempt to be created on a different + physical infrastructure from other drives + specified using this argument. (optional) + :type ex_avoid: ``list`` + + :return: New cloned drive. 
+ :rtype: :class:`.CloudSigmaDrive` + """ + params = {} + data = {} + + if ex_avoid: + params['avoid'] = ','.join(ex_avoid) + + if name: + data['name'] = name + + path = '/drives/%s/action/' % (drive.id) + response = self._perform_action(path=path, action='clone', + params=params, data=data, + method='POST') + drive = self._to_drive(data=response.object['objects'][0]) + return drive + + def ex_resize_drive(self, drive, size): + """ + Resize a drive. + + :param drive: Drive to resize. + + :param size: New drive size in bytes. + :type size: ``int`` + + :return: Drive object which is being resized. + :rtype: :class:`.CloudSigmaDrive` + """ + path = '/drives/%s/action/' % (drive.id) + data = {'name': drive.name, 'size': size, 'media': 'disk'} + response = self._perform_action(path=path, action='resize', + method='POST', data=data) + + drive = self._to_drive(data=response.object['objects'][0]) + return drive + + def ex_attach_drive(self, node): + """ + Attach a drive to the provided node. + """ + # TODO + pass + + def ex_get_drive(self, drive_id): + """ + Retrieve information about a single drive. + + :param drive_id: ID of the drive to retrieve. + :type drive_id: ``str`` + + :return: Drive object. + :rtype: :class:`.CloudSigmaDrive` + """ + action = '/drives/%s/' % (drive_id) + response = self.connection.request(action=action).object + drive = self._to_drive(data=response) + return drive + + # Firewall policies extension methods + + def ex_list_firewall_policies(self): + """ + List firewall policies. + + :rtype: ``list`` of :class:`.CloudSigmaFirewallPolicy` + """ + action = '/fwpolicies/detail/' + response = self.connection.request(action=action, method='GET').object + policies = [self._to_firewall_policy(data=item) for item + in response['objects']] + return policies + + def ex_create_firewall_policy(self, name, rules=None): + """ + Create a firewall policy. + + :param name: Policy name. 
+ :type name: ``str`` + + :param rules: List of firewall policy rules to associate with this + policy. (optional) + :type rules: ``list`` of ``dict`` + + :return: Created firewall policy object. + :rtype: :class:`.CloudSigmaFirewallPolicy` + """ + data = {} + obj = {} + obj['name'] = name + + if rules: + obj['rules'] = rules + + data['objects'] = [obj] + + action = '/fwpolicies/' + response = self.connection.request(action=action, method='POST', + data=data).object + policy = self._to_firewall_policy(data=response['objects'][0]) + return policy + + def ex_attach_firewall_policy(self, policy, node, nic_mac=None): + """ + Attach firewall policy to a public NIC interface on the server. + + :param policy: Firewall policy to attach. + :type policy: :class:`.CloudSigmaFirewallPolicy` + + :param node: Node to attach policy to. + :type node: :class:`libcloud.compute.base.Node` + + :param nic_mac: Optional MAC address of the NIC to add the policy to. + If not specified, first public interface is used + instead. + :type nic_mac: ``str`` + + :return: Node object to which the policy was attached to. + :rtype: :class:`libcloud.compute.base.Node` + """ + nics = copy.deepcopy(node.extra.get('nics', [])) + + if nic_mac: + nic = [n for n in nics if n['mac'] == nic_mac] + else: + nic = nics + + if len(nic) == 0: + raise ValueError('Cannot find the NIC interface to attach ' + 'a policy to') + + nic = nic[0] + nic['firewall_policy'] = policy.id + + params = {'nics': nics} + node = self.ex_edit_node(node=node, params=params) + return node + + def ex_delete_firewall_policy(self, policy): + """ + Delete a firewall policy. + + :param policy: Policy to delete to. + :type policy: :class:`.CloudSigmaFirewallPolicy` + + :return: ``True`` on success, ``False`` otherwise. 
+ :rtype: ``bool`` + """ + action = '/fwpolicies/%s/' % (policy.id) + response = self.connection.request(action=action, method='DELETE') + return response.status == httplib.NO_CONTENT + + # Availability groups extension methods + + def ex_list_servers_availability_groups(self): + """ + Return which running servers share the same physical compute host. + + :return: A list of server UUIDs which share the same physical compute + host. Servers which share the same host will be stored under + the same list index. + :rtype: ``list`` of ``list`` + """ + action = '/servers/availability_groups/' + response = self.connection.request(action=action, method='GET') + return response.object + + def ex_list_drives_availability_groups(self): + """ + Return which drives share the same physical storage host. + + :return: A list of drive UUIDs which share the same physical storage + host. Drives which share the same host will be stored under + the same list index. + :rtype: ``list`` of ``list`` + """ + action = '/drives/availability_groups/' + response = self.connection.request(action=action, method='GET') + return response.object + + # Tag extension methods + + def ex_list_tags(self): + """ + List all the available tags. + + :rtype: ``list`` of :class:`.CloudSigmaTag` objects + """ + action = '/tags/detail/' + response = self.connection.request(action=action, method='GET').object + tags = [self._to_tag(data=item) for item in response['objects']] + + return tags + + def ex_get_tag(self, tag_id): + """ + Retrieve a single tag. + + :param tag_id: ID of the tag to retrieve. + :type tag_id: ``str`` + + :rtype: ``list`` of :class:`.CloudSigmaTag` objects + """ + action = '/tags/%s/' % (tag_id) + response = self.connection.request(action=action, method='GET').object + tag = self._to_tag(data=response) + return tag + + def ex_create_tag(self, name, resource_uuids=None): + """ + Create a tag. + + :param name: Tag name. 
+ :type name: ``str`` + + :param resource_uuids: Optional list of resource UUIDs to assign this + tag go. + :type resource_uuids: ``list`` of ``str`` + + :return: Created tag object. + :rtype: :class:`.CloudSigmaTag` + """ + data = {} + data['objects'] = [ + { + 'name': name + } + ] + + if resource_uuids: + data['resources'] = resource_uuids + + action = '/tags/' + response = self.connection.request(action=action, method='POST', + data=data).object + tag = self._to_tag(data=response['objects'][0]) + return tag + + def ex_tag_resource(self, resource, tag): + """ + Associate tag with the provided resource. + + :param resource: Resource to associate a tag with. + :type resource: :class:`libcloud.compute.base.Node` or + :class:`.CloudSigmaDrive` + + :param tag: Tag to associate with the resources. + :type tag: :class:`.CloudSigmaTag` + + :return: Updated tag object. + :rtype: :class:`.CloudSigmaTag` + """ + if not hasattr(resource, 'id'): + raise ValueError('Resource doesn\'t have id attribute') + + return self.ex_tag_resources(resources=[resource], tag=tag) + + def ex_tag_resources(self, resources, tag): + """ + Associate tag with the provided resources. + + :param resources: Resources to associate a tag with. + :type resources: ``list`` of :class:`libcloud.compute.base.Node` or + :class:`.CloudSigmaDrive` + + :param tag: Tag to associate with the resources. + :type tag: :class:`.CloudSigmaTag` + + :return: Updated tag object. 
+ :rtype: :class:`.CloudSigmaTag` + """ + + resources = tag.resources[:] + + for resource in resources: + if not hasattr(resource, 'id'): + raise ValueError('Resource doesn\'t have id attribute') + + resources.append(resource.id) + + resources = list(set(resources)) + + data = { + 'name': tag.name, + 'resources': resources + } + + action = '/tags/%s/' % (tag.id) + response = self.connection.request(action=action, method='PUT', + data=data).object + tag = self._to_tag(data=response) + return tag + + def ex_delete_tag(self, tag): + """ + Delete a tag. + + :param tag: Tag to delete. + :type tag: :class:`.CloudSigmaTag` + + :return: ``True`` on success, ``False`` otherwise. + :rtype: ``bool`` + """ + action = '/tags/%s/' % (tag.id) + response = self.connection.request(action=action, method='DELETE') + return response.status == httplib.NO_CONTENT + + # Account extension methods + + def ex_get_balance(self): + """ + Retrueve account balance information. + + :return: Dictionary with two items ("balance" and "currency"). + :rtype: ``dict`` + """ + action = '/balance/' + response = self.connection.request(action=action, method='GET') + return response.object + + def ex_get_pricing(self): + """ + Retrive pricing information that are applicable to the cloud. + + :return: Dictionary with pricing information. + :rtype: ``dict`` + """ + action = '/pricing/' + response = self.connection.request(action=action, method='GET') + return response.object + + def ex_get_usage(self): + """ + Retrieve account current usage information. + + :return: Dictionary with two items ("balance" and "usage"). + :rtype: ``dict`` + """ + action = '/currentusage/' + response = self.connection.request(action=action, method='GET') + return response.object + + def ex_list_subscriptions(self, status='all', resources=None): + """ + List subscriptions for this account. + + :param status: Only return subscriptions with the provided status + (optional). 
+ :type status: ``str`` + :param resources: Only return subscriptions for the provided resources + (optional). + :type resources: ``list`` + + :rtype: ``list`` + """ + params = {} + + if status: + params['status'] = status + + if resources: + params['resource'] = ','.join(resources) + + response = self.connection.request(action='/subscriptions/', + params=params).object + subscriptions = self._to_subscriptions(data=response) + return subscriptions + + def ex_toggle_subscription_auto_renew(self, subscription): + """ + Toggle subscription auto renew status. + + :param subscription: Subscription to toggle the auto renew flag for. + :type subscription: :class:`.CloudSigmaSubscription` + + :return: ``True`` on success, ``False`` otherwise. + :rtype: ``bool`` + """ + path = '/subscriptions/%s/action/' % (subscription.id) + response = self._perform_action(path=path, action='auto_renew', + method='POST') + return response.status == httplib.OK + + def ex_create_subscription(self, amount, period, resource, + auto_renew=False): + """ + Create a new subscription. + + :param amount: Subscription amount. For example, in dssd case this + would be disk size in gigabytes. + :type amount: ``int`` + + :param period: Subscription period. For example: 30 days, 1 week, 1 + month, ... + :type period: ``str`` + + :param resource: Resource the purchase the subscription for. + :type resource: ``str`` + + :param auto_renew: True to automatically renew the subscription. + :type auto_renew: ``bool`` + """ + data = [ + { + 'amount': amount, + 'period': period, + 'auto_renew': auto_renew, + 'resource': resource + } + ] + + response = self.connection.request(action='/subscriptions/', + data=data, method='POST') + data = response.object['objects'][0] + subscription = self._to_subscription(data=data) + return subscription + + # Misc extension methods + + def ex_list_capabilities(self): + """ + Retrieve all the basic and sensible limits of the API. 
+ + :rtype: ``dict`` + """ + action = '/capabilities/' + response = self.connection.request(action=action, + method='GET') + capabilities = response.object + return capabilities + + def _parse_ips_from_nic(self, nic): + """ + Parse private and public IP addresses from the provided network + interface object. + + :param nic: NIC object. + :type nic: ``dict`` + + :return: (public_ips, private_ips) tuple. + :rtype: ``tuple`` + """ + public_ips, private_ips = [], [] + + ipv4_conf = nic['ip_v4_conf'] + ipv6_conf = nic['ip_v6_conf'] + + ipv4 = ipv4_conf['ip'] if ipv4_conf else None + ipv6 = ipv6_conf['ip'] if ipv6_conf else None + + ips = [] + + if ipv4: + ips.append(ipv4) + + if ipv6: + ips.append(ipv6) + + runtime = nic['runtime'] + + ip_v4 = runtime['ip_v4'] if nic['runtime'] else None + ip_v6 = runtime['ip_v6'] if nic['runtime'] else None + + ipv4 = ip_v4['uuid'] if ip_v4 else None + ipv6 = ip_v4['uuid'] if ip_v6 else None + + if ipv4: + ips.append(ipv4) + + if ipv6: + ips.append(ipv6) + + ips = set(ips) + + for ip in ips: + if is_private_subnet(ip): + private_ips.append(ip) + else: + public_ips.append(ip) + + return public_ips, private_ips + + def _to_node(self, data): + extra_keys = ['cpu', 'mem', 'nics', 'vnc_password', 'meta'] + + id = data['uuid'] + name = data['name'] + state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN) + + public_ips = [] + private_ips = [] + extra = self._extract_values(obj=data, keys=extra_keys) + + for nic in data['nics']: + _public_ips, _private_ips = self._parse_ips_from_nic(nic=nic) + + public_ips.extend(_public_ips) + private_ips.extend(_private_ips) + + node = Node(id=id, name=name, state=state, public_ips=public_ips, + private_ips=private_ips, driver=self, extra=extra) + return node + + def _to_image(self, data): + extra_keys = ['description', 'arch', 'image_type', 'os', 'licenses', + 'media', 'meta'] + + id = data['uuid'] + name = data['name'] + extra = self._extract_values(obj=data, keys=extra_keys) + + image = 
NodeImage(id=id, name=name, driver=self, extra=extra) + return image + + def _to_drive(self, data): + id = data['uuid'] + name = data['name'] + size = data['size'] + media = data['media'] + status = data['status'] + extra = {} + + drive = CloudSigmaDrive(id=id, name=name, size=size, media=media, + status=status, driver=self, extra=extra) + + return drive + + def _to_tag(self, data): + resources = data['resources'] + resources = [resource['uuid'] for resource in resources] + + tag = CloudSigmaTag(id=data['uuid'], name=data['name'], + resources=resources) + return tag + + def _to_subscriptions(self, data): + subscriptions = [] + + for item in data['objects']: + subscription = self._to_subscription(data=item) + subscriptions.append(subscription) + + return subscriptions + + def _to_subscription(self, data): + start_time = parse_date(data['start_time']) + end_time = parse_date(data['end_time']) + obj_uuid = data['subscribed_object'] + + subscription = CloudSigmaSubscription(id=data['id'], + resource=data['resource'], + amount=int(data['amount']), + period=data['period'], + status=data['status'], + price=data['price'], + start_time=start_time, + end_time=end_time, + auto_renew=data['auto_renew'], + subscribed_object=obj_uuid) + return subscription + + def _to_firewall_policy(self, data): + rules = [] + + for item in data.get('rules', []): + rule = CloudSigmaFirewallPolicyRule(action=item['action'], + direction=item['direction'], + ip_proto=item['ip_proto'], + src_ip=item['src_ip'], + src_port=item['src_port'], + dst_ip=item['dst_ip'], + dst_port=item['dst_port'], + comment=item['comment']) + rules.append(rule) + + policy = CloudSigmaFirewallPolicy(id=data['uuid'], name=data['name'], + rules=rules) + return policy + + def _perform_action(self, path, action, method='POST', params=None, + data=None): + """ + Perform API action and return response object. 
+ """ + if params: + params = params.copy() + else: + params = {} + + params['do'] = action + response = self.connection.request(action=path, method=method, + params=params, data=data) + return response + + def _is_installation_cd(self, image): + """ + Detect if the provided image is an installation CD. + + :rtype: ``bool`` + """ + if isinstance(image, CloudSigmaDrive) and image.media == 'cdrom': + return True + + return False + + def _extract_values(self, obj, keys): + """ + Extract values from a dictionary and return a new dictionary with + extracted values. + + :param obj: Dictionary to extract values from. + :type obj: ``dict`` + + :param keys: Keys to extract. + :type keys: ``list`` + + :return: Dictionary with extracted values. + :rtype: ``dict`` + """ + result = {} + + for key in keys: + result[key] = obj[key] + + return result + + def _wait_for_drive_state_transition(self, drive, state, + timeout=DRIVE_TRANSITION_TIMEOUT): + """ + Wait for a drive to transition to the provided state. + + Note: This function blocks and periodically calls "GET drive" endpoint + to check if the drive has already transitioned to the desired state. + + :param drive: Drive to wait for. + :type drive: :class:`.CloudSigmaDrive` + + :param state: Desired drive state. + :type state: ``str`` + + :param timeout: How long to wait for the transition (in seconds) before + timing out. + :type timeout: ``int`` + + :return: Drive object. 
+ :rtype: :class:`.CloudSigmaDrive` + """ + + start_time = time.time() + + while drive.status != state: + drive = self.ex_get_drive(drive_id=drive.id) + + if drive.status == state: + break + + current_time = time.time() + delta = (current_time - start_time) + + if delta >= timeout: + msg = ('Timed out while waiting for drive transition ' + '(timeout=%s seconds)' % (timeout)) + raise Exception(msg) + + time.sleep(self.DRIVE_TRANSITION_SLEEP_INTERVAL) + + return drive + + def _ex_connection_class_kwargs(self): + """ + Return the host value based on the user supplied region. + """ + kwargs = {} + + if not self._host_argument_set: + kwargs['host'] = API_ENDPOINTS_2_0[self.region]['host'] + + return kwargs diff -Nru libcloud-0.5.0/libcloud/compute/drivers/cloudstack.py libcloud-0.15.1/libcloud/compute/drivers/cloudstack.py --- libcloud-0.5.0/libcloud/compute/drivers/cloudstack.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/cloudstack.py 2014-07-02 18:47:55.000000000 +0000 @@ -0,0 +1,2208 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import with_statement + +import base64 +import warnings + +from libcloud.utils.py3 import b +from libcloud.utils.py3 import urlparse + +from libcloud.compute.providers import Provider +from libcloud.common.cloudstack import CloudStackDriverMixIn +from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeLocation +from libcloud.compute.base import NodeSize, StorageVolume +from libcloud.compute.base import KeyPair +from libcloud.compute.types import NodeState, LibcloudError +from libcloud.compute.types import KeyPairDoesNotExistError +from libcloud.utils.networking import is_private_subnet + + +""" +Define the extra dictionary for specific resources +""" +RESOURCE_EXTRA_ATTRIBUTES_MAP = { + 'network': { + 'broadcast_domain_type': { + 'key_name': 'broadcastdomaintype', + 'transform_func': str + }, + 'traffic_type': { + 'key_name': 'traffictype', + 'transform_func': str + }, + 'zone_name': { + 'key_name': 'zonename', + 'transform_func': str + }, + 'network_offering_name': { + 'key_name': 'networkofferingname', + 'transform_func': str + }, + 'network_offeringdisplay_text': { + 'key_name': 'networkofferingdisplaytext', + 'transform_func': str + }, + 'network_offering_availability': { + 'key_name': 'networkofferingavailability', + 'transform_func': str + }, + 'is_system': { + 'key_name': 'issystem', + 'transform_func': str + }, + 'state': { + 'key_name': 'state', + 'transform_func': str + }, + 'dns1': { + 'key_name': 'dns1', + 'transform_func': str + }, + 'dns2': { + 'key_name': 'dns2', + 'transform_func': str + }, + 'type': { + 'key_name': 'type', + 'transform_func': str + }, + 'acl_type': { + 'key_name': 'acltype', + 'transform_func': str + }, + 'subdomain_access': { + 'key_name': 'subdomainaccess', + 'transform_func': str + }, + 'network_domain': { + 'key_name': 'networkdomain', + 'transform_func': str + }, + 'physical_network_id': { + 'key_name': 'physicalnetworkid', + 'transform_func': str + }, + 'can_use_for_deploy': { + 'key_name': 
'canusefordeploy', + 'transform_func': str + }, + 'gateway': { + 'key_name': 'gateway', + 'transform_func': str + }, + 'netmask': { + 'key_name': 'netmask', + 'transform_func': str + }, + 'vpc_id': { + 'key_name': 'vpcid', + 'transform_func': str + }, + 'project_id': { + 'key_name': 'projectid', + 'transform_func': str + } + }, + 'node': { + 'haenable': { + 'key_name': 'haenable', + 'transform_func': str + }, + 'zone_id': { + 'key_name': 'zoneid', + 'transform_func': str + }, + 'zone_name': { + 'key_name': 'zonename', + 'transform_func': str + }, + 'key_name': { + 'key_name': 'keypair', + 'transform_func': str + }, + 'password': { + 'key_name': 'password', + 'transform_func': str + }, + 'image_id': { + 'key_name': 'templateid', + 'transform_func': str + }, + 'image_name': { + 'key_name': 'templatename', + 'transform_func': str + }, + 'template_display_text': { + 'key_name': 'templatdisplaytext', + 'transform_func': str + }, + 'password_enabled': { + 'key_name': 'passwordenabled', + 'transform_func': str + }, + 'size_id': { + 'key_name': 'serviceofferingid', + 'transform_func': str + }, + 'size_name': { + 'key_name': 'serviceofferingname', + 'transform_func': str + }, + 'root_device_id': { + 'key_name': 'rootdeviceid', + 'transform_func': str + }, + 'root_device_type': { + 'key_name': 'rootdevicetype', + 'transform_func': str + }, + 'hypervisor': { + 'key_name': 'hypervisor', + 'transform_func': str + }, + 'project': { + 'key_name': 'project', + 'transform_func': str + }, + 'project_id': { + 'key_name': 'projectid', + 'transform_func': str + } + }, + 'volume': { + 'created': { + 'key_name': 'created', + 'transform_func': str + }, + 'device_id': { + 'key_name': 'deviceid', + 'transform_func': int + }, + 'instance_id': { + 'key_name': 'serviceofferingid', + 'transform_func': str + }, + 'state': { + 'key_name': 'state', + 'transform_func': str + }, + 'volume_type': { + 'key_name': 'type', + 'transform_func': str + }, + 'zone_id': { + 'key_name': 'zoneid', + 
'transform_func': str + }, + 'zone_name': { + 'key_name': 'zonename', + 'transform_func': str + } + }, + 'project': { + 'account': {'key_name': 'account', 'transform_func': str}, + 'cpuavailable': {'key_name': 'cpuavailable', 'transform_func': int}, + 'cpulimit': {'key_name': 'cpulimit', 'transform_func': int}, + 'cputotal': {'key_name': 'cputotal', 'transform_func': int}, + 'domain': {'key_name': 'domain', 'transform_func': str}, + 'domainid': {'key_name': 'domainid', 'transform_func': str}, + 'ipavailable': {'key_name': 'ipavailable', 'transform_func': int}, + 'iplimit': {'key_name': 'iplimit', 'transform_func': int}, + 'iptotal': {'key_name': 'iptotal', 'transform_func': int}, + 'memoryavailable': {'key_name': 'memoryavailable', + 'transform_func': int}, + 'memorylimit': {'key_name': 'memorylimit', 'transform_func': int}, + 'memorytotal': {'key_name': 'memorytotal', 'transform_func': int}, + 'networkavailable': {'key_name': 'networkavailable', + 'transform_func': int}, + 'networklimit': {'key_name': 'networklimit', 'transform_func': int}, + 'networktotal': {'key_name': 'networktotal', 'transform_func': int}, + 'primarystorageavailable': {'key_name': 'primarystorageavailable', + 'transform_func': int}, + 'primarystoragelimit': {'key_name': 'primarystoragelimit', + 'transform_func': int}, + 'primarystoragetotal': {'key_name': 'primarystoragetotal', + 'transform_func': int}, + 'secondarystorageavailable': {'key_name': 'secondarystorageavailable', + 'transform_func': int}, + 'secondarystoragelimit': {'key_name': 'secondarystoragelimit', + 'transform_func': int}, + 'secondarystoragetotal': {'key_name': 'secondarystoragetotal', + 'transform_func': int}, + 'snapshotavailable': {'key_name': 'snapshotavailable', + 'transform_func': int}, + 'snapshotlimit': {'key_name': 'snapshotlimit', 'transform_func': int}, + 'snapshottotal': {'key_name': 'snapshottotal', 'transform_func': int}, + 'state': {'key_name': 'state', 'transform_func': str}, + 'tags': {'key_name': 'tags', 
'transform_func': str}, + 'templateavailable': {'key_name': 'templateavailable', + 'transform_func': int}, + 'templatelimit': {'key_name': 'templatelimit', 'transform_func': int}, + 'templatetotal': {'key_name': 'templatetotal', 'transform_func': int}, + 'vmavailable': {'key_name': 'vmavailable', 'transform_func': int}, + 'vmlimit': {'key_name': 'vmlimit', 'transform_func': int}, + 'vmrunning': {'key_name': 'vmrunning', 'transform_func': int}, + 'vmtotal': {'key_name': 'vmtotal', 'transform_func': int}, + 'volumeavailable': {'key_name': 'volumeavailable', + 'transform_func': int}, + 'volumelimit': {'key_name': 'volumelimit', 'transform_func': int}, + 'volumetotal': {'key_name': 'volumetotal', 'transform_func': int}, + 'vpcavailable': {'key_name': 'vpcavailable', 'transform_func': int}, + 'vpclimit': {'key_name': 'vpclimit', 'transform_func': int}, + 'vpctotal': {'key_name': 'vpctotal', 'transform_func': int} + } +} + + +class CloudStackNode(Node): + """ + Subclass of Node so we can expose our extension methods. + """ + + def ex_allocate_public_ip(self): + """ + Allocate a public IP and bind it to this node. + """ + return self.driver.ex_allocate_public_ip(self) + + def ex_release_public_ip(self, address): + """ + Release a public IP that this node holds. + """ + return self.driver.ex_release_public_ip(self, address) + + def ex_create_ip_forwarding_rule(self, address, protocol, + start_port, end_port=None): + """ + Add a NAT/firewall forwarding rule for a port or ports. + """ + return self.driver.ex_create_ip_forwarding_rule(node=self, + address=address, + protocol=protocol, + start_port=start_port, + end_port=end_port) + + def ex_create_port_forwarding_rule(self, address, + private_port, public_port, + protocol, + public_end_port=None, + private_end_port=None, + openfirewall=True): + """ + Add a port forwarding rule for port or ports. 
+ """ + return self.driver.ex_create_port_forwarding_rule( + node=self, address=address, private_port=private_port, + public_port=public_port, protocol=protocol, + public_end_port=public_end_port, private_end_port=private_end_port, + openfirewall=openfirewall) + + def ex_delete_ip_forwarding_rule(self, rule): + """ + Delete a port forwarding rule. + """ + return self.driver.ex_delete_ip_forwarding_rule(node=self, rule=rule) + + def ex_delete_port_forwarding_rule(self, rule): + """ + Delete a NAT/firewall rule. + """ + return self.driver.ex_delete_port_forwarding_rule(node=self, rule=rule) + + def ex_start(self): + """ + Starts a stopped virtual machine. + """ + return self.driver.ex_start(node=self) + + def ex_stop(self): + """ + Stops a running virtual machine. + """ + return self.driver.ex_stop(node=self) + + +class CloudStackAddress(object): + """ + A public IP address. + + :param id: UUID of the Public IP + :type id: ``str`` + + :param address: The public IP address + :type address: ``str`` + + :param associated_network_id: The ID of the network where this address + has been associated with + :type associated_network_id: ``str`` + """ + + def __init__(self, id, address, driver, associated_network_id=None): + self.id = id + self.address = address + self.driver = driver + self.associated_network_id = associated_network_id + + def release(self): + self.driver.ex_release_public_ip(address=self) + + def __str__(self): + return self.address + + def __eq__(self, other): + return self.__class__ is other.__class__ and self.id == other.id + + +class CloudStackIPForwardingRule(object): + """ + A NAT/firewall forwarding rule. 
+ """ + + def __init__(self, node, id, address, protocol, start_port, end_port=None): + self.node = node + self.id = id + self.address = address + self.protocol = protocol + self.start_port = start_port + self.end_port = end_port + + def delete(self): + self.node.ex_delete_ip_forwarding_rule(rule=self) + + def __eq__(self, other): + return self.__class__ is other.__class__ and self.id == other.id + + +class CloudStackPortForwardingRule(object): + """ + A Port forwarding rule for Source NAT. + """ + + def __init__(self, node, rule_id, address, protocol, public_port, + private_port, public_end_port=None, private_end_port=None): + """ + A Port forwarding rule for Source NAT. + + @note: This is a non-standard extension API, and only works for EC2. + + :param node: Node for rule + :type node: :class:`Node` + + :param rule_id: Rule ID + :type rule_id: ``int`` + + :param address: External IP address + :type address: :class:`CloudStackAddress` + + :param protocol: TCP/IP Protocol (TCP, UDP) + :type protocol: ``str`` + + :param public_port: External port for rule (or start port if + public_end_port is also provided) + :type public_port: ``int`` + + :param private_port: Internal node port for rule (or start port if + public_end_port is also provided) + :type private_port: ``int`` + + :param public_end_port: End of external port range + :type public_end_port: ``int`` + + :param private_end_port: End of internal port range + :type private_end_port: ``int`` + + :rtype: :class:`CloudStackPortForwardingRule` + """ + self.node = node + self.id = rule_id + self.address = address + self.protocol = protocol + self.public_port = public_port + self.public_end_port = public_end_port + self.private_port = private_port + self.private_end_port = private_end_port + + def delete(self): + self.node.ex_delete_port_forwarding_rule(rule=self) + + def __eq__(self, other): + return self.__class__ is other.__class__ and self.id == other.id + + +class CloudStackDiskOffering(object): + """ + A disk 
offering within CloudStack. + """ + + def __init__(self, id, name, size, customizable): + self.id = id + self.name = name + self.size = size + self.customizable = customizable + + def __eq__(self, other): + return self.__class__ is other.__class__ and self.id == other.id + + +class CloudStackNetwork(object): + """ + Class representing a CloudStack Network. + """ + + def __init__(self, displaytext, name, networkofferingid, id, zoneid, + driver, extra=None): + self.displaytext = displaytext + self.name = name + self.networkofferingid = networkofferingid + self.id = id + self.zoneid = zoneid + self.driver = driver + self.extra = extra or {} + + def __repr__(self): + return (('') + % (self.id, self.displaytext, self.name, + self.networkofferingid, self.zoneid, self.driver.name)) + + +class CloudStackNetworkOffering(object): + """ + Class representing a CloudStack Network Offering. + """ + + def __init__(self, name, display_text, guest_ip_type, id, + service_offering_id, for_vpc, driver, extra=None): + self.display_text = display_text + self.name = name + self.guest_ip_type = guest_ip_type + self.id = id + self.service_offering_id = service_offering_id + self.for_vpc = for_vpc + self.driver = driver + self.extra = extra or {} + + def __repr__(self): + return (('') + % (self.id, self.name, self.display_text, + self.guest_ip_type, self.service_offering_id, self.for_vpc, + self.driver.name)) + + +class CloudStackProject(object): + """ + Class representing a CloudStack Project. + """ + + def __init__(self, id, name, display_text, driver, extra=None): + self.id = id + self.name = name + self.display_text = display_text + self.driver = driver + self.extra = extra or {} + + def __repr__(self): + return (('') + % (self.id, self.display_text, self.name, + self.driver.name)) + + +class CloudStackNodeDriver(CloudStackDriverMixIn, NodeDriver): + """ + Driver for the CloudStack API. + + :cvar host: The host where the API can be reached. 
+ :cvar path: The path where the API can be reached. + :cvar async_poll_frequency: How often (in seconds) to poll for async + job completion. + :type async_poll_frequency: ``int``""" + + name = 'CloudStack' + api_name = 'cloudstack' + website = 'http://cloudstack.org/' + type = Provider.CLOUDSTACK + + features = {'create_node': ['generates_password']} + + NODE_STATE_MAP = { + 'Running': NodeState.RUNNING, + 'Starting': NodeState.REBOOTING, + 'Stopped': NodeState.STOPPED, + 'Stopping': NodeState.PENDING, + 'Destroyed': NodeState.TERMINATED, + 'Expunging': NodeState.PENDING, + 'Error': NodeState.TERMINATED + } + + def __init__(self, key, secret=None, secure=True, host=None, + path=None, port=None, url=None, *args, **kwargs): + """ + :inherits: :class:`NodeDriver.__init__` + + :param host: The host where the API can be reached. (required) + :type host: ``str`` + + :param path: The path where the API can be reached. (required) + :type path: ``str`` + + :param url: Full URL to the API endpoint. Mutually exclusive with host + and path argument. 
+ :type url: ``str`` + """ + if url: + parsed = urlparse.urlparse(url) + + path = parsed.path + + scheme = parsed.scheme + split = parsed.netloc.split(':') + + if len(split) == 1: + # No port provided, use the default one + host = parsed.netloc + port = 443 if scheme == 'https' else 80 + else: + host = split[0] + port = int(split[1]) + else: + host = host if host else self.host + path = path if path else self.path + + if path is not None: + self.path = path + + if host is not None: + self.host = host + + if (self.type == Provider.CLOUDSTACK) and (not host or not path): + raise Exception('When instantiating CloudStack driver directly ' + 'you also need to provide url or host and path ' + 'argument') + + super(CloudStackNodeDriver, self).__init__(key=key, + secret=secret, + secure=secure, + host=host, + port=port) + + def list_images(self, location=None): + args = { + 'templatefilter': 'executable' + } + if location is not None: + args['zoneid'] = location.id + imgs = self._sync_request(command='listTemplates', + params=args, + method='GET') + images = [] + for img in imgs.get('template', []): + images.append(NodeImage( + id=img['id'], + name=img['name'], + driver=self.connection.driver, + extra={ + 'hypervisor': img['hypervisor'], + 'format': img['format'], + 'os': img['ostypename'], + 'displaytext': img['displaytext']})) + return images + + def list_locations(self): + """ + :rtype ``list`` of :class:`NodeLocation` + """ + locs = self._sync_request('listZones') + + locations = [] + for loc in locs['zone']: + location = NodeLocation(str(loc['id']), loc['name'], 'Unknown', + self) + locations.append(location) + + return locations + + def list_nodes(self, project=None): + """ + @inherits: :class:`NodeDriver.list_nodes` + + :keyword project: Limit nodes returned to those configured under + the defined project. 
+ :type project: :class:`.CloudStackProject` + + :rtype: ``list`` of :class:`CloudStackNode` + """ + + args = {} + if project: + args['projectid'] = project.id + vms = self._sync_request('listVirtualMachines', params=args) + addrs = self._sync_request('listPublicIpAddresses', params=args) + + public_ips_map = {} + for addr in addrs.get('publicipaddress', []): + if 'virtualmachineid' not in addr: + continue + vm_id = str(addr['virtualmachineid']) + if vm_id not in public_ips_map: + public_ips_map[vm_id] = {} + public_ips_map[vm_id][addr['ipaddress']] = addr['id'] + + nodes = [] + + for vm in vms.get('virtualmachine', []): + public_ips = public_ips_map.get(str(vm['id']), {}).keys() + public_ips = list(public_ips) + node = self._to_node(data=vm, public_ips=public_ips) + + addresses = public_ips_map.get(vm['id'], {}).items() + addresses = [CloudStackAddress(node, v, k) for k, v in addresses] + node.extra['ip_addresses'] = addresses + + rules = [] + for addr in addresses: + result = self._sync_request('listIpForwardingRules') + for r in result.get('ipforwardingrule', []): + if str(r['virtualmachineid']) == node.id: + rule = CloudStackIPForwardingRule(node, r['id'], + addr, + r['protocol'] + .upper(), + r['startport'], + r['endport']) + rules.append(rule) + node.extra['ip_forwarding_rules'] = rules + + rules = [] + public_ips = self.ex_list_public_ips() + result = self._sync_request('listPortForwardingRules') + for r in result.get('portforwardingrule', []): + if str(r['virtualmachineid']) == node.id: + addr = [a for a in public_ips if + a.address == r['ipaddress']] + rule = CloudStackPortForwardingRule(node, r['id'], + addr[0], + r['protocol'].upper(), + r['publicport'], + r['privateport'], + r['publicendport'], + r['privateendport']) + if not addr[0].address in node.public_ips: + node.public_ips.append(addr[0].address) + rules.append(rule) + node.extra['port_forwarding_rules'] = rules + + nodes.append(node) + + return nodes + + def list_sizes(self, location=None): + """ 
+ :rtype ``list`` of :class:`NodeSize` + """ + szs = self._sync_request(command='listServiceOfferings', + method='GET') + sizes = [] + for sz in szs['serviceoffering']: + extra = {'cpu': sz['cpunumber']} + sizes.append(NodeSize(sz['id'], sz['name'], sz['memory'], 0, 0, + 0, self, extra=extra)) + return sizes + + def create_node(self, **kwargs): + """ + Create a new node + + @inherits: :class:`NodeDriver.create_node` + + :keyword networks: Optional list of networks to launch the server + into. + :type networks: ``list`` of :class:`.CloudStackNetwork` + + :keyword project: Optional project to create the new node under. + :type project: :class:`.CloudStackProject` + + :keyword diskoffering: Optional disk offering to add to the new + node. + :type diskoffering: :class:`.CloudStackDiskOffering` + + :keyword ex_keyname: Name of existing keypair + :type ex_keyname: ``str`` + + :keyword ex_userdata: String containing user data + :type ex_userdata: ``str`` + + :keyword ex_security_groups: List of security groups to assign to + the node + :type ex_security_groups: ``list`` of ``str`` + + :keyword ex_displayname: String containing instance display name + :type ex_displayname: ``str`` + + :rtype: :class:`.CloudStackNode` + """ + + server_params = self._create_args_to_params(None, **kwargs) + + data = self._async_request(command='deployVirtualMachine', + params=server_params, + method='GET')['virtualmachine'] + node = self._to_node(data=data) + return node + + def _create_args_to_params(self, node, **kwargs): + server_params = {} + + # TODO: Refactor and use "kwarg_to_server_params" map + name = kwargs.get('name', None) + size = kwargs.get('size', None) + image = kwargs.get('image', None) + location = kwargs.get('location', None) + networks = kwargs.get('networks', None) + project = kwargs.get('project', None) + diskoffering = kwargs.get('diskoffering', None) + ex_key_name = kwargs.get('ex_keyname', None) + ex_user_data = kwargs.get('ex_userdata', None) + ex_security_groups = 
kwargs.get('ex_security_groups', None) + ex_displayname = kwargs.get('ex_displayname', None) + + if name: + server_params['name'] = name + + if ex_displayname: + server_params['displayname'] = ex_displayname + + if size: + server_params['serviceofferingid'] = size.id + + if image: + server_params['templateid'] = image.id + + if location: + server_params['zoneid'] = location.id + else: + # Use a default location + server_params['zoneid'] = self.list_locations()[0].id + + if networks: + networks = ','.join([network.id for network in networks]) + server_params['networkids'] = networks + + if project: + server_params['projectid'] = project.id + + if diskoffering: + server_params['diskofferingid'] = diskoffering.id + + if ex_key_name: + server_params['keypair'] = ex_key_name + + if ex_user_data: + ex_user_data = base64.b64encode(b(ex_user_data).decode('ascii')) + server_params['userdata'] = ex_user_data + + if ex_security_groups: + ex_security_groups = ','.join(ex_security_groups) + server_params['securitygroupnames'] = ex_security_groups + + return server_params + + def destroy_node(self, node): + """ + @inherits: :class:`NodeDriver.reboot_node` + :type node: :class:`CloudStackNode` + + :rtype: ``bool`` + """ + self._async_request(command='destroyVirtualMachine', + params={'id': node.id}, + method='GET') + return True + + def reboot_node(self, node): + """ + @inherits: :class:`NodeDriver.reboot_node` + :type node: :class:`CloudStackNode` + + :rtype: ``bool`` + """ + self._async_request(command='rebootVirtualMachine', + params={'id': node.id}, + method='GET') + return True + + def ex_start(self, node): + """ + Starts/Resumes a stopped virtual machine + + :type node: :class:`CloudStackNode` + + :param id: The ID of the virtual machine (required) + :type id: ``str`` + + :param hostid: destination Host ID to deploy the VM to + parameter available for root admin only + :type hostid: ``str`` + + :rtype ``str`` + """ + res = self._async_request(command='startVirtualMachine', 
+ params={'id': node.id}, + method='GET') + return res['virtualmachine']['state'] + + def ex_stop(self, node): + """ + Stops/Suspends a running virtual machine + + :param node: Node to stop. + :type node: :class:`CloudStackNode` + + :rtype: ``str`` + """ + res = self._async_request(command='stopVirtualMachine', + params={'id': node.id}, + method='GET') + return res['virtualmachine']['state'] + + def ex_list_disk_offerings(self): + """ + Fetch a list of all available disk offerings. + + :rtype: ``list`` of :class:`CloudStackDiskOffering` + """ + + diskOfferings = [] + + diskOfferResponse = self._sync_request(command='listDiskOfferings', + method='GET') + for diskOfferDict in diskOfferResponse.get('diskoffering', ()): + diskOfferings.append( + CloudStackDiskOffering( + id=diskOfferDict['id'], + name=diskOfferDict['name'], + size=diskOfferDict['disksize'], + customizable=diskOfferDict['iscustomized'])) + + return diskOfferings + + def ex_list_networks(self): + """ + List the available networks + + :rtype ``list`` of :class:`CloudStackNetwork` + """ + + res = self._sync_request(command='listNetworks', + method='GET') + nets = res.get('network', []) + + networks = [] + extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['network'] + for net in nets: + extra = self._get_extra_dict(net, extra_map) + + if 'tags' in net: + extra['tags'] = self._get_resource_tags(net['tags']) + + networks.append(CloudStackNetwork( + net['displaytext'], + net['name'], + net['networkofferingid'], + net['id'], + net['zoneid'], + self, + extra=extra)) + + return networks + + def ex_list_network_offerings(self): + """ + List the available network offerings + + :rtype ``list`` of :class:`CloudStackNetworkOffering` + """ + res = self._sync_request(command='listNetworkOfferings', + method='GET') + netoffers = res.get('networkoffering', []) + + networkofferings = [] + + for netoffer in netoffers: + networkofferings.append(CloudStackNetworkOffering( + netoffer['name'], + netoffer['displaytext'], + 
netoffer['guestiptype'], + netoffer['id'], + netoffer['serviceofferingid'], + netoffer['forvpc'], + self)) + + return networkofferings + + def ex_create_network(self, display_text, name, network_offering, + location, gateway=None, netmask=None, + network_domain=None, vpc_id=None, project_id=None): + """ + + Creates a Network, only available in advanced zones. + + :param display_text: the display text of the network + :type display_text: ``str`` + + :param name: the name of the network + :type name: ``str`` + + :param network_offering: the network offering id + :type network_offering: :class:'CloudStackNetworkOffering` + + :param location: Zone + :type location: :class:`NodeLocation` + + :param gateway: Optional, the Gateway of this network + :type gateway: ``str`` + + :param netmask: Optional, the netmask of this network + :type netmask: ``str`` + + :param network_domain: Optional, the DNS domain of the network + :type network_domain: ``str`` + + :param vpc_id: Optional, the VPC id the network belongs to + :type vpc_id: ``str`` + + :param project_id: Optional, the project id the networks belongs to + :type project_id: ``str`` + + :rtype: :class:`CloudStackNetwork` + + """ + + extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['network'] + + args = { + 'displaytext': display_text, + 'name': name, + 'networkofferingid': network_offering.id, + 'zoneid': location.id, + } + + if gateway is not None: + args['gateway'] = gateway + + if netmask is not None: + args['netmask'] = netmask + + if network_domain is not None: + args['networkdomain'] = network_domain + + if vpc_id is not None: + args['vpcid'] = vpc_id + + if project_id is not None: + args['projectid'] = project_id + + """ Cloudstack allows for duplicate network names, + this should be handled in the code leveraging libcloud + As there could be use cases for duplicate names. + e.g. 
management from ROOT level""" + + # for net in self.ex_list_networks(): + # if name == net.name: + # raise LibcloudError('This network name already exists') + + result = self._sync_request(command='createNetwork', + params=args, + method='GET') + + result = result['network'] + extra = self._get_extra_dict(result, extra_map) + + network = CloudStackNetwork(display_text, + name, + network_offering.id, + result['id'], + location.id, + self, + extra=extra) + + return network + + def ex_delete_network(self, network, force=None): + """ + + Deletes a Network, only available in advanced zones. + + :param network: The network + :type network: :class: 'CloudStackNetwork' + + :param force: Force deletion of the network? + :type force: ``bool`` + + :rtype: ``bool`` + + """ + + args = {'id': network.id, 'forced': force} + + self._async_request(command='deleteNetwork', + params=args, + method='GET') + return True + + def ex_list_projects(self): + """ + List the available projects + + :rtype ``list`` of :class:`CloudStackProject` + """ + + res = self._sync_request(command='listProjects', + method='GET') + projs = res.get('project', []) + + projects = [] + extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['project'] + for proj in projs: + extra = self._get_extra_dict(proj, extra_map) + + if 'tags' in proj: + extra['tags'] = self._get_resource_tags(proj['tags']) + + projects.append(CloudStackProject( + id=proj['id'], + name=proj['name'], + display_text=proj['displaytext'], + driver=self, + extra=extra)) + + return projects + + def create_volume(self, size, name, location=None, snapshot=None): + """ + Creates a data volume + Defaults to the first location + """ + for diskOffering in self.ex_list_disk_offerings(): + if diskOffering.size == size or diskOffering.customizable: + break + else: + raise LibcloudError( + 'Disk offering with size=%s not found' % size) + + if location is None: + location = self.list_locations()[0] + + params = {'name': name, + 'diskOfferingId': diskOffering.id, + 
'zoneId': location.id} + + if diskOffering.customizable: + params['size'] = size + + requestResult = self._async_request(command='createVolume', + params=params, + method='GET') + + volumeResponse = requestResult['volume'] + + return StorageVolume(id=volumeResponse['id'], + name=name, + size=size, + driver=self, + extra=dict(name=volumeResponse['name'])) + + def destroy_volume(self, volume): + """ + :rtype: ``bool`` + """ + self._sync_request(command='deleteVolume', + params={'id': volume.id}, + method='GET') + return True + + def attach_volume(self, node, volume, device=None): + """ + @inherits: :class:`NodeDriver.attach_volume` + :type node: :class:`CloudStackNode` + + :rtype: ``bool`` + """ + # TODO Add handling for device name + self._async_request(command='attachVolume', + params={'id': volume.id, + 'virtualMachineId': node.id}, + method='GET') + return True + + def detach_volume(self, volume): + """ + :rtype: ``bool`` + """ + self._async_request(command='detachVolume', + params={'id': volume.id}, + method='GET') + return True + + def list_volumes(self, node=None): + """ + List all volumes + + :param node: Only return volumes for the provided node. + :type node: :class:`CloudStackNode` + + :rtype: ``list`` of :class:`StorageVolume` + """ + if node: + volumes = self._sync_request(command='listVolumes', + params={'virtualmachineid': node.id}, + method='GET') + else: + volumes = self._sync_request(command='listVolumes', + method='GET') + + list_volumes = [] + extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['volume'] + for vol in volumes['volume']: + extra = self._get_extra_dict(vol, extra_map) + + if 'tags' in vol: + extra['tags'] = self._get_resource_tags(vol['tags']) + + list_volumes.append(StorageVolume(id=vol['id'], + name=vol['name'], + size=vol['size'], + driver=self, + extra=extra)) + return list_volumes + + def list_key_pairs(self, **kwargs): + """ + List registered key pairs. 
+ + :param projectid: list objects by project + :type projectid: ``str`` + + :param page: The page to list the keypairs from + :type page: ``int`` + + :param keyword: List by keyword + :type keyword: ``str`` + + :param listall: If set to false, list only resources + belonging to the command's caller; + if set to true - list resources that + the caller is authorized to see. + Default value is false + + :type listall: ``bool`` + + :param pagesize: The number of results per page + :type pagesize: ``int`` + + :param account: List resources by account. + Must be used with the domainId parameter + :type account: ``str`` + + :param isrecursive: Defaults to false, but if true, + lists all resources from + the parent specified by the + domainId till leaves. + :type isrecursive: ``bool`` + + :param fingerprint: A public key fingerprint to look for + :type fingerprint: ``str`` + + :param name: A key pair name to look for + :type name: ``str`` + + :param domainid: List only resources belonging to + the domain specified + :type domainid: ``str`` + + :return: A list of key par objects. + :rtype: ``list`` of :class:`libcloud.compute.base.KeyPair` + """ + extra_args = kwargs.copy() + res = self._sync_request(command='listSSHKeyPairs', + params=extra_args, + method='GET') + key_pairs = res.get('sshkeypair', []) + key_pairs = self._to_key_pairs(data=key_pairs) + return key_pairs + + def get_key_pair(self, name): + params = {'name': name} + res = self._sync_request(command='listSSHKeyPairs', + params=params, + method='GET') + key_pairs = res.get('sshkeypair', []) + + if len(key_pairs) == 0: + raise KeyPairDoesNotExistError(name=name, driver=self) + + key_pair = self._to_key_pair(data=key_pairs[0]) + return key_pair + + def create_key_pair(self, name, **kwargs): + """ + Create a new key pair object. + + :param name: Key pair name. 
+ :type name: ``str`` + + :param name: Name of the keypair (required) + :type name: ``str`` + + :param projectid: An optional project for the ssh key + :type projectid: ``str`` + + :param domainid: An optional domainId for the ssh key. + If the account parameter is used, + domainId must also be used. + :type domainid: ``str`` + + :param account: An optional account for the ssh key. + Must be used with domainId. + :type account: ``str`` + + :return: Created key pair object. + :rtype: :class:`libcloud.compute.base.KeyPair` + """ + extra_args = kwargs.copy() + + params = {'name': name} + params.update(extra_args) + + res = self._sync_request(command='createSSHKeyPair', + params=params, + method='GET') + key_pair = self._to_key_pair(data=res['keypair']) + return key_pair + + def import_key_pair_from_string(self, name, key_material): + """ + Import a new public key from string. + + :param name: Key pair name. + :type name: ``str`` + + :param key_material: Public key material. + :type key_material: ``str`` + + :return: Imported key pair object. + :rtype: :class:`libcloud.compute.base.KeyPair` + """ + res = self._sync_request(command='registerSSHKeyPair', + params={'name': name, + 'publickey': key_material}, + method='GET') + key_pair = self._to_key_pair(data=res['keypair']) + return key_pair + + def delete_key_pair(self, key_pair, **kwargs): + """ + Delete an existing key pair. + + :param key_pair: Key pair object. + :type key_pair: :class`libcloud.compute.base.KeyPair` + + :param projectid: The project associated with keypair + :type projectid: ``str`` + + :param domainid: The domain ID associated with the keypair + :type domainid: ``str`` + + :param account: The account associated with the keypair. + Must be used with the domainId parameter. 
+ :type account: ``str`` + + :return: True of False based on success of Keypair deletion + :rtype: ``bool`` + """ + + extra_args = kwargs.copy() + params = {'name': key_pair.name} + params.update(extra_args) + + res = self._sync_request(command='deleteSSHKeyPair', + params=params, + method='GET') + return res['success'] == 'true' + + def ex_list_public_ips(self): + """ + Lists all Public IP Addresses. + + :rtype: ``list`` of :class:`CloudStackAddress` + """ + ips = [] + + res = self._sync_request(command='listPublicIpAddresses', + method='GET') + + # Workaround for basic zones + if not res: + return ips + + for ip in res['publicipaddress']: + ips.append(CloudStackAddress(ip['id'], + ip['ipaddress'], + self, + ip['associatednetworkid'])) + return ips + + def ex_allocate_public_ip(self, location=None): + """ + Allocate a public IP. + + :param location: Zone + :type location: :class:`NodeLocation` + + :rtype: :class:`CloudStackAddress` + """ + if location is None: + location = self.list_locations()[0] + + addr = self._async_request(command='associateIpAddress', + params={'zoneid': location.id}, + method='GET') + addr = addr['ipaddress'] + addr = CloudStackAddress(addr['id'], addr['ipaddress'], self) + return addr + + def ex_release_public_ip(self, address): + """ + Release a public IP. 
+ + :param address: CloudStackAddress which should be used + :type address: :class:`CloudStackAddress` + + :rtype: ``bool`` + """ + res = self._async_request(command='disassociateIpAddress', + params={'id': address.id}, + method='GET') + return res['success'] + + def ex_list_port_forwarding_rules(self): + """ + Lists all Port Forwarding Rules + + :rtype: ``list`` of :class:`CloudStackPortForwardingRule` + """ + rules = [] + result = self._sync_request(command='listPortForwardingRules', + method='GET') + if result != {}: + public_ips = self.ex_list_public_ips() + nodes = self.list_nodes() + for rule in result['portforwardingrule']: + node = [n for n in nodes + if n.id == str(rule['virtualmachineid'])] + addr = [a for a in public_ips if + a.address == rule['ipaddress']] + rules.append(CloudStackPortForwardingRule + (node[0], + rule['id'], + addr[0], + rule['protocol'], + rule['publicport'], + rule['privateport'], + rule['publicendport'], + rule['privateendport'])) + + return rules + + def ex_create_port_forwarding_rule(self, node, address, + private_port, public_port, + protocol, + public_end_port=None, + private_end_port=None, + openfirewall=True): + """ + Creates a Port Forwarding Rule, used for Source NAT + + :param address: IP address of the Source NAT + :type address: :class:`CloudStackAddress` + + :param private_port: Port of the virtual machine + :type private_port: ``int`` + + :param protocol: Protocol of the rule + :type protocol: ``str`` + + :param public_port: Public port on the Source NAT address + :type public_port: ``int`` + + :param node: The virtual machine + :type node: :class:`CloudStackNode` + + :rtype: :class:`CloudStackPortForwardingRule` + """ + args = { + 'ipaddressid': address.id, + 'protocol': protocol, + 'privateport': int(private_port), + 'publicport': int(public_port), + 'virtualmachineid': node.id, + 'openfirewall': openfirewall + } + if public_end_port: + args['publicendport'] = int(public_end_port) + if private_end_port: + 
args['privateendport'] = int(private_end_port) + + result = self._async_request(command='createPortForwardingRule', + params=args, + method='GET') + rule = CloudStackPortForwardingRule(node, + result['portforwardingrule'] + ['id'], + address, + protocol, + public_port, + private_port, + public_end_port, + private_end_port) + node.extra['port_forwarding_rules'].append(rule) + node.public_ips.append(address.address) + return rule + + def ex_delete_port_forwarding_rule(self, node, rule): + """ + Remove a Port forwarding rule. + + :param node: Node used in the rule + :type node: :class:`CloudStackNode` + + :param rule: Forwarding rule which should be used + :type rule: :class:`CloudStackPortForwardingRule` + + :rtype: ``bool`` + """ + + node.extra['port_forwarding_rules'].remove(rule) + node.public_ips.remove(rule.address.address) + res = self._async_request(command='deletePortForwardingRule', + params={'id': rule.id}, + method='GET') + return res['success'] + + def ex_create_ip_forwarding_rule(self, node, address, protocol, + start_port, end_port=None): + """ + "Add a NAT/firewall forwarding rule. 
+ + :param node: Node which should be used + :type node: :class:`CloudStackNode` + + :param address: CloudStackAddress which should be used + :type address: :class:`CloudStackAddress` + + :param protocol: Protocol which should be used (TCP or UDP) + :type protocol: ``str`` + + :param start_port: Start port which should be used + :type start_port: ``int`` + + :param end_port: End port which should be used + :type end_port: ``int`` + + :rtype: :class:`CloudStackForwardingRule` + """ + + protocol = protocol.upper() + if protocol not in ('TCP', 'UDP'): + return None + + args = { + 'ipaddressid': address.id, + 'protocol': protocol, + 'startport': int(start_port) + } + if end_port is not None: + args['endport'] = int(end_port) + + result = self._async_request(command='createIpForwardingRule', + params=args, + method='GET') + result = result['ipforwardingrule'] + rule = CloudStackIPForwardingRule(node, result['id'], address, + protocol, start_port, end_port) + node.extra['ip_forwarding_rules'].append(rule) + return rule + + def ex_delete_ip_forwarding_rule(self, node, rule): + """ + Remove a NAT/firewall forwarding rule. + + :param node: Node which should be used + :type node: :class:`CloudStackNode` + + :param rule: Forwarding rule which should be used + :type rule: :class:`CloudStackForwardingRule` + + :rtype: ``bool`` + """ + + node.extra['ip_forwarding_rules'].remove(rule) + self._async_request(command='deleteIpForwardingRule', + params={'id': rule.id}, + method='GET') + return True + + def ex_list_keypairs(self, **kwargs): + """ + List Registered SSH Key Pairs + + :param projectid: list objects by project + :type projectid: ``str`` + + :param page: The page to list the keypairs from + :type page: ``int`` + + :param keyword: List by keyword + :type keyword: ``str`` + + :param listall: If set to false, list only resources + belonging to the command's caller; + if set to true - list resources that + the caller is authorized to see. 
+ Default value is false + + :type listall: ``bool`` + + :param pagesize: The number of results per page + :type pagesize: ``int`` + + :param account: List resources by account. + Must be used with the domainId parameter + :type account: ``str`` + + :param isrecursive: Defaults to false, but if true, + lists all resources from + the parent specified by the + domainId till leaves. + :type isrecursive: ``bool`` + + :param fingerprint: A public key fingerprint to look for + :type fingerprint: ``str`` + + :param name: A key pair name to look for + :type name: ``str`` + + :param domainid: List only resources belonging to + the domain specified + :type domainid: ``str`` + + :return: A list of keypair dictionaries + :rtype: ``list`` of ``dict`` + """ + warnings.warn('This method has been deprecated in favor of ' + 'list_key_pairs method') + + key_pairs = self.list_key_pairs(**kwargs) + + result = [] + + for key_pair in key_pairs: + item = { + 'name': key_pair.name, + 'fingerprint': key_pair.fingerprint, + 'privateKey': key_pair.private_key + } + result.append(item) + + return result + + def ex_create_keypair(self, name, **kwargs): + """ + Creates a SSH KeyPair, returns fingerprint and private key + + :param name: Name of the keypair (required) + :type name: ``str`` + + :param projectid: An optional project for the ssh key + :type projectid: ``str`` + + :param domainid: An optional domainId for the ssh key. + If the account parameter is used, + domainId must also be used. + :type domainid: ``str`` + + :param account: An optional account for the ssh key. + Must be used with domainId. 
+ :type account: ``str`` + + :return: A keypair dictionary + :rtype: ``dict`` + """ + warnings.warn('This method has been deprecated in favor of ' + 'create_key_pair method') + + key_pair = self.create_key_pair(name=name, **kwargs) + + result = { + 'name': key_pair.name, + 'fingerprint': key_pair.fingerprint, + 'privateKey': key_pair.private_key + } + + return result + + def ex_import_keypair_from_string(self, name, key_material): + """ + Imports a new public key where the public key is passed in as a string + + :param name: The name of the public key to import. + :type name: ``str`` + + :param key_material: The contents of a public key file. + :type key_material: ``str`` + + :rtype: ``dict`` + """ + warnings.warn('This method has been deprecated in favor of ' + 'import_key_pair_from_string method') + + key_pair = self.import_key_pair_from_string(name=name, + key_material=key_material) + result = { + 'keyName': key_pair.name, + 'keyFingerprint': key_pair.fingerprint + } + + return result + + def ex_import_keypair(self, name, keyfile): + """ + Imports a new public key where the public key is passed via a filename + + :param name: The name of the public key to import. + :type name: ``str`` + + :param keyfile: The filename with path of the public key to import. 
+ :type keyfile: ``str`` + + :rtype: ``dict`` + """ + warnings.warn('This method has been deprecated in favor of ' + 'import_key_pair_from_file method') + + key_pair = self.import_key_pair_from_file(name=name, + key_file_path=keyfile) + result = { + 'keyName': key_pair.name, + 'keyFingerprint': key_pair.fingerprint + } + + return result + + def ex_delete_keypair(self, keypair, **kwargs): + """ + Deletes an existing SSH KeyPair + + :param keypair: Name of the keypair (required) + :type keypair: ``str`` + + :param projectid: The project associated with keypair + :type projectid: ``str`` + + :param domainid: The domain ID associated with the keypair + :type domainid: ``str`` + + :param account: The account associated with the keypair. + Must be used with the domainId parameter. + :type account: ``str`` + + :return: True of False based on success of Keypair deletion + :rtype: ``bool`` + """ + warnings.warn('This method has been deprecated in favor of ' + 'delete_key_pair method') + + key_pair = KeyPair(name=keypair, public_key=None, fingerprint=None, + driver=self) + + return self.delete_key_pair(key_pair=key_pair) + + def ex_list_security_groups(self, **kwargs): + """ + Lists Security Groups + + :param domainid: List only resources belonging to the domain specified + :type domainid: ``str`` + + :param account: List resources by account. Must be used with + the domainId parameter. + :type account: ``str`` + + :param listall: If set to false, list only resources belonging to + the command's caller; if set to true + list resources that the caller is + authorized to see. 
+ Default value is false + :type listall: ``bool`` + + :param pagesize: Number of entries per page + :type pagesize: ``int`` + + :param keyword: List by keyword + :type keyword: ``str`` + + :param tags: List resources by tags (key/value pairs) + :type tags: ``dict`` + + :param id: list the security group by the id provided + :type id: ``str`` + + :param securitygroupname: lists security groups by name + :type securitygroupname: ``str`` + + :param virtualmachineid: lists security groups by virtual machine id + :type virtualmachineid: ``str`` + + :param projectid: list objects by project + :type projectid: ``str`` + + :param isrecursive: (boolean) defaults to false, but if true, + lists all resources from the parent + specified by the domainId till leaves. + :type isrecursive: ``bool`` + + :param page: (integer) + :type page: ``int`` + + :rtype ``list`` + """ + extra_args = kwargs.copy() + res = self._sync_request(command='listSecurityGroups', + params=extra_args, + method='GET') + + security_groups = res.get('securitygroup', []) + return security_groups + + def ex_create_security_group(self, name, **kwargs): + """ + Creates a new Security Group + + :param name: name of the security group (required) + :type name: ``str`` + + :param account: An optional account for the security group. + Must be used with domainId. + :type account: ``str`` + + :param domainid: An optional domainId for the security group. + If the account parameter is used, + domainId must also be used. 
+ :type domainid: ``str`` + + :param description: The description of the security group + :type description: ``str`` + + :param projectid: Deploy vm for the project + :type projectid: ``str`` + + :rtype: ``dict`` + """ + + extra_args = kwargs.copy() + + for sg in self.ex_list_security_groups(): + if name in sg['name']: + raise LibcloudError('This Security Group name already exists') + + params = {'name': name} + params.update(extra_args) + + return self._sync_request(command='createSecurityGroup', + params=params, + method='GET')['securitygroup'] + + def ex_delete_security_group(self, name): + """ + Deletes a given Security Group + + :param domainid: The domain ID of account owning + the security group + :type domainid: ``str`` + + :param id: The ID of the security group. + Mutually exclusive with name parameter + :type id: ``str`` + + :param name: The ID of the security group. + Mutually exclusive with id parameter + :type name: ``str`` + + :param account: The account of the security group. + Must be specified with domain ID + :type account: ``str`` + + :param projectid: The project of the security group + :type projectid: ``str`` + + :rtype: ``bool`` + """ + + return self._sync_request(command='deleteSecurityGroup', + params={'name': name}, + method='GET')['success'] + + def ex_authorize_security_group_ingress(self, securitygroupname, + protocol, cidrlist, startport, + endport=None): + """ + Creates a new Security Group Ingress rule + + :param domainid: An optional domainId for the security group. + If the account parameter is used, + domainId must also be used. + :type domainid: ``str`` + + :param startport: Start port for this ingress rule + :type startport: ``int`` + + :param securitygroupid: The ID of the security group. 
+ Mutually exclusive with securityGroupName + parameter + :type securitygroupid: ``str`` + + :param cidrlist: The cidr list associated + :type cidrlist: ``list`` + + :param usersecuritygrouplist: user to security group mapping + :type usersecuritygrouplist: ``dict`` + + :param securitygroupname: The name of the security group. + Mutually exclusive with + securityGroupName parameter + :type securitygroupname: ``str`` + + :param account: An optional account for the security group. + Must be used with domainId. + :type account: ``str`` + + :param icmpcode: Error code for this icmp message + :type icmpcode: ``int`` + + :param protocol: TCP is default. UDP is the other supported protocol + :type protocol: ``str`` + + :param icmptype: type of the icmp message being sent + :type icmptype: ``int`` + + :param projectid: An optional project of the security group + :type projectid: ``str`` + + :param endport: end port for this ingress rule + :type endport: ``int`` + + :rtype: ``list`` + """ + + protocol = protocol.upper() + if protocol not in ('TCP', 'ICMP'): + raise LibcloudError('Only TCP and ICMP are allowed') + + args = { + 'securitygroupname': securitygroupname, + 'protocol': protocol, + 'startport': int(startport), + 'cidrlist': cidrlist + } + if endport is None: + args['endport'] = int(startport) + + return self._async_request(command='authorizeSecurityGroupIngress', + params=args, + method='GET')['securitygroup'] + + def ex_revoke_security_group_ingress(self, rule_id): + """ + Revoke/delete an ingress security rule + + :param id: The ID of the ingress security rule + :type id: ``str`` + + :rtype: ``bool`` + """ + + self._async_request(command='revokeSecurityGroupIngress', + params={'id': rule_id}, + method='GET') + return True + + def ex_register_iso(self, name, url, location=None, **kwargs): + """ + Registers an existing ISO by URL. 
+ + :param name: Name which should be used + :type name: ``str`` + + :param url: Url should be used + :type url: ``str`` + + :param location: Location which should be used + :type location: :class:`NodeLocation` + + :rtype: ``str`` + """ + if location is None: + location = self.list_locations()[0] + + params = {'name': name, + 'displaytext': name, + 'url': url, + 'zoneid': location.id} + params['bootable'] = kwargs.pop('bootable', False) + if params['bootable']: + os_type_id = kwargs.pop('ostypeid', None) + + if not os_type_id: + raise LibcloudError('If bootable=True, ostypeid is required!') + + params['ostypeid'] = os_type_id + + return self._sync_request(command='registerIso', + name=name, + displaytext=name, + url=url, + zoneid=location.id, + params=params) + + def ex_limits(self): + """ + Extra call to get account's resource limits, such as + the amount of instances, volumes, snapshots and networks. + + CloudStack uses integers as the resource type so we will convert + them to a more human readable string using the resource map + + A list of the resource type mappings can be found at + http://goo.gl/17C6Gk + + :return: dict + :rtype: ``dict`` + """ + + result = self._sync_request(command='listResourceLimits', + method='GET') + + limits = {} + resource_map = { + 0: 'max_instances', + 1: 'max_public_ips', + 2: 'max_volumes', + 3: 'max_snapshots', + 4: 'max_images', + 5: 'max_projects', + 6: 'max_networks', + 7: 'max_vpc', + 8: 'max_cpu', + 9: 'max_memory', + 10: 'max_primary_storage', + 11: 'max_secondary_storage' + } + + for limit in result.get('resourcelimit', []): + # We will ignore unknown types + resource = resource_map.get(int(limit['resourcetype']), None) + if not resource: + continue + limits[resource] = int(limit['max']) + + return limits + + def ex_create_tags(self, resource_ids, resource_type, tags): + """ + Create tags for a resource (Node/StorageVolume/etc). 
+ A list of resource types can be found at http://goo.gl/6OKphH + + :param resource_ids: Resource IDs to be tagged. The resource IDs must + all be associated with the resource_type. + For example, for virtual machines (UserVm) you + can only specify a list of virtual machine IDs. + :type resource_ids: ``list`` of resource IDs + + :param resource_type: Resource type (eg: UserVm) + :type resource_type: ``str`` + + :param tags: A dictionary or other mapping of strings to strings, + associating tag names with tag values. + :type tags: ``dict`` + + :rtype: ``bool`` + """ + params = {'resourcetype': resource_type, + 'resourceids': ','.join(resource_ids)} + + for i, key in enumerate(tags): + params['tags[%d].key' % i] = key + params['tags[%d].value' % i] = tags[key] + + self._async_request(command='createTags', + params=params, + method='GET') + return True + + def ex_delete_tags(self, resource_ids, resource_type, tag_keys): + """ + Delete tags from a resource. + + :param resource_ids: Resource IDs to be tagged. The resource IDs must + all be associated with the resource_type. + For example, for virtual machines (UserVm) you + can only specify a list of virtual machine IDs. + :type resource_ids: ``list`` of resource IDs + + :param resource_type: Resource type (eg: UserVm) + :type resource_type: ``str`` + + :param tag_keys: A list of keys to delete. CloudStack only requires + the keys from the key/value pair. + :type tag_keys: ``list`` + + :rtype: ``bool`` + """ + params = {'resourcetype': resource_type, + 'resourceids': ','.join(resource_ids)} + + for i, key in enumerate(tag_keys): + params['tags[%s].key' % i] = key + + self._async_request(command='deleteTags', + params=params, + method='GET') + + return True + + def _to_node(self, data, public_ips=None): + """ + :param data: Node data object. + :type data: ``dict`` + + :param public_ips: A list of additional IP addresses belonging to + this node. 
(optional) + :type public_ips: ``list`` or ``None`` + """ + id = data['id'] + + if 'name' in data: + name = data['name'] + elif 'displayname' in data: + name = data['displayname'] + else: + name = None + + state = self.NODE_STATE_MAP[data['state']] + + public_ips = public_ips if public_ips else [] + private_ips = [] + + for nic in data['nic']: + if is_private_subnet(nic['ipaddress']): + private_ips.append(nic['ipaddress']) + else: + public_ips.append(nic['ipaddress']) + + security_groups = data.get('securitygroup', []) + + if security_groups: + security_groups = [sg['name'] for sg in security_groups] + + created = data.get('created', False) + + extra = self._get_extra_dict(data, + RESOURCE_EXTRA_ATTRIBUTES_MAP['node']) + + # Add additional parameters to extra + extra['security_group'] = security_groups + extra['ip_addresses'] = [] + extra['ip_forwarding_rules'] = [] + extra['port_forwarding_rules'] = [] + extra['created'] = created + + if 'tags' in data: + extra['tags'] = self._get_resource_tags(data['tags']) + + node = CloudStackNode(id=id, name=name, state=state, + public_ips=public_ips, private_ips=private_ips, + driver=self, extra=extra) + return node + + def _to_key_pairs(self, data): + key_pairs = [self._to_key_pair(data=item) for item in data] + return key_pairs + + def _to_key_pair(self, data): + key_pair = KeyPair(name=data['name'], + fingerprint=data['fingerprint'], + public_key=data.get('publickey', None), + private_key=data.get('privatekey', None), + driver=self) + return key_pair + + def _get_resource_tags(self, tag_set): + """ + Parse tags from the provided element and return a dictionary with + key/value pairs. 
+ + :param tag_set: A list of key/value tag pairs + :type tag_set: ``list``` + + :rtype: ``dict`` + """ + tags = {} + + for tag in tag_set: + for key, value in tag.iteritems(): + key = tag['key'] + value = tag['value'] + tags[key] = value + + return tags + + def _get_extra_dict(self, response, mapping): + """ + Extract attributes from the element based on rules provided in the + mapping dictionary. + + :param response: The JSON response to parse the values from. + :type response: ``dict`` + + :param mapping: Dictionary with the extra layout + :type mapping: ``dict`` + + :rtype: ``dict`` + """ + extra = {} + for attribute, values in mapping.items(): + transform_func = values['transform_func'] + value = response.get(values['key_name'], None) + + if value is not None: + extra[attribute] = transform_func(value) + else: + extra[attribute] = None + + return extra diff -Nru libcloud-0.5.0/libcloud/compute/drivers/digitalocean.py libcloud-0.15.1/libcloud/compute/drivers/digitalocean.py --- libcloud-0.5.0/libcloud/compute/drivers/digitalocean.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/digitalocean.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,224 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Digital Ocean Driver +""" + +from libcloud.utils.py3 import httplib + +from libcloud.common.base import ConnectionUserAndKey, JsonResponse +from libcloud.compute.types import Provider, NodeState, InvalidCredsError +from libcloud.compute.base import NodeDriver +from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation + + +class DigitalOceanResponse(JsonResponse): + def parse_error(self): + if self.status == httplib.FOUND and '/api/error' in self.body: + # Hacky, but DigitalOcean error responses are awful + raise InvalidCredsError(self.body) + elif self.status == httplib.UNAUTHORIZED: + body = self.parse_body() + raise InvalidCredsError(body['message']) + else: + body = self.parse_body() + + if 'error_message' in body: + error = '%s (code: %s)' % (body['error_message'], self.status) + else: + error = body + return error + + +class SSHKey(object): + def __init__(self, id, name, pub_key): + self.id = id + self.name = name + self.pub_key = pub_key + + def __repr__(self): + return (('') % + (self.id, self.name, self.pub_key)) + + +class DigitalOceanConnection(ConnectionUserAndKey): + """ + Connection class for the DigitalOcean driver. + """ + + host = 'api.digitalocean.com' + responseCls = DigitalOceanResponse + + def add_default_params(self, params): + """ + Add parameters that are necessary for every request + + This method adds ``client_id`` and ``api_key`` to + the request. + """ + params['client_id'] = self.user_id + params['api_key'] = self.key + return params + + +class DigitalOceanNodeDriver(NodeDriver): + """ + DigitalOceanNode node driver. 
+ """ + + connectionCls = DigitalOceanConnection + + type = Provider.DIGITAL_OCEAN + name = 'Digital Ocean' + website = 'https://www.digitalocean.com' + + NODE_STATE_MAP = {'new': NodeState.PENDING, + 'off': NodeState.REBOOTING, + 'active': NodeState.RUNNING} + + def list_nodes(self): + data = self.connection.request('/droplets').object['droplets'] + return list(map(self._to_node, data)) + + def list_locations(self): + data = self.connection.request('/regions').object['regions'] + return list(map(self._to_location, data)) + + def list_images(self): + data = self.connection.request('/images').object['images'] + return list(map(self._to_image, data)) + + def list_sizes(self): + data = self.connection.request('/sizes').object['sizes'] + return list(map(self._to_size, data)) + + def create_node(self, name, size, image, location, ex_ssh_key_ids=None, + **kwargs): + """ + Create a node. + + :keyword ex_ssh_key_ids: A list of ssh key ids which will be added + to the server. (optional) + :type ex_ssh_key_ids: ``list`` of ``str`` + + :return: The newly created node. + :rtype: :class:`Node` + """ + params = {'name': name, 'size_id': size.id, 'image_id': image.id, + 'region_id': location.id} + + if ex_ssh_key_ids: + params['ssh_key_ids'] = ','.join(ex_ssh_key_ids) + + data = self.connection.request('/droplets/new', params=params).object + return self._to_node(data=data['droplet']) + + def reboot_node(self, node): + res = self.connection.request('/droplets/%s/reboot/' % (node.id)) + return res.status == httplib.OK + + def destroy_node(self, node): + params = {'scrub_data': '1'} + res = self.connection.request('/droplets/%s/destroy/' % (node.id), + params=params) + return res.status == httplib.OK + + def ex_rename_node(self, node, name): + params = {'name': name} + res = self.connection.request('/droplets/%s/rename/' % (node.id), + params=params) + return res.status == httplib.OK + + def ex_list_ssh_keys(self): + """ + List all the available SSH keys. 
+ + :return: Available SSH keys. + :rtype: ``list`` of :class:`SSHKey` + """ + data = self.connection.request('/ssh_keys').object['ssh_keys'] + return list(map(self._to_ssh_key, data)) + + def ex_create_ssh_key(self, name, ssh_key_pub): + """ + Create a new SSH key. + + :param name: Key name (required) + :type name: ``str`` + + :param name: Valid public key string (required) + :type name: ``str`` + """ + params = {'name': name, 'ssh_pub_key': ssh_key_pub} + data = self.connection.request('/ssh_keys/new/', method='GET', + params=params).object + assert 'ssh_key' in data + return self._to_ssh_key(data=data['ssh_key']) + + def ex_destroy_ssh_key(self, key_id): + """ + Delete an existing SSH key. + + :param key_id: SSH key id (required) + :type key_id: ``str`` + """ + res = self.connection.request('/ssh_keys/%s/destroy/' % (key_id)) + return res.status == httplib.OK + + def _to_node(self, data): + extra_keys = ['backups_active', 'region_id'] + if 'status' in data: + state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN) + else: + state = NodeState.UNKNOWN + + if 'ip_address' in data and data['ip_address'] is not None: + public_ips = [data['ip_address']] + else: + public_ips = [] + + extra = {} + for key in extra_keys: + if key in data: + extra[key] = data[key] + + node = Node(id=data['id'], name=data['name'], state=state, + public_ips=public_ips, private_ips=None, extra=extra, + driver=self) + return node + + def _to_image(self, data): + extra = {'distribution': data['distribution']} + return NodeImage(id=data['id'], name=data['name'], extra=extra, + driver=self) + + def _to_location(self, data): + return NodeLocation(id=data['id'], name=data['name'], country=None, + driver=self) + + def _to_size(self, data): + ram = data['name'].lower() + + if 'mb' in ram: + ram = int(ram.replace('mb', '')) + elif 'gb' in ram: + ram = int(ram.replace('gb', '')) * 1024 + + return NodeSize(id=data['id'], name=data['name'], ram=ram, disk=0, + bandwidth=0, price=0, 
driver=self) + + def _to_ssh_key(self, data): + return SSHKey(id=data['id'], name=data['name'], + pub_key=data.get('ssh_pub_key', None)) diff -Nru libcloud-0.5.0/libcloud/compute/drivers/dreamhost.py libcloud-0.15.1/libcloud/compute/drivers/dreamhost.py --- libcloud-0.5.0/libcloud/compute/drivers/dreamhost.py 2011-05-21 11:07:38.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/dreamhost.py 2014-06-11 14:27:59.000000000 +0000 @@ -16,14 +16,9 @@ DreamHost Driver """ -try: - import json -except: - import simplejson as json - import copy -from libcloud.common.base import ConnectionKey, Response +from libcloud.common.base import ConnectionKey, JsonResponse from libcloud.common.types import InvalidCredsError from libcloud.compute.base import Node, NodeDriver, NodeSize from libcloud.compute.base import NodeImage @@ -35,45 +30,44 @@ DH_PS_SIZES = { 'minimum': { - 'id' : 'minimum', - 'name' : 'Minimum DH PS size', - 'ram' : 300, - 'disk' : None, - 'bandwidth' : None + 'id': 'minimum', + 'name': 'Minimum DH PS size', + 'ram': 300, + 'disk': None, + 'bandwidth': None }, 'maximum': { - 'id' : 'maximum', - 'name' : 'Maximum DH PS size', - 'ram' : 4000, - 'disk' : None, - 'bandwidth' : None + 'id': 'maximum', + 'name': 'Maximum DH PS size', + 'ram': 4000, + 'disk': None, + 'bandwidth': None }, 'default': { - 'id' : 'default', - 'name' : 'Default DH PS size', - 'ram' : 2300, - 'disk' : None, - 'bandwidth' : None + 'id': 'default', + 'name': 'Default DH PS size', + 'ram': 2300, + 'disk': None, + 'bandwidth': None }, 'low': { - 'id' : 'low', - 'name' : 'DH PS with 1GB RAM', - 'ram' : 1000, - 'disk' : None, - 'bandwidth' : None + 'id': 'low', + 'name': 'DH PS with 1GB RAM', + 'ram': 1000, + 'disk': None, + 'bandwidth': None }, 'high': { - 'id' : 'high', - 'name' : 'DH PS with 3GB RAM', - 'ram' : 3000, - 'disk' : None, - 'bandwidth' : None + 'id': 'high', + 'name': 'DH PS with 3GB RAM', + 'ram': 3000, + 'disk': None, + 'bandwidth': None }, } class 
DreamhostAPIException(Exception): - def __str__(self): return self.args[0] @@ -81,13 +75,13 @@ return "" % (self.args[0]) -class DreamhostResponse(Response): +class DreamhostResponse(JsonResponse): """ Response class for DreamHost PS """ def parse_body(self): - resp = json.loads(self.body) + resp = super(DreamhostResponse, self).parse_body() if resp['result'] != 'success': raise Exception(self._api_parse_error(resp)) return resp['data'] @@ -105,6 +99,7 @@ else: raise DreamhostAPIException("Unknown problem: %s" % (self.body)) + class DreamhostConnection(ConnectionKey): """ Connection class to connect to DreamHost's API servers @@ -121,7 +116,7 @@ """ params['key'] = self.key params['format'] = self.format - #params['unique_id'] = generate_unique_id() + # params['unique_id'] = generate_unique_id() return params @@ -132,6 +127,7 @@ type = Provider.DREAMHOST api_name = 'dreamhost' name = "Dreamhost" + website = 'http://dreamhost.com/' connectionCls = DreamhostConnection _sizes = DH_PS_SIZES @@ -139,35 +135,35 @@ def create_node(self, **kwargs): """Create a new Dreamhost node - See L{NodeDriver.create_node} for more keyword args. 
+ @inherits: :class:`NodeDriver.create_node` - @keyword ex_movedata: Copy all your existing users to this new PS - @type ex_movedata: C{str} + :keyword ex_movedata: Copy all your existing users to this new PS + :type ex_movedata: ``str`` """ size = kwargs['size'].ram params = { - 'cmd' : 'dreamhost_ps-add_ps', - 'movedata' : kwargs.get('movedata', 'no'), - 'type' : kwargs['image'].name, - 'size' : size + 'cmd': 'dreamhost_ps-add_ps', + 'movedata': kwargs.get('movedata', 'no'), + 'type': kwargs['image'].name, + 'size': size } data = self.connection.request('/', params).object return Node( - id = data['added_web'], - name = data['added_web'], - state = NodeState.PENDING, - public_ip = [], - private_ip = [], - driver = self.connection.driver, - extra = { - 'type' : kwargs['image'].name + id=data['added_web'], + name=data['added_web'], + state=NodeState.PENDING, + public_ips=[], + private_ips=[], + driver=self.connection.driver, + extra={ + 'type': kwargs['image'].name } ) def destroy_node(self, node): params = { - 'cmd' : 'dreamhost_ps-remove_ps', - 'ps' : node.id + 'cmd': 'dreamhost_ps-remove_ps', + 'ps': node.id } try: return self.connection.request('/', params).success() @@ -176,8 +172,8 @@ def reboot_node(self, node): params = { - 'cmd' : 'dreamhost_ps-reboot', - 'ps' : node.id + 'cmd': 'dreamhost_ps-reboot', + 'ps': node.id } try: return self.connection.request('/', params).success() @@ -195,17 +191,17 @@ images = [] for img in data: images.append(NodeImage( - id = img['image'], - name = img['image'], - driver = self.connection.driver + id=img['image'], + name=img['image'], + driver=self.connection.driver )) return images def list_sizes(self, **kwargs): sizes = [] - for key, values in self._sizes.iteritems(): + for key, values in self._sizes.items(): attributes = copy.deepcopy(values) - attributes.update({ 'price': self._get_size_price(size_id=key) }) + attributes.update({'price': self._get_size_price(size_id=key)}) 
sizes.append(NodeSize(driver=self.connection.driver, **attributes)) return sizes @@ -215,17 +211,14 @@ 'You cannot select a location for ' 'DreamHost Private Servers at this time.') - ############################################ - # Private Methods (helpers and extensions) # - ############################################ def _resize_node(self, node, size): if (size < 300 or size > 4000): return False params = { - 'cmd' : 'dreamhost_ps-set_size', - 'ps' : node.id, - 'size' : size + 'cmd': 'dreamhost_ps-set_size', + 'ps': node.id, + 'size': size } try: return self.connection.request('/', params).success() @@ -237,13 +230,13 @@ Convert the data from a DreamhostResponse object into a Node """ return Node( - id = data['ps'], - name = data['ps'], - state = NodeState.UNKNOWN, - public_ip = [data['ip']], - private_ip = [], - driver = self.connection.driver, - extra = { - 'current_size' : data['memory_mb'], - 'account_id' : data['account_id'], - 'type' : data['type']}) + id=data['ps'], + name=data['ps'], + state=NodeState.UNKNOWN, + public_ips=[data['ip']], + private_ips=[], + driver=self.connection.driver, + extra={ + 'current_size': data['memory_mb'], + 'account_id': data['account_id'], + 'type': data['type']}) diff -Nru libcloud-0.5.0/libcloud/compute/drivers/dummy.py libcloud-0.15.1/libcloud/compute/drivers/dummy.py --- libcloud-0.5.0/libcloud/compute/drivers/dummy.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/dummy.py 2014-06-11 14:27:59.000000000 +0000 @@ -21,9 +21,12 @@ import socket import struct -from libcloud.base import ConnectionKey, NodeDriver, NodeSize, NodeLocation -from libcloud.compute.base import NodeImage, Node -from libcloud.compute.types import Provider,NodeState +from libcloud.common.base import ConnectionKey +from libcloud.compute.base import NodeImage, NodeSize, Node +from libcloud.compute.base import NodeDriver, NodeLocation +from libcloud.compute.base import KeyPair +from libcloud.compute.types import Provider, 
NodeState + class DummyConnection(ConnectionKey): """ @@ -33,6 +36,7 @@ def connect(self, host=None, port=None): pass + class DummyNodeDriver(NodeDriver): """ Dummy node driver @@ -43,7 +47,7 @@ >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> node=driver.create_node() - >>> node.public_ip[0] + >>> node.public_ips[0] '127.0.0.3' >>> node.name 'dummy-3' @@ -61,48 +65,61 @@ """ name = "Dummy Node Provider" + website = 'http://example.com' type = Provider.DUMMY def __init__(self, creds): + """ + :param creds: Credentials + :type creds: ``str`` + + :rtype: ``None`` + """ self.creds = creds try: - num = int(creds) + num = int(creds) except ValueError: - num = None + num = None if num: - self.nl = [] - startip = _ip_to_int('127.0.0.1') - for i in xrange(num): - ip = _int_to_ip(startip + i) - self.nl.append( - Node(id=i, - name='dummy-%d' % (i), - state=NodeState.RUNNING, - public_ip=[ip], - private_ip=[], - driver=self, - extra={'foo': 'bar'}) - ) + self.nl = [] + startip = _ip_to_int('127.0.0.1') + for i in range(num): + ip = _int_to_ip(startip + i) + self.nl.append( + Node(id=i, + name='dummy-%d' % (i), + state=NodeState.RUNNING, + public_ips=[ip], + private_ips=[], + driver=self, + extra={'foo': 'bar'}) + ) else: - self.nl = [ - Node(id=1, - name='dummy-1', - state=NodeState.RUNNING, - public_ip=['127.0.0.1'], - private_ip=[], - driver=self, - extra={'foo': 'bar'}), - Node(id=2, - name='dummy-2', - state=NodeState.RUNNING, - public_ip=['127.0.0.1'], - private_ip=[], - driver=self, - extra={'foo': 'bar'}), - ] + self.nl = [ + Node(id=1, + name='dummy-1', + state=NodeState.RUNNING, + public_ips=['127.0.0.1'], + private_ips=[], + driver=self, + extra={'foo': 'bar'}), + Node(id=2, + name='dummy-2', + state=NodeState.RUNNING, + public_ips=['127.0.0.1'], + private_ips=[], + driver=self, + extra={'foo': 'bar'}), + ] self.connection = DummyConnection(self.creds) def get_uuid(self, unique_field=None): + """ + + :param 
unique_field: Unique field + :type unique_field: ``bool`` + :rtype: :class:`UUID` + """ return str(uuid.uuid4()) def list_nodes(self): @@ -125,8 +142,14 @@ As more nodes are added, list_nodes will return them >>> node=driver.create_node() - >>> sorted([node.name for node in driver.list_nodes()]) + >>> node.size.id + 's1' + >>> node.image.id + 'i2' + >>> sorted([n.name for n in driver.list_nodes()]) ['dummy-1', 'dummy-2', 'dummy-3'] + + @inherits: :class:`NodeDriver.list_nodes` """ return self.nl @@ -149,6 +172,8 @@ True Please note, dummy nodes never recover from the reboot. + + @inherits: :class:`NodeDriver.reboot_node` """ node.state = NodeState.REBOOTING @@ -161,15 +186,18 @@ >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) >>> from libcloud.compute.types import NodeState - >>> node = [node for node in driver.list_nodes() if node.name == 'dummy-1'][0] + >>> node = [node for node in driver.list_nodes() if + ... node.name == 'dummy-1'][0] >>> node.state == NodeState.RUNNING True >>> driver.destroy_node(node) True >>> node.state == NodeState.RUNNING False - >>> [node for node in driver.list_nodes() if node.name == 'dummy-1'] + >>> [n for n in driver.list_nodes() if n.name == 'dummy-1'] [] + + @inherits: :class:`NodeDriver.destroy_node` """ node.state = NodeState.TERMINATED @@ -184,6 +212,8 @@ >>> driver = DummyNodeDriver(0) >>> sorted([image.name for image in driver.list_images()]) ['Slackware 4', 'Ubuntu 9.04', 'Ubuntu 9.10'] + + @inherits: :class:`NodeDriver.list_images` """ return [ NodeImage(id=1, name="Ubuntu 9.10", driver=self), @@ -199,6 +229,8 @@ >>> driver = DummyNodeDriver(0) >>> sorted([size.ram for size in driver.list_sizes()]) [128, 512, 4096, 8192] + + @inherits: :class:`NodeDriver.list_images` """ return [ @@ -225,10 +257,10 @@ driver=self), NodeSize(id=4, name="XXL Big", - ram=4096*2, - disk=32*4, - bandwidth=2500*3, - price=32*2, + ram=4096 * 2, + disk=32 * 4, + bandwidth=2500 * 3, + price=32 * 2, 
driver=self), ] @@ -238,8 +270,11 @@ >>> from libcloud.compute.drivers.dummy import DummyNodeDriver >>> driver = DummyNodeDriver(0) - >>> sorted([loc.name + " in " + loc.country for loc in driver.list_locations()]) + >>> sorted([loc.name + " in " + loc.country for loc in + ... driver.list_locations()]) ['Island Datacenter in FJ', 'London Loft in GB', "Paul's Room in US"] + + @inherits: :class:`NodeDriver.list_locations` """ return [ NodeLocation(id=1, @@ -274,24 +309,41 @@ True >>> sorted([node.name for node in driver.list_nodes()]) ['dummy-1', 'dummy-2', 'dummy-4'] + + @inherits: :class:`NodeDriver.create_node` """ l = len(self.nl) + 1 n = Node(id=l, name='dummy-%d' % l, state=NodeState.RUNNING, - public_ip=['127.0.0.%d' % l], - private_ip=[], + public_ips=['127.0.0.%d' % l], + private_ips=[], driver=self, + size=NodeSize(id='s1', name='foo', ram=2048, + disk=160, bandwidth=None, price=0.0, + driver=self), + image=NodeImage(id='i2', name='image', driver=self), extra={'foo': 'bar'}) self.nl.append(n) return n + def import_key_pair_from_string(self, name, key_material): + key_pair = KeyPair(name=name, + public_key=key_material, + fingerprint='fingerprint', + private_key='private_key', + driver=self) + return key_pair + + def _ip_to_int(ip): return socket.htonl(struct.unpack('I', socket.inet_aton(ip))[0]) + def _int_to_ip(ip): return socket.inet_ntoa(struct.pack('I', socket.ntohl(ip))) if __name__ == "__main__": import doctest + doctest.testmod() diff -Nru libcloud-0.5.0/libcloud/compute/drivers/ec2.py libcloud-0.15.1/libcloud/compute/drivers/ec2.py --- libcloud-0.5.0/libcloud/compute/drivers/ec2.py 2011-05-10 15:36:21.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/ec2.py 2014-07-02 18:47:55.000000000 +0000 @@ -14,42 +14,80 @@ # limitations under the License. """ -Amazon EC2 driver +Amazon EC2, Eucalyptus, Nimbus and Outscale drivers. 
""" + +import re +import sys import base64 -import hmac -import os -import time -import urllib import copy +import warnings -from hashlib import sha256 -from xml.etree import ElementTree as ET - -from libcloud.utils import fixxpath, findtext, findattr, findall -from libcloud.common.base import ConnectionUserAndKey -from libcloud.common.aws import AWSBaseResponse -from libcloud.common.types import InvalidCredsError, MalformedResponseError, LibcloudError +try: + from lxml import etree as ET +except ImportError: + from xml.etree import ElementTree as ET + +from libcloud.utils.py3 import b, basestring, ensure_string + +from libcloud.utils.xml import fixxpath, findtext, findattr, findall +from libcloud.utils.publickey import get_pubkey_ssh2_fingerprint +from libcloud.utils.publickey import get_pubkey_comment +from libcloud.utils.iso8601 import parse_date +from libcloud.common.aws import AWSBaseResponse, SignedAWSConnection +from libcloud.common.types import (InvalidCredsError, MalformedResponseError, + LibcloudError) from libcloud.compute.providers import Provider -from libcloud.compute.types import NodeState from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize -from libcloud.compute.base import NodeImage - -EC2_US_EAST_HOST = 'ec2.us-east-1.amazonaws.com' -EC2_US_WEST_HOST = 'ec2.us-west-1.amazonaws.com' -EC2_EU_WEST_HOST = 'ec2.eu-west-1.amazonaws.com' -EC2_AP_SOUTHEAST_HOST = 'ec2.ap-southeast-1.amazonaws.com' -EC2_AP_NORTHEAST_HOST = 'ec2.ap-northeast-1.amazonaws.com' - -API_VERSION = '2010-08-31' - -NAMESPACE = "http://ec2.amazonaws.com/doc/%s/" % (API_VERSION) +from libcloud.compute.base import NodeImage, StorageVolume, VolumeSnapshot +from libcloud.compute.base import KeyPair +from libcloud.compute.types import NodeState, KeyPairDoesNotExistError + +__all__ = [ + 'API_VERSION', + 'NAMESPACE', + 'INSTANCE_TYPES', + 'OUTSCALE_INSTANCE_TYPES', + 'OUTSCALE_SAS_REGION_DETAILS', + 'OUTSCALE_INC_REGION_DETAILS', + 'DEFAULT_EUCA_API_VERSION', + 
'EUCA_NAMESPACE', + + 'EC2NodeDriver', + 'BaseEC2NodeDriver', + + 'NimbusNodeDriver', + 'EucNodeDriver', + + 'OutscaleSASNodeDriver', + 'OutscaleINCNodeDriver', + + 'EC2NodeLocation', + 'EC2ReservedNode', + 'EC2SecurityGroup', + 'EC2Network', + 'EC2NetworkSubnet', + 'EC2NetworkInterface', + 'EC2RouteTable', + 'EC2Route', + 'EC2SubnetAssociation', + 'ExEC2AvailabilityZone', + + 'IdempotentParamError' +] + +API_VERSION = '2013-10-15' +NAMESPACE = 'http://ec2.amazonaws.com/doc/%s/' % (API_VERSION) + +# Eucalyptus Constants +DEFAULT_EUCA_API_VERSION = '3.3.0' +EUCA_NAMESPACE = 'http://msgs.eucalyptus.com/%s' % (DEFAULT_EUCA_API_VERSION) """ Sizes must be hardcoded, because Amazon doesn't provide an API to fetch them. From http://aws.amazon.com/ec2/instance-types/ """ -EC2_INSTANCE_TYPES = { +INSTANCE_TYPES = { 't1.micro': { 'id': 't1.micro', 'name': 'Micro Instance', @@ -64,6 +102,13 @@ 'disk': 160, 'bandwidth': None }, + 'm1.medium': { + 'id': 'm1.medium', + 'name': 'Medium Instance', + 'ram': 3700, + 'disk': 410, + 'bandwidth': None + }, 'm1.large': { 'id': 'm1.large', 'name': 'Large Instance', @@ -113,6 +158,34 @@ 'disk': 1690, 'bandwidth': None }, + 'm3.medium': { + 'id': 'm3.medium', + 'name': 'Medium Instance', + 'ram': 3840, + 'disk': 4000, + 'bandwidth': None + }, + 'm3.large': { + 'id': 'm3.large', + 'name': 'Large Instance', + 'ram': 7168, + 'disk': 32000, + 'bandwidth': None + }, + 'm3.xlarge': { + 'id': 'm3.xlarge', + 'name': 'Extra Large Instance', + 'ram': 15360, + 'disk': 80000, + 'bandwidth': None + }, + 'm3.2xlarge': { + 'id': 'm3.2xlarge', + 'name': 'Double Extra Large Instance', + 'ram': 30720, + 'disk': 160000, + 'bandwidth': None + }, 'cg1.4xlarge': { 'id': 'cg1.4xlarge', 'name': 'Cluster GPU Quadruple Extra Large Instance', @@ -120,6 +193,13 @@ 'disk': 1690, 'bandwidth': None }, + 'g2.2xlarge': { + 'id': 'g2.2xlarge', + 'name': 'Cluster GPU G2 Double Extra Large Instance', + 'ram': 15000, + 'disk': 60, + 'bandwidth': None, + }, 'cc1.4xlarge': { 
'id': 'cc1.4xlarge', 'name': 'Cluster Compute Quadruple Extra Large Instance', @@ -127,15 +207,1309 @@ 'disk': 1690, 'bandwidth': None }, + 'cc2.8xlarge': { + 'id': 'cc2.8xlarge', + 'name': 'Cluster Compute Eight Extra Large Instance', + 'ram': 63488, + 'disk': 3370, + 'bandwidth': None + }, + # c3 instances have 2 SSDs of the specified disk size + 'c3.large': { + 'id': 'c3.large', + 'name': 'Compute Optimized Large Instance', + 'ram': 3750, + 'disk': 16, + 'bandwidth': None + }, + 'c3.xlarge': { + 'id': 'c3.xlarge', + 'name': 'Compute Optimized Extra Large Instance', + 'ram': 7000, + 'disk': 40, + 'bandwidth': None + }, + 'c3.2xlarge': { + 'id': 'c3.2xlarge', + 'name': 'Compute Optimized Double Extra Large Instance', + 'ram': 15000, + 'disk': 80, + 'bandwidth': None + }, + 'c3.4xlarge': { + 'id': 'c3.4xlarge', + 'name': 'Compute Optimized Quadruple Extra Large Instance', + 'ram': 30000, + 'disk': 160, + 'bandwidth': None + }, + 'c3.8xlarge': { + 'id': 'c3.8xlarge', + 'name': 'Compute Optimized Eight Extra Large Instance', + 'ram': 60000, + 'disk': 320, + 'bandwidth': None + }, + 'cr1.8xlarge': { + 'id': 'cr1.8xlarge', + 'name': 'High Memory Cluster Eight Extra Large', + 'ram': 244000, + 'disk': 240, + 'bandwidth': None + }, + 'hs1.4xlarge': { + 'id': 'hs1.4xlarge', + 'name': 'High Storage Quadruple Extra Large Instance', + 'ram': 61952, + 'disk': 2048, + 'bandwidth': None + }, + 'hs1.8xlarge': { + 'id': 'hs1.8xlarge', + 'name': 'High Storage Eight Extra Large Instance', + 'ram': 119808, + 'disk': 48000, + 'bandwidth': None + }, + # i2 instances have up to eight SSD drives + 'i2.xlarge': { + 'id': 'i2.xlarge', + 'name': 'High Storage Optimized Extra Large Instance', + 'ram': 31232, + 'disk': 800, + 'bandwidth': None + }, + 'i2.2xlarge': { + 'id': 'i2.2xlarge', + 'name': 'High Storage Optimized Double Extra Large Instance', + 'ram': 62464, + 'disk': 1600, + 'bandwidth': None + }, + 'i2.4xlarge': { + 'id': 'i2.4xlarge', + 'name': 'High Storage Optimized Quadruple 
Large Instance', + 'ram': 124928, + 'disk': 3200, + 'bandwidth': None + }, + 'i2.8xlarge': { + 'id': 'i2.8xlarge', + 'name': 'High Storage Optimized Eight Extra Large Instance', + 'ram': 249856, + 'disk': 6400, + 'bandwidth': None + }, + # 1x SSD + 'r3.large': { + 'id': 'r3.large', + 'name': 'Memory Optimized Large instance', + 'ram': 15000, + 'disk': 32, + 'bandwidth': None + }, + 'r3.xlarge': { + 'id': 'r3.xlarge', + 'name': 'Memory Optimized Extra Large instance', + 'ram': 30500, + 'disk': 80, + 'bandwidth': None + }, + 'r3.2xlarge': { + 'id': 'r3.2xlarge', + 'name': 'Memory Optimized Double Extra Large instance', + 'ram': 61000, + 'disk': 160, + 'bandwidth': None + }, + 'r3.4xlarge': { + 'id': 'r3.4xlarge', + 'name': 'Memory Optimized Quadruple Extra Large instance', + 'ram': 122000, + 'disk': 320, + 'bandwidth': None + }, + 'r3.8xlarge': { + 'id': 'r3.8xlarge', + 'name': 'Memory Optimized Eight Extra Large instance', + 'ram': 244000, + 'disk': 320, # x2 + 'bandwidth': None + } +} + +REGION_DETAILS = { + # US East (Northern Virginia) Region + 'us-east-1': { + 'endpoint': 'ec2.us-east-1.amazonaws.com', + 'api_name': 'ec2_us_east', + 'country': 'USA', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'm3.medium', + 'm3.large', + 'm3.xlarge', + 'm3.2xlarge', + 'c1.medium', + 'c1.xlarge', + 'cc2.8xlarge', + 'c3.large', + 'c3.xlarge', + 'c3.2xlarge', + 'c3.4xlarge', + 'c3.8xlarge', + 'cg1.4xlarge', + 'g2.2xlarge', + 'cr1.8xlarge', + 'hs1.8xlarge', + 'i2.xlarge', + 'i2.2xlarge', + 'i2.4xlarge', + 'i2.8xlarge', + 'r3.large', + 'r3.xlarge', + 'r3.2xlarge', + 'r3.4xlarge', + 'r3.8xlarge' + ] + }, + # US West (Northern California) Region + 'us-west-1': { + 'endpoint': 'ec2.us-west-1.amazonaws.com', + 'api_name': 'ec2_us_west', + 'country': 'USA', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 
'm2.4xlarge', + 'm3.medium', + 'm3.large', + 'm3.xlarge', + 'm3.2xlarge', + 'c1.medium', + 'c1.xlarge', + 'g2.2xlarge', + 'c3.large', + 'c3.xlarge', + 'c3.2xlarge', + 'c3.4xlarge', + 'c3.8xlarge', + 'i2.xlarge', + 'i2.2xlarge', + 'i2.4xlarge', + 'i2.8xlarge', + 'r3.large', + 'r3.xlarge', + 'r3.2xlarge', + 'r3.4xlarge', + 'r3.8xlarge' + ] + }, + # US West (Oregon) Region + 'us-west-2': { + 'endpoint': 'ec2.us-west-2.amazonaws.com', + 'api_name': 'ec2_us_west_oregon', + 'country': 'US', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'm3.medium', + 'm3.large', + 'm3.xlarge', + 'm3.2xlarge', + 'c1.medium', + 'c1.xlarge', + 'g2.2xlarge', + 'c3.large', + 'c3.xlarge', + 'c3.2xlarge', + 'c3.4xlarge', + 'c3.8xlarge', + 'hs1.8xlarge', + 'cc2.8xlarge', + 'i2.xlarge', + 'i2.2xlarge', + 'i2.4xlarge', + 'i2.8xlarge', + 'r3.large', + 'r3.xlarge', + 'r3.2xlarge', + 'r3.4xlarge', + 'r3.8xlarge' + ] + }, + # EU (Ireland) Region + 'eu-west-1': { + 'endpoint': 'ec2.eu-west-1.amazonaws.com', + 'api_name': 'ec2_eu_west', + 'country': 'Ireland', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'm3.medium', + 'm3.large', + 'm3.xlarge', + 'm3.2xlarge', + 'c1.medium', + 'c1.xlarge', + 'g2.2xlarge', + 'c3.large', + 'c3.xlarge', + 'c3.2xlarge', + 'c3.4xlarge', + 'c3.8xlarge', + 'hs1.8xlarge', + 'cc2.8xlarge', + 'i2.xlarge', + 'i2.2xlarge', + 'i2.4xlarge', + 'i2.8xlarge', + 'r3.large', + 'r3.xlarge', + 'r3.2xlarge', + 'r3.4xlarge', + 'r3.8xlarge' + ] + }, + # Asia Pacific (Singapore) Region + 'ap-southeast-1': { + 'endpoint': 'ec2.ap-southeast-1.amazonaws.com', + 'api_name': 'ec2_ap_southeast', + 'country': 'Singapore', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'm3.medium', + 'm3.large', + 'm3.xlarge', + 
'm3.2xlarge', + 'c1.medium', + 'c1.xlarge', + 'c3.large', + 'c3.xlarge', + 'c3.2xlarge', + 'c3.4xlarge', + 'c3.8xlarge', + 'hs1.8xlarge', + 'i2.xlarge', + 'i2.2xlarge', + 'i2.4xlarge', + 'i2.8xlarge', + ] + }, + # Asia Pacific (Tokyo) Region + 'ap-northeast-1': { + 'endpoint': 'ec2.ap-northeast-1.amazonaws.com', + 'api_name': 'ec2_ap_northeast', + 'country': 'Japan', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'm3.medium', + 'm3.large', + 'm3.xlarge', + 'm3.2xlarge', + 'c1.medium', + 'g2.2xlarge', + 'c1.xlarge', + 'c3.large', + 'c3.xlarge', + 'c3.2xlarge', + 'c3.4xlarge', + 'c3.8xlarge', + 'hs1.8xlarge', + 'i2.xlarge', + 'i2.2xlarge', + 'i2.4xlarge', + 'i2.8xlarge', + 'r3.large', + 'r3.xlarge', + 'r3.2xlarge', + 'r3.4xlarge', + 'r3.8xlarge' + ] + }, + # South America (Sao Paulo) Region + 'sa-east-1': { + 'endpoint': 'ec2.sa-east-1.amazonaws.com', + 'api_name': 'ec2_sa_east', + 'country': 'Brazil', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'm3.medium', + 'm3.large', + 'm3.xlarge', + 'm3.2xlarge', + 'c1.medium', + 'c1.xlarge' + ] + }, + # Asia Pacific (Sydney) Region + 'ap-southeast-2': { + 'endpoint': 'ec2.ap-southeast-2.amazonaws.com', + 'api_name': 'ec2_ap_southeast_2', + 'country': 'Australia', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'm3.medium', + 'm3.large', + 'm3.xlarge', + 'm3.2xlarge', + 'c1.medium', + 'c1.xlarge', + 'c3.large', + 'c3.xlarge', + 'c3.2xlarge', + 'c3.4xlarge', + 'c3.8xlarge', + 'hs1.8xlarge', + 'i2.xlarge', + 'i2.2xlarge', + 'i2.4xlarge', + 'i2.8xlarge', + 'r3.large', + 'r3.xlarge', + 'r3.2xlarge', + 'r3.4xlarge', + 'r3.8xlarge' + ] + }, + 'nimbus': { + # Nimbus clouds have 3 EC2-style instance types but their particular + # RAM allocations are 
configured by the admin + 'country': 'custom', + 'instance_types': [ + 'm1.small', + 'm1.large', + 'm1.xlarge' + ] + } +} + + +""" +Sizes must be hardcoded because Outscale doesn't provide an API to fetch them. +Outscale cloud instances share some names with EC2 but have differents +specifications so declare them in another constant. +""" +OUTSCALE_INSTANCE_TYPES = { + 't1.micro': { + 'id': 't1.micro', + 'name': 'Micro Instance', + 'ram': 615, + 'disk': 0, + 'bandwidth': None + }, + 'm1.small': { + 'id': 'm1.small', + 'name': 'Standard Small Instance', + 'ram': 1740, + 'disk': 150, + 'bandwidth': None + }, + 'm1.medium': { + 'id': 'm1.medium', + 'name': 'Standard Medium Instance', + 'ram': 3840, + 'disk': 420, + 'bandwidth': None + }, + 'm1.large': { + 'id': 'm1.large', + 'name': 'Standard Large Instance', + 'ram': 7680, + 'disk': 840, + 'bandwidth': None + }, + 'm1.xlarge': { + 'id': 'm1.xlarge', + 'name': 'Standard Extra Large Instance', + 'ram': 15360, + 'disk': 1680, + 'bandwidth': None + }, + 'c1.medium': { + 'id': 'c1.medium', + 'name': 'Compute Optimized Medium Instance', + 'ram': 1740, + 'disk': 340, + 'bandwidth': None + }, + 'c1.xlarge': { + 'id': 'c1.xlarge', + 'name': 'Compute Optimized Extra Large Instance', + 'ram': 7168, + 'disk': 1680, + 'bandwidth': None + }, + 'c3.large': { + 'id': 'c3.large', + 'name': 'Compute Optimized Large Instance', + 'ram': 3840, + 'disk': 32, + 'bandwidth': None + }, + 'c3.xlarge': { + 'id': 'c3.xlarge', + 'name': 'Compute Optimized Extra Large Instance', + 'ram': 7168, + 'disk': 80, + 'bandwidth': None + }, + 'c3.2xlarge': { + 'id': 'c3.2xlarge', + 'name': 'Compute Optimized Double Extra Large Instance', + 'ram': 15359, + 'disk': 160, + 'bandwidth': None + }, + 'c3.4xlarge': { + 'id': 'c3.4xlarge', + 'name': 'Compute Optimized Quadruple Extra Large Instance', + 'ram': 30720, + 'disk': 320, + 'bandwidth': None + }, + 'c3.8xlarge': { + 'id': 'c3.8xlarge', + 'name': 'Compute Optimized Eight Extra Large Instance', + 'ram': 
61440, + 'disk': 640, + 'bandwidth': None + }, + 'm2.xlarge': { + 'id': 'm2.xlarge', + 'name': 'High Memory Extra Large Instance', + 'ram': 17510, + 'disk': 420, + 'bandwidth': None + }, + 'm2.2xlarge': { + 'id': 'm2.2xlarge', + 'name': 'High Memory Double Extra Large Instance', + 'ram': 35020, + 'disk': 840, + 'bandwidth': None + }, + 'm2.4xlarge': { + 'id': 'm2.4xlarge', + 'name': 'High Memory Quadruple Extra Large Instance', + 'ram': 70042, + 'disk': 1680, + 'bandwidth': None + }, + 'nv1.small': { + 'id': 'nv1.small', + 'name': 'GPU Small Instance', + 'ram': 1739, + 'disk': 150, + 'bandwidth': None + }, + 'nv1.medium': { + 'id': 'nv1.medium', + 'name': 'GPU Medium Instance', + 'ram': 3839, + 'disk': 420, + 'bandwidth': None + }, + 'nv1.large': { + 'id': 'nv1.large', + 'name': 'GPU Large Instance', + 'ram': 7679, + 'disk': 840, + 'bandwidth': None + }, + 'nv1.xlarge': { + 'id': 'nv1.xlarge', + 'name': 'GPU Extra Large Instance', + 'ram': 15358, + 'disk': 1680, + 'bandwidth': None + }, + 'g2.2xlarge': { + 'id': 'g2.2xlarge', + 'name': 'GPU Double Extra Large Instance', + 'ram': 15360, + 'disk': 60, + 'bandwidth': None + }, + 'cc1.4xlarge': { + 'id': 'cc1.4xlarge', + 'name': 'Cluster Compute Quadruple Extra Large Instance', + 'ram': 24576, + 'disk': 1680, + 'bandwidth': None + }, + 'cc2.8xlarge': { + 'id': 'cc2.8xlarge', + 'name': 'Cluster Compute Eight Extra Large Instance', + 'ram': 65536, + 'disk': 3360, + 'bandwidth': None + }, + 'hi1.xlarge': { + 'id': 'hi1.xlarge', + 'name': 'High Storage Extra Large Instance', + 'ram': 15361, + 'disk': 1680, + 'bandwidth': None + }, + 'm3.xlarge': { + 'id': 'm3.xlarge', + 'name': 'High Storage Optimized Extra Large Instance', + 'ram': 15357, + 'disk': 0, + 'bandwidth': None + }, + 'm3.2xlarge': { + 'id': 'm3.2xlarge', + 'name': 'High Storage Optimized Double Extra Large Instance', + 'ram': 30720, + 'disk': 0, + 'bandwidth': None + }, + 'm3s.xlarge': { + 'id': 'm3s.xlarge', + 'name': 'High Storage Optimized Extra Large 
Instance', + 'ram': 15359, + 'disk': 0, + 'bandwidth': None + }, + 'm3s.2xlarge': { + 'id': 'm3s.2xlarge', + 'name': 'High Storage Optimized Double Extra Large Instance', + 'ram': 30719, + 'disk': 0, + 'bandwidth': None + }, + 'cr1.8xlarge': { + 'id': 'cr1.8xlarge', + 'name': 'Memory Optimized Eight Extra Large Instance', + 'ram': 249855, + 'disk': 240, + 'bandwidth': None + }, + 'os1.2xlarge': { + 'id': 'os1.2xlarge', + 'name': 'Memory Optimized, High Storage, Passthrough NIC Double Extra ' + 'Large Instance', + 'ram': 65536, + 'disk': 60, + 'bandwidth': None + }, + 'os1.4xlarge': { + 'id': 'os1.4xlarge', + 'name': 'Memory Optimized, High Storage, Passthrough NIC Quadruple Ext' + 'ra Large Instance', + 'ram': 131072, + 'disk': 120, + 'bandwidth': None + }, + 'os1.8xlarge': { + 'id': 'os1.8xlarge', + 'name': 'Memory Optimized, High Storage, Passthrough NIC Eight Extra L' + 'arge Instance', + 'ram': 249856, + 'disk': 500, + 'bandwidth': None + }, + 'oc1.4xlarge': { + 'id': 'oc1.4xlarge', + 'name': 'Outscale Quadruple Extra Large Instance', + 'ram': 24575, + 'disk': 1680, + 'bandwidth': None + }, + 'oc2.8xlarge': { + 'id': 'oc2.8xlarge', + 'name': 'Outscale Eight Extra Large Instance', + 'ram': 65535, + 'disk': 3360, + 'bandwidth': None + } +} + + +""" +The function manipulating Outscale cloud regions will be overriden because +Outscale instances types are in a separate dict so also declare Outscale cloud +regions in some other constants. 
+""" +OUTSCALE_SAS_REGION_DETAILS = { + 'eu-west-3': { + 'endpoint': 'api-ppd.outscale.com', + 'api_name': 'osc_sas_eu_west_3', + 'country': 'FRANCE', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'c1.medium', + 'c1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'nv1.small', + 'nv1.medium', + 'nv1.large', + 'nv1.xlarge', + 'cc1.4xlarge', + 'cc2.8xlarge', + 'm3.xlarge', + 'm3.2xlarge', + 'cr1.8xlarge', + 'os1.8xlarge' + ] + }, + 'eu-west-1': { + 'endpoint': 'api.eu-west-1.outscale.com', + 'api_name': 'osc_sas_eu_west_1', + 'country': 'FRANCE', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'c1.medium', + 'c1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'nv1.small', + 'nv1.medium', + 'nv1.large', + 'nv1.xlarge', + 'cc1.4xlarge', + 'cc2.8xlarge', + 'm3.xlarge', + 'm3.2xlarge', + 'cr1.8xlarge', + 'os1.8xlarge' + ] + }, + 'us-east-1': { + 'endpoint': 'api.us-east-1.outscale.com', + 'api_name': 'osc_sas_us_east_1', + 'country': 'USA', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'c1.medium', + 'c1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'nv1.small', + 'nv1.medium', + 'nv1.large', + 'nv1.xlarge', + 'cc1.4xlarge', + 'cc2.8xlarge', + 'm3.xlarge', + 'm3.2xlarge', + 'cr1.8xlarge', + 'os1.8xlarge' + ] + } +} + + +OUTSCALE_INC_REGION_DETAILS = { + 'eu-west-1': { + 'endpoint': 'api.eu-west-1.outscale.com', + 'api_name': 'osc_inc_eu_west_1', + 'country': 'FRANCE', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'c1.medium', + 'c1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'nv1.small', + 'nv1.medium', + 'nv1.large', + 'nv1.xlarge', + 'cc1.4xlarge', + 'cc2.8xlarge', + 'm3.xlarge', + 'm3.2xlarge', + 'cr1.8xlarge', + 'os1.8xlarge' + ] + }, + 'eu-west-3': { + 'endpoint': 'api-ppd.outscale.com', + 'api_name': 'osc_inc_eu_west_3', + 
'country': 'FRANCE', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'c1.medium', + 'c1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'nv1.small', + 'nv1.medium', + 'nv1.large', + 'nv1.xlarge', + 'cc1.4xlarge', + 'cc2.8xlarge', + 'm3.xlarge', + 'm3.2xlarge', + 'cr1.8xlarge', + 'os1.8xlarge' + ] + }, + 'us-east-1': { + 'endpoint': 'api.us-east-1.outscale.com', + 'api_name': 'osc_inc_us_east_1', + 'country': 'USA', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'c1.medium', + 'c1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'nv1.small', + 'nv1.medium', + 'nv1.large', + 'nv1.xlarge', + 'cc1.4xlarge', + 'cc2.8xlarge', + 'm3.xlarge', + 'm3.2xlarge', + 'cr1.8xlarge', + 'os1.8xlarge' + ] + } +} + + +""" +Define the extra dictionary for specific resources +""" +RESOURCE_EXTRA_ATTRIBUTES_MAP = { + 'ebs_volume': { + 'snapshot_id': { + 'xpath': 'ebs/snapshotId', + 'transform_func': str + }, + 'volume_id': { + 'xpath': 'ebs/volumeId', + 'transform_func': str + }, + 'volume_size': { + 'xpath': 'ebs/volumeSize', + 'transform_func': int + }, + 'delete': { + 'xpath': 'ebs/deleteOnTermination', + 'transform_func': str + }, + 'volume_type': { + 'xpath': 'ebs/volumeType', + 'transform_func': str + }, + 'iops': { + 'xpath': 'ebs/iops', + 'transform_func': int + } + }, + 'elastic_ip': { + 'allocation_id': { + 'xpath': 'allocationId', + 'transform_func': str, + }, + 'association_id': { + 'xpath': 'associationId', + 'transform_func': str, + }, + 'interface_id': { + 'xpath': 'networkInterfaceId', + 'transform_func': str, + }, + 'owner_id': { + 'xpath': 'networkInterfaceOwnerId', + 'transform_func': str, + }, + 'private_ip': { + 'xpath': 'privateIp', + 'transform_func': str, + } + }, + 'image': { + 'state': { + 'xpath': 'imageState', + 'transform_func': str + }, + 'owner_id': { + 'xpath': 'imageOwnerId', + 'transform_func': str + }, + 'owner_alias': { + 'xpath': 
'imageOwnerAlias', + 'transform_func': str + }, + 'is_public': { + 'xpath': 'isPublic', + 'transform_func': str + }, + 'architecture': { + 'xpath': 'architecture', + 'transform_func': str + }, + 'image_type': { + 'xpath': 'imageType', + 'transform_func': str + }, + 'image_location': { + 'xpath': 'imageLocation', + 'transform_func': str + }, + 'platform': { + 'xpath': 'platform', + 'transform_func': str + }, + 'description': { + 'xpath': 'description', + 'transform_func': str + }, + 'root_device_type': { + 'xpath': 'rootDeviceType', + 'transform_func': str + }, + 'virtualization_type': { + 'xpath': 'virtualizationType', + 'transform_func': str + }, + 'hypervisor': { + 'xpath': 'hypervisor', + 'transform_func': str + }, + 'kernel_id': { + 'xpath': 'kernelId', + 'transform_func': str + }, + 'ramdisk_id': { + 'xpath': 'ramdiskId', + 'transform_func': str + } + }, + 'network': { + 'state': { + 'xpath': 'state', + 'transform_func': str + }, + 'dhcp_options_id': { + 'xpath': 'dhcpOptionsId', + 'transform_func': str + }, + 'instance_tenancy': { + 'xpath': 'instanceTenancy', + 'transform_func': str + }, + 'is_default': { + 'xpath': 'isDefault', + 'transform_func': str + } + }, + 'network_interface': { + 'subnet_id': { + 'xpath': 'subnetId', + 'transform_func': str + }, + 'vpc_id': { + 'xpath': 'vpcId', + 'transform_func': str + }, + 'zone': { + 'xpath': 'availabilityZone', + 'transform_func': str + }, + 'description': { + 'xpath': 'description', + 'transform_func': str + }, + 'owner_id': { + 'xpath': 'ownerId', + 'transform_func': str + }, + 'mac_address': { + 'xpath': 'macAddress', + 'transform_func': str + }, + 'private_dns_name': { + 'xpath': 'privateIpAddressesSet/privateDnsName', + 'transform_func': str + }, + 'source_dest_check': { + 'xpath': 'sourceDestCheck', + 'transform_func': str + } + }, + 'network_interface_attachment': { + 'attachment_id': { + 'xpath': 'attachment/attachmentId', + 'transform_func': str + }, + 'instance_id': { + 'xpath': 
'attachment/instanceId', + 'transform_func': str + }, + 'owner_id': { + 'xpath': 'attachment/instanceOwnerId', + 'transform_func': str + }, + 'device_index': { + 'xpath': 'attachment/deviceIndex', + 'transform_func': int + }, + 'status': { + 'xpath': 'attachment/status', + 'transform_func': str + }, + 'attach_time': { + 'xpath': 'attachment/attachTime', + 'transform_func': parse_date + }, + 'delete': { + 'xpath': 'attachment/deleteOnTermination', + 'transform_func': str + } + }, + 'node': { + 'availability': { + 'xpath': 'placement/availabilityZone', + 'transform_func': str + }, + 'architecture': { + 'xpath': 'architecture', + 'transform_func': str + }, + 'client_token': { + 'xpath': 'clientToken', + 'transform_func': str + }, + 'dns_name': { + 'xpath': 'dnsName', + 'transform_func': str + }, + 'hypervisor': { + 'xpath': 'hypervisor', + 'transform_func': str + }, + 'iam_profile': { + 'xpath': 'iamInstanceProfile/id', + 'transform_func': str + }, + 'image_id': { + 'xpath': 'imageId', + 'transform_func': str + }, + 'instance_id': { + 'xpath': 'instanceId', + 'transform_func': str + }, + 'instance_lifecycle': { + 'xpath': 'instanceLifecycle', + 'transform_func': str + }, + 'instance_tenancy': { + 'xpath': 'placement/tenancy', + 'transform_func': str + }, + 'instance_type': { + 'xpath': 'instanceType', + 'transform_func': str + }, + 'key_name': { + 'xpath': 'keyName', + 'transform_func': str + }, + 'launch_index': { + 'xpath': 'amiLaunchIndex', + 'transform_func': int + }, + 'launch_time': { + 'xpath': 'launchTime', + 'transform_func': str + }, + 'kernel_id': { + 'xpath': 'kernelId', + 'transform_func': str + }, + 'monitoring': { + 'xpath': 'monitoring/state', + 'transform_func': str + }, + 'platform': { + 'xpath': 'platform', + 'transform_func': str + }, + 'private_dns': { + 'xpath': 'privateDnsName', + 'transform_func': str + }, + 'ramdisk_id': { + 'xpath': 'ramdiskId', + 'transform_func': str + }, + 'root_device_type': { + 'xpath': 'rootDeviceType', + 
'transform_func': str + }, + 'root_device_name': { + 'xpath': 'rootDeviceName', + 'transform_func': str + }, + 'reason': { + 'xpath': 'reason', + 'transform_func': str + }, + 'source_dest_check': { + 'xpath': 'sourceDestCheck', + 'transform_func': str + }, + 'status': { + 'xpath': 'instanceState/name', + 'transform_func': str + }, + 'subnet_id': { + 'xpath': 'subnetId', + 'transform_func': str + }, + 'virtualization_type': { + 'xpath': 'virtualizationType', + 'transform_func': str + }, + 'ebs_optimized': { + 'xpath': 'ebsOptimized', + 'transform_func': str + }, + 'vpc_id': { + 'xpath': 'vpcId', + 'transform_func': str + } + }, + 'reserved_node': { + 'instance_type': { + 'xpath': 'instanceType', + 'transform_func': str + }, + 'availability': { + 'xpath': 'availabilityZone', + 'transform_func': str + }, + 'start': { + 'xpath': 'start', + 'transform_func': str + }, + 'duration': { + 'xpath': 'duration', + 'transform_func': int + }, + 'usage_price': { + 'xpath': 'usagePrice', + 'transform_func': float + }, + 'fixed_price': { + 'xpath': 'fixedPrice', + 'transform_func': float + }, + 'instance_count': { + 'xpath': 'instanceCount', + 'transform_func': int + }, + 'description': { + 'xpath': 'productDescription', + 'transform_func': str + }, + 'instance_tenancy': { + 'xpath': 'instanceTenancy', + 'transform_func': str + }, + 'currency_code': { + 'xpath': 'currencyCode', + 'transform_func': str + }, + 'offering_type': { + 'xpath': 'offeringType', + 'transform_func': str + } + }, + 'security_group': { + 'vpc_id': { + 'xpath': 'vpcId', + 'transform_func': str + }, + 'description': { + 'xpath': 'groupDescription', + 'transform_func': str + }, + 'owner_id': { + 'xpath': 'ownerId', + 'transform_func': str + } + }, + 'snapshot': { + 'volume_id': { + 'xpath': 'volumeId', + 'transform_func': str + }, + 'state': { + 'xpath': 'status', + 'transform_func': str + }, + 'description': { + 'xpath': 'description', + 'transform_func': str + }, + 'progress': { + 'xpath': 'progress', + 
'transform_func': str + }, + 'start_time': { + 'xpath': 'startTime', + 'transform_func': parse_date + } + }, + 'subnet': { + 'cidr_block': { + 'xpath': 'cidrBlock', + 'transform_func': str + }, + 'available_ips': { + 'xpath': 'availableIpAddressCount', + 'transform_func': int + }, + 'zone': { + 'xpath': 'availabilityZone', + 'transform_func': str + }, + 'vpc_id': { + 'xpath': 'vpcId', + 'transform_func': str + } + }, + 'volume': { + 'device': { + 'xpath': 'attachmentSet/item/device', + 'transform_func': str + }, + 'iops': { + 'xpath': 'iops', + 'transform_func': int + }, + 'zone': { + 'xpath': 'availabilityZone', + 'transform_func': str + }, + 'create_time': { + 'xpath': 'createTime', + 'transform_func': parse_date + }, + 'state': { + 'xpath': 'status', + 'transform_func': str + }, + 'attach_time': { + 'xpath': 'attachmentSet/item/attachTime', + 'transform_func': parse_date + }, + 'attachment_status': { + 'xpath': 'attachmentSet/item/status', + 'transform_func': str + }, + 'instance_id': { + 'xpath': 'attachmentSet/item/instanceId', + 'transform_func': str + }, + 'delete': { + 'xpath': 'attachmentSet/item/deleteOnTermination', + 'transform_func': str + } + }, + 'route_table': { + 'vpc_id': { + 'xpath': 'vpcId', + 'transform_func': str + } + } } -CLUSTER_INSTANCES_IDS = [ 'cg1.4xlarge', 'cc1.4xlarge' ] +VALID_EC2_REGIONS = REGION_DETAILS.keys() +VALID_EC2_REGIONS = [r for r in VALID_EC2_REGIONS if r != 'nimbus'] -EC2_US_EAST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES) -EC2_US_WEST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES) -EC2_EU_WEST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES) -EC2_AP_SOUTHEAST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES) -EC2_AP_NORTHEAST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES) class EC2NodeLocation(NodeLocation): def __init__(self, id, name, country, driver, availability_zone): @@ -146,12 +1520,14 @@ return (('') % (self.id, self.name, self.country, - self.availability_zone.name, self.driver.name)) + self.availability_zone, self.driver.name)) + class 
EC2Response(AWSBaseResponse): """ EC2 specific response parsing and error handling. """ + def parse_error(self): err_list = [] # Okay, so for Eucalyptus, you can get a 403, with no body, @@ -163,65 +1539,45 @@ try: body = ET.XML(self.body) except: - raise MalformedResponseError("Failed to parse XML", body=self.body, driver=EC2NodeDriver) + raise MalformedResponseError("Failed to parse XML", + body=self.body, driver=EC2NodeDriver) for err in body.findall('Errors/Error'): code, message = err.getchildren() - err_list.append("%s: %s" % (code.text, message.text)) - if code.text == "InvalidClientTokenId": + err_list.append('%s: %s' % (code.text, message.text)) + if code.text == 'InvalidClientTokenId': raise InvalidCredsError(err_list[-1]) - if code.text == "SignatureDoesNotMatch": + if code.text == 'SignatureDoesNotMatch': raise InvalidCredsError(err_list[-1]) - if code.text == "AuthFailure": + if code.text == 'AuthFailure': raise InvalidCredsError(err_list[-1]) - if code.text == "OptInRequired": + if code.text == 'OptInRequired': raise InvalidCredsError(err_list[-1]) - if code.text == "IdempotentParameterMismatch": + if code.text == 'IdempotentParameterMismatch': raise IdempotentParamError(err_list[-1]) - return "\n".join(err_list) + if code.text == 'InvalidKeyPair.NotFound': + # TODO: Use connection context instead + match = re.match(r'.*\'(.+?)\'.*', message.text) -class EC2Connection(ConnectionUserAndKey): - """ - Repersents a single connection to the EC2 Endpoint - """ - - host = EC2_US_EAST_HOST - responseCls = EC2Response + if match: + name = match.groups()[0] + else: + name = None - def add_default_params(self, params): - params['SignatureVersion'] = '2' - params['SignatureMethod'] = 'HmacSHA256' - params['AWSAccessKeyId'] = self.user_id - params['Version'] = API_VERSION - params['Timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', - time.gmtime()) - params['Signature'] = self._get_aws_auth_param(params, self.key, self.action) - return params + raise 
KeyPairDoesNotExistError(name=name, + driver=self.connection.driver) + return '\n'.join(err_list) - def _get_aws_auth_param(self, params, secret_key, path='/'): - """ - Creates the signature required for AWS, per - http://bit.ly/aR7GaQ [docs.amazonwebservices.com]: - StringToSign = HTTPVerb + "\n" + - ValueOfHostHeaderInLowercase + "\n" + - HTTPRequestURI + "\n" + - CanonicalizedQueryString - """ - keys = params.keys() - keys.sort() - pairs = [] - for key in keys: - pairs.append(urllib.quote(key, safe='') + '=' + - urllib.quote(params[key], safe='-_~')) +class EC2Connection(SignedAWSConnection): + """ + Represents a single connection to the EC2 Endpoint. + """ - qs = '&'.join(pairs) - string_to_sign = '\n'.join(('GET', self.host, path, qs)) + version = API_VERSION + host = REGION_DETAILS['us-east-1']['endpoint'] + responseCls = EC2Response - b64_hmac = base64.b64encode( - hmac.new(secret_key, string_to_sign, digestmod=sha256).digest() - ) - return b64_hmac class ExEC2AvailabilityZone(object): """ @@ -229,6 +1585,7 @@ Note: This class is EC2 specific. """ + def __init__(self, name, zone_state, region_name): self.name = name self.zone_state = zone_state @@ -239,699 +1596,3762 @@ 'region_name=%s>') % (self.name, self.zone_state, self.region_name)) -class EC2NodeDriver(NodeDriver): + +class EC2ReservedNode(Node): """ - Amazon EC2 node driver + Class which stores information about EC2 reserved instances/nodes + Inherits from Node and passes in None for name and private/public IPs + + Note: This class is EC2 specific. """ - connectionCls = EC2Connection - type = Provider.EC2 - api_name = 'ec2_us_east' - name = 'Amazon EC2 (us-east-1)' - friendly_name = 'Amazon US N. 
Virginia' - country = 'US' - region_name = 'us-east-1' - path = '/' + def __init__(self, id, state, driver, size=None, image=None, extra=None): + super(EC2ReservedNode, self).__init__(id=id, name=None, state=state, + public_ips=None, + private_ips=None, + driver=driver, extra=extra) - _instance_types = EC2_US_EAST_INSTANCE_TYPES + def __repr__(self): + return (('') % (self.id)) - NODE_STATE_MAP = { - 'pending': NodeState.PENDING, - 'running': NodeState.RUNNING, - 'shutting-down': NodeState.TERMINATED, - 'terminated': NodeState.TERMINATED - } - def _pathlist(self, key, arr): - """ - Converts a key and an array of values into AWS query param format. - """ - params = {} - i = 0 - for value in arr: - i += 1 - params["%s.%s" % (key, i)] = value - return params +class EC2SecurityGroup(object): + """ + Represents information about a Security group - def _get_boolean(self, element): - tag = "{%s}%s" % (NAMESPACE, 'return') - return element.findtext(tag) == 'true' + Note: This class is EC2 specific. + """ + + def __init__(self, id, name, ingress_rules, egress_rules, extra=None): + self.id = id + self.name = name + self.ingress_rules = ingress_rules + self.egress_rules = egress_rules + self.extra = extra or {} + + def __repr__(self): + return (('') + % (self.ip, self.domain, self.instance_id)) + + +class VPCInternetGateway(object): + """ + Class which stores information about VPC Internet Gateways. + + Note: This class is VPC specific. + """ + + def __init__(self, id, name, vpc_id, state, driver, extra=None): + self.id = id + self.name = name + self.vpc_id = vpc_id + self.state = state + self.extra = extra or {} + + def __repr__(self): + return (('') % (self.id)) + + +class EC2RouteTable(object): + """ + Class which stores information about VPC Route Tables. + + Note: This class is VPC specific. + """ + + def __init__(self, id, name, routes, subnet_associations, + propagating_gateway_ids, extra=None): + """ + :param id: The ID of the route table. 
+ :type id: ``str`` + + :param name: The name of the route table. + :type name: ``str`` + + :param routes: A list of routes in the route table. + :type routes: ``list`` of :class:`EC2Route` + + :param subnet_associations: A list of associations between the + route table and one or more subnets. + :type subnet_associations: ``list`` of + :class:`EC2SubnetAssociation` + + :param propagating_gateway_ids: The list of IDs of any virtual + private gateways propagating the + routes. + :type propagating_gateway_ids: ``list`` + """ + + self.id = id + self.name = name + self.routes = routes + self.subnet_associations = subnet_associations + self.propagating_gateway_ids = propagating_gateway_ids + self.extra = extra or {} + + def __repr__(self): + return (('') % (self.id)) + + +class EC2Route(object): + """ + Class which stores information about a Route. + + Note: This class is VPC specific. + """ + + def __init__(self, cidr, gateway_id, instance_id, owner_id, + interface_id, state, origin, vpc_peering_connection_id): + """ + :param cidr: The CIDR block used for the destination match. + :type cidr: ``str`` + + :param gateway_id: The ID of a gateway attached to the VPC. + :type gateway_id: ``str`` + + :param instance_id: The ID of a NAT instance in the VPC. + :type instance_id: ``str`` + + :param owner_id: The AWS account ID of the owner of the instance. + :type owner_id: ``str`` + + :param interface_id: The ID of the network interface. + :type interface_id: ``str`` + + :param state: The state of the route (active | blackhole). + :type state: ``str`` + + :param origin: Describes how the route was created. + :type origin: ``str`` + + :param vpc_peering_connection_id: The ID of the VPC + peering connection. 
+ :type vpc_peering_connection_id: ``str`` + """ + + self.cidr = cidr + self.gateway_id = gateway_id + self.instance_id = instance_id + self.owner_id = owner_id + self.interface_id = interface_id + self.state = state + self.origin = origin + self.vpc_peering_connection_id = vpc_peering_connection_id + + def __repr__(self): + return (('') % (self.cidr)) + + +class EC2SubnetAssociation(object): + """ + Class which stores information about Route Table associated with + a given Subnet in a VPC + + Note: This class is VPC specific. + """ + + def __init__(self, id, route_table_id, subnet_id, main=False): + """ + :param id: The ID of the subent association in the VPC. + :type id: ``str`` + + :param route_table_id: The ID of a route table in the VPC. + :type route_table_id: ``str`` + + :param subnet_id: The ID of a subnet in the VPC. + :type subnet_id: ``str`` + + :param main: If true, means this is a main VPC route table. + :type main: ``bool`` + """ + + self.id = id + self.route_table_id = route_table_id + self.subnet_id = subnet_id + self.main = main + + def __repr__(self): + return (('') % (self.id)) + + +class BaseEC2NodeDriver(NodeDriver): + """ + Base Amazon EC2 node driver. + + Used for main EC2 and other derivate driver classes to inherit from it. + """ + + connectionCls = EC2Connection + features = {'create_node': ['ssh_key']} + path = '/' + + NODE_STATE_MAP = { + 'pending': NodeState.PENDING, + 'running': NodeState.RUNNING, + 'shutting-down': NodeState.UNKNOWN, + 'terminated': NodeState.TERMINATED + } + + def list_nodes(self, ex_node_ids=None, ex_filters=None): + """ + List all nodes + + Ex_node_ids parameter is used to filter the list of + nodes that should be returned. Only the nodes + with the corresponding node ids will be returned. + + :param ex_node_ids: List of ``node.id`` + :type ex_node_ids: ``list`` of ``str`` + + :param ex_filters: The filters so that the response includes + information for only certain nodes. 
+ :type ex_filters: ``dict`` + + :rtype: ``list`` of :class:`Node` + """ + + params = {'Action': 'DescribeInstances'} + + if ex_node_ids: + params.update(self._pathlist('InstanceId', ex_node_ids)) + + if ex_filters: + params.update(self._build_filters(ex_filters)) + + elem = self.connection.request(self.path, params=params).object + + nodes = [] + for rs in findall(element=elem, xpath='reservationSet/item', + namespace=NAMESPACE): + nodes += self._to_nodes(rs, 'instancesSet/item') + + nodes_elastic_ips_mappings = self.ex_describe_addresses(nodes) + + for node in nodes: + ips = nodes_elastic_ips_mappings[node.id] + node.public_ips.extend(ips) + + return nodes + + def list_sizes(self, location=None): + available_types = REGION_DETAILS[self.region_name]['instance_types'] + sizes = [] + + for instance_type in available_types: + attributes = INSTANCE_TYPES[instance_type] + attributes = copy.deepcopy(attributes) + price = self._get_size_price(size_id=instance_type) + attributes.update({'price': price}) + sizes.append(NodeSize(driver=self, **attributes)) + return sizes + + def list_images(self, location=None, ex_image_ids=None, ex_owner=None, + ex_executableby=None): + """ + List all images + @inherits: :class:`NodeDriver.list_images` + + Ex_image_ids parameter is used to filter the list of + images that should be returned. Only the images + with the corresponding image ids will be returned. + + Ex_owner parameter is used to filter the list of + images that should be returned. Only the images + with the corresponding owner will be returned. + Valid values: amazon|aws-marketplace|self|all|aws id + + Ex_executableby parameter describes images for which + the specified user has explicit launch permissions. + The user can be an AWS account ID, self to return + images for which the sender of the request has + explicit launch permissions, or all to return + images with public launch permissions. 
+ Valid values: all|self|aws id + + :param ex_image_ids: List of ``NodeImage.id`` + :type ex_image_ids: ``list`` of ``str`` + + :param ex_owner: Owner name + :type ex_owner: ``str`` + + :param ex_executableby: Executable by + :type ex_executableby: ``str`` + + :rtype: ``list`` of :class:`NodeImage` + """ + params = {'Action': 'DescribeImages'} + + if ex_owner: + params.update({'Owner.1': ex_owner}) + + if ex_executableby: + params.update({'ExecutableBy.1': ex_executableby}) + + if ex_image_ids: + for index, image_id in enumerate(ex_image_ids): + index += 1 + params.update({'ImageId.%s' % (index): image_id}) + + images = self._to_images( + self.connection.request(self.path, params=params).object + ) + return images + + def get_image(self, image_id): + """ + Get an image based on a image_id + + :param image_id: Image identifier + :type image_id: ``str`` + + :return: A NodeImage object + :rtype: :class:`NodeImage` + + """ + images = self.list_images(ex_image_ids=[image_id]) + image = images[0] + + return image + + def list_locations(self): + locations = [] + for index, availability_zone in \ + enumerate(self.ex_list_availability_zones()): + locations.append(EC2NodeLocation( + index, availability_zone.name, self.country, self, + availability_zone) + ) + return locations + + def list_volumes(self, node=None): + params = { + 'Action': 'DescribeVolumes', + } + if node: + filters = {'attachment.instance-id': node.id} + params.update(self._build_filters(filters)) + + response = self.connection.request(self.path, params=params).object + volumes = [self._to_volume(el) for el in response.findall( + fixxpath(xpath='volumeSet/item', namespace=NAMESPACE)) + ] + return volumes + + def create_node(self, **kwargs): + """ + Create a new EC2 node. 
+ + Reference: http://bit.ly/8ZyPSy [docs.amazonwebservices.com] + + @inherits: :class:`NodeDriver.create_node` + + :keyword ex_keyname: The name of the key pair + :type ex_keyname: ``str`` + + :keyword ex_userdata: User data + :type ex_userdata: ``str`` + + :keyword ex_security_groups: A list of names of security groups to + assign to the node. + :type ex_security_groups: ``list`` + + :keyword ex_metadata: Key/Value metadata to associate with a node + :type ex_metadata: ``dict`` + + :keyword ex_mincount: Minimum number of instances to launch + :type ex_mincount: ``int`` + + :keyword ex_maxcount: Maximum number of instances to launch + :type ex_maxcount: ``int`` + + :keyword ex_clienttoken: Unique identifier to ensure idempotency + :type ex_clienttoken: ``str`` + + :keyword ex_blockdevicemappings: ``list`` of ``dict`` block device + mappings. + :type ex_blockdevicemappings: ``list`` of ``dict`` + + :keyword ex_iamprofile: Name or ARN of IAM profile + :type ex_iamprofile: ``str`` + + :keyword ex_ebs_optimized: EBS-Optimized if True + :type ex_ebs_optimized: ``bool`` + + :keyword ex_subnet: The subnet to launch the instance into. 
+ :type ex_subnet: :class:`.EC2Subnet` + """ + image = kwargs["image"] + size = kwargs["size"] + params = { + 'Action': 'RunInstances', + 'ImageId': image.id, + 'MinCount': str(kwargs.get('ex_mincount', '1')), + 'MaxCount': str(kwargs.get('ex_maxcount', '1')), + 'InstanceType': size.id + } + + if 'ex_security_groups' in kwargs and 'ex_securitygroup' in kwargs: + raise ValueError('You can only supply ex_security_groups or' + ' ex_securitygroup') + + # ex_securitygroup is here for backward compatibility + ex_security_groups = kwargs.get('ex_security_groups', None) + ex_securitygroup = kwargs.get('ex_securitygroup', None) + security_groups = ex_security_groups or ex_securitygroup + + if security_groups: + if not isinstance(security_groups, (tuple, list)): + security_groups = [security_groups] + + for sig in range(len(security_groups)): + params['SecurityGroup.%d' % (sig + 1,)] =\ + security_groups[sig] + + if 'location' in kwargs: + availability_zone = getattr(kwargs['location'], + 'availability_zone', None) + if availability_zone: + if availability_zone.region_name != self.region_name: + raise AttributeError('Invalid availability zone: %s' + % (availability_zone.name)) + params['Placement.AvailabilityZone'] = availability_zone.name + + if 'auth' in kwargs and 'ex_keyname' in kwargs: + raise AttributeError('Cannot specify auth and ex_keyname together') + + if 'auth' in kwargs: + auth = self._get_and_check_auth(kwargs['auth']) + key = self.ex_find_or_import_keypair_by_key_material(auth.pubkey) + params['KeyName'] = key['keyName'] + + if 'ex_keyname' in kwargs: + params['KeyName'] = kwargs['ex_keyname'] + + if 'ex_userdata' in kwargs: + params['UserData'] = base64.b64encode(b(kwargs['ex_userdata']))\ + .decode('utf-8') + + if 'ex_clienttoken' in kwargs: + params['ClientToken'] = kwargs['ex_clienttoken'] + + if 'ex_blockdevicemappings' in kwargs: + params.update(self._get_block_device_mapping_params( + kwargs['ex_blockdevicemappings'])) + + if 'ex_iamprofile' in kwargs: 
+ if not isinstance(kwargs['ex_iamprofile'], basestring): + raise AttributeError('ex_iamprofile not string') + + if kwargs['ex_iamprofile'].startswith('arn:aws:iam:'): + params['IamInstanceProfile.Arn'] = kwargs['ex_iamprofile'] + else: + params['IamInstanceProfile.Name'] = kwargs['ex_iamprofile'] + + if 'ex_ebs_optimized' in kwargs: + params['EbsOptimized'] = kwargs['ex_ebs_optimized'] + + if 'ex_subnet' in kwargs: + params['SubnetId'] = kwargs['ex_subnet'].id + + object = self.connection.request(self.path, params=params).object + nodes = self._to_nodes(object, 'instancesSet/item') + + for node in nodes: + tags = {'Name': kwargs['name']} + if 'ex_metadata' in kwargs: + tags.update(kwargs['ex_metadata']) + + try: + self.ex_create_tags(resource=node, tags=tags) + except Exception: + continue + + node.name = kwargs['name'] + node.extra.update({'tags': tags}) + + if len(nodes) == 1: + return nodes[0] + else: + return nodes + + def reboot_node(self, node): + params = {'Action': 'RebootInstances'} + params.update(self._pathlist('InstanceId', [node.id])) + res = self.connection.request(self.path, params=params).object + return self._get_boolean(res) + + def destroy_node(self, node): + params = {'Action': 'TerminateInstances'} + params.update(self._pathlist('InstanceId', [node.id])) + res = self.connection.request(self.path, params=params).object + return self._get_terminate_boolean(res) + + def create_volume(self, size, name, location=None, snapshot=None, + ex_volume_type='standard', ex_iops=None): + """ + :param location: Datacenter in which to create a volume in. + :type location: :class:`.ExEC2AvailabilityZone` + + :param ex_volume_type: Type of volume to create. + :type ex_volume_type: ``str`` + + :param iops: The number of I/O operations per second (IOPS) + that the volume supports. Only used if ex_volume_type + is io1. 
+ :type iops: ``int`` + """ + valid_volume_types = ['standard', 'io1', 'g2'] + + params = { + 'Action': 'CreateVolume', + 'Size': str(size)} + + if ex_volume_type and ex_volume_type not in valid_volume_types: + raise ValueError('Invalid volume type specified: %s' % + (ex_volume_type)) + + if location is not None: + params['AvailabilityZone'] = location.availability_zone.name + + if ex_volume_type: + params['VolumeType'] = ex_volume_type + + if ex_volume_type == 'io1' and ex_iops: + params['Iops'] = ex_iops + + volume = self._to_volume( + self.connection.request(self.path, params=params).object, + name=name) + + if self.ex_create_tags(volume, {'Name': name}): + volume.extra['tags']['Name'] = name + + return volume + + def attach_volume(self, node, volume, device): + params = { + 'Action': 'AttachVolume', + 'VolumeId': volume.id, + 'InstanceId': node.id, + 'Device': device} + + self.connection.request(self.path, params=params) + return True + + def detach_volume(self, volume): + params = { + 'Action': 'DetachVolume', + 'VolumeId': volume.id} + + self.connection.request(self.path, params=params) + return True + + def destroy_volume(self, volume): + params = { + 'Action': 'DeleteVolume', + 'VolumeId': volume.id} + response = self.connection.request(self.path, params=params).object + return self._get_boolean(response) + + def create_volume_snapshot(self, volume, name=None): + """ + Create snapshot from volume + + :param volume: Instance of ``StorageVolume`` + :type volume: ``StorageVolume`` + + :param name: Name of snapshot + :type name: ``str`` + + :rtype: :class:`VolumeSnapshot` + """ + params = { + 'Action': 'CreateSnapshot', + 'VolumeId': volume.id, + } + + if name: + params.update({ + 'Description': name, + }) + response = self.connection.request(self.path, params=params).object + snapshot = self._to_snapshot(response, name) + + if name and self.ex_create_tags(snapshot, {'Name': name}): + snapshot.extra['tags']['Name'] = name + + return snapshot + + def 
list_volume_snapshots(self, snapshot): + return self.list_snapshots(snapshot) + + def list_snapshots(self, snapshot=None, owner=None): + """ + Describe all snapshots. + + :param snapshot: If provided, only return snapshot information for the + provided snapshot. + + :param owner: Owner for snapshot: self|amazon|ID + :type owner: ``str`` + + :rtype: ``list`` of :class:`VolumeSnapshot` + """ + params = { + 'Action': 'DescribeSnapshots', + } + if snapshot: + params.update({ + 'SnapshotId.1': snapshot.id, + }) + if owner: + params.update({ + 'Owner.1': owner, + }) + response = self.connection.request(self.path, params=params).object + snapshots = self._to_snapshots(response) + return snapshots + + def destroy_volume_snapshot(self, snapshot): + params = { + 'Action': 'DeleteSnapshot', + 'SnapshotId': snapshot.id + } + response = self.connection.request(self.path, params=params).object + return self._get_boolean(response) + + # Key pair management methods + + def list_key_pairs(self): + params = { + 'Action': 'DescribeKeyPairs' + } + + response = self.connection.request(self.path, params=params) + elems = findall(element=response.object, xpath='keySet/item', + namespace=NAMESPACE) + + key_pairs = self._to_key_pairs(elems=elems) + return key_pairs + + def get_key_pair(self, name): + params = { + 'Action': 'DescribeKeyPairs', + 'KeyName': name + } + + response = self.connection.request(self.path, params=params) + elems = findall(element=response.object, xpath='keySet/item', + namespace=NAMESPACE) + + key_pair = self._to_key_pairs(elems=elems)[0] + return key_pair + + def create_key_pair(self, name): + params = { + 'Action': 'CreateKeyPair', + 'KeyName': name + } + + response = self.connection.request(self.path, params=params) + elem = response.object + key_pair = self._to_key_pair(elem=elem) + return key_pair + + def import_key_pair_from_string(self, name, key_material): + base64key = ensure_string(base64.b64encode(b(key_material))) + + params = { + 'Action': 
'ImportKeyPair', + 'KeyName': name, + 'PublicKeyMaterial': base64key + } + + response = self.connection.request(self.path, params=params) + elem = response.object + key_pair = self._to_key_pair(elem=elem) + return key_pair + + def delete_key_pair(self, key_pair): + params = { + 'Action': 'DeleteKeyPair', + 'KeyName': key_pair.name + } + res = self.connection.request(self.path, params=params).object + + return self._get_boolean(res) + + def copy_image(self, image, source_region, name=None, description=None): + """ + Copy an Amazon Machine Image from the specified source region + to the current region. + + @inherits: :class:`NodeDriver.copy_image` + + :param source_region: The region where the image resides + :type source_region: ``str`` + + :param image: Instance of class NodeImage + :type image: :class:`NodeImage` + + :param name: The name of the new image + :type name: ``str`` + + :param description: The description of the new image + :type description: ``str`` + + :return: Instance of class ``NodeImage`` + :rtype: :class:`NodeImage` + """ + params = {'Action': 'CopyImage', + 'SourceRegion': source_region, + 'SourceImageId': image.id} + + if name is not None: + params['Name'] = name + + if description is not None: + params['Description'] = description + + image = self._to_image( + self.connection.request(self.path, params=params).object) + + return image + + def create_image(self, node, name, description=None, reboot=False, + block_device_mapping=None): + """ + Create an Amazon Machine Image based off of an EBS-backed instance. + + @inherits: :class:`NodeDriver.create_image` + + :param node: Instance of ``Node`` + :type node: :class: `Node` + + :param name: The name for the new image + :type name: ``str`` + + :param block_device_mapping: A dictionary of the disk layout + An example of this dict is included + below. + :type block_device_mapping: ``list`` of ``dict`` + + :param reboot: Whether or not to shutdown the instance before + creation. 
Amazon calls this NoReboot and + sets it to false by default to ensure a + clean image. + :type reboot: ``bool`` + + :param description: An optional description for the new image + :type description: ``str`` + + An example block device mapping dictionary is included: + + mapping = [{'VirtualName': None, + 'Ebs': {'VolumeSize': 10, + 'VolumeType': 'standard', + 'DeleteOnTermination': 'true'}, + 'DeviceName': '/dev/sda1'}] + + :return: Instance of class ``NodeImage`` + :rtype: :class:`NodeImage` + """ + params = {'Action': 'CreateImage', + 'InstanceId': node.id, + 'Name': name, + 'NoReboot': not reboot} + + if description is not None: + params['Description'] = description + + if block_device_mapping is not None: + params.update(self._get_block_device_mapping_params( + block_device_mapping)) + + image = self._to_image( + self.connection.request(self.path, params=params).object) + + return image + + def delete_image(self, image): + """ + Deletes an image at Amazon given a NodeImage object + + @inherits: :class:`NodeDriver.delete_image` + + :param image: Instance of ``NodeImage`` + :type image: :class: `NodeImage` + + :rtype: ``bool`` + """ + params = {'Action': 'DeregisterImage', + 'ImageId': image.id} + + response = self.connection.request(self.path, params=params).object + return self._get_boolean(response) + + def ex_register_image(self, name, description=None, architecture=None, + image_location=None, root_device_name=None, + block_device_mapping=None, kernel_id=None, + ramdisk_id=None): + """ + Registers an Amazon Machine Image based off of an EBS-backed instance. + Can also be used to create images from snapshots. More information + can be found at http://goo.gl/hqZq0a. 
+ + :param name: The name for the AMI being registered + :type name: ``str`` + + :param description: The description of the AMI (optional) + :type description: ``str`` + + :param architecture: The architecture of the AMI (i386/x86_64) + (optional) + :type architecture: ``str`` + + :param image_location: The location of the AMI within Amazon S3 + Required if registering an instance + store-backed AMI + :type image_location: ``str`` + + :param root_device_name: The device name for the root device + Required if registering a EBS-backed AMI + :type root_device_name: ``str`` + + :param block_device_mapping: A dictionary of the disk layout + (optional) + :type block_device_mapping: ``dict`` + + :param kernel_id: Kernel id for AMI (optional) + :type kernel_id: ``str`` + + :param ramdisk_id: RAM disk for AMI (optional) + :type ramdisk_id: ``str`` + + :rtype: :class:`NodeImage` + """ + + params = {'Action': 'RegisterImage', + 'Name': name} + + if description is not None: + params['Description'] = description + + if architecture is not None: + params['Architecture'] = architecture + + if image_location is not None: + params['ImageLocation'] = image_location + + if root_device_name is not None: + params['RootDeviceName'] = root_device_name + + if block_device_mapping is not None: + params.update(self._get_block_device_mapping_params( + block_device_mapping)) + + if kernel_id is not None: + params['KernelId'] = kernel_id + + if ramdisk_id is not None: + params['RamDiskId'] = ramdisk_id + + image = self._to_image( + self.connection.request(self.path, params=params).object + ) + return image + + def ex_list_networks(self, network_ids=None, filters=None): + """ + Return a list of :class:`EC2Network` objects for the + current region. + + :param network_ids: Return only networks matching the provided + network IDs. If not specified, a list of all + the networks in the corresponding region + is returned. 
+ :type network_ids: ``list`` + + :param filters: The filters so that the response includes + information for only certain networks. + :type filters: ``dict`` + + :rtype: ``list`` of :class:`EC2Network` + """ + params = {'Action': 'DescribeVpcs'} + + if network_ids: + params.update(self._pathlist('VpcId', network_ids)) + + if filters: + params.update(self._build_filters(filters)) + + return self._to_networks( + self.connection.request(self.path, params=params).object + ) + + def ex_create_network(self, cidr_block, name=None, + instance_tenancy='default'): + """ + Create a network/VPC + + :param cidr_block: The CIDR block assigned to the network + :type cidr_block: ``str`` + + :param name: An optional name for the network + :type name: ``str`` + + :param instance_tenancy: The allowed tenancy of instances launched + into the VPC. + Valid values: default/dedicated + :type instance_tenancy: ``str`` + + :return: Dictionary of network properties + :rtype: ``dict`` + """ + params = {'Action': 'CreateVpc', + 'CidrBlock': cidr_block, + 'InstanceTenancy': instance_tenancy} + + response = self.connection.request(self.path, params=params).object + element = response.findall(fixxpath(xpath='vpc', + namespace=NAMESPACE))[0] + + network = self._to_network(element, name) + + if name and self.ex_create_tags(network, {'Name': name}): + network.extra['tags']['Name'] = name + + return network + + def ex_delete_network(self, vpc): + """ + Deletes a network/VPC. + + :param vpc: VPC to delete. + :type vpc: :class:`.EC2Network` + + :rtype: ``bool`` + """ + params = {'Action': 'DeleteVpc', 'VpcId': vpc.id} + + res = self.connection.request(self.path, params=params).object + + return self._get_boolean(res) + + def ex_list_subnets(self, subnet_ids=None, filters=None): + """ + Return a list of :class:`EC2NetworkSubnet` objects for the + current region. + + :param subnet_ids: Return only subnets matching the provided + subnet IDs. 
If not specified, a list of all + the subnets in the corresponding region + is returned. + :type subnet_ids: ``list`` + + :param filters: The filters so that the response includes + information for only certain subnets. + :type filters: ``dict`` + + :rtype: ``list`` of :class:`EC2NetworkSubnet` + """ + params = {'Action': 'DescribeSubnets'} + + if subnet_ids: + params.update(self._pathlist('SubnetId', subnet_ids)) + + if filters: + params.update(self._build_filters(filters)) + + return self._to_subnets( + self.connection.request(self.path, params=params).object + ) + + def ex_create_subnet(self, vpc_id, cidr_block, + availability_zone, name=None): + """ + Create a network subnet within a VPC + + :param vpc_id: The ID of the VPC that the subnet should be + associated with + :type vpc_id: ``str`` + + :param cidr_block: The CIDR block assigned to the subnet + :type cidr_block: ``str`` + + :param availability_zone: The availability zone where the subnet + should reside + :type availability_zone: ``str`` + + :param name: An optional name for the network + :type name: ``str`` + + :rtype: :class: `EC2NetworkSubnet` + """ + params = {'Action': 'CreateSubnet', + 'VpcId': vpc_id, + 'CidrBlock': cidr_block, + 'AvailabilityZone': availability_zone} + + response = self.connection.request(self.path, params=params).object + element = response.findall(fixxpath(xpath='subnet', + namespace=NAMESPACE))[0] + + subnet = self._to_subnet(element, name) + + if name and self.ex_create_tags(subnet, {'Name': name}): + subnet.extra['tags']['Name'] = name + + return subnet + + def ex_delete_subnet(self, subnet): + """ + Deletes a VPC subnet. + + :param subnet: The subnet to delete + :type subnet: :class:`.EC2NetworkSubnet` + + :rtype: ``bool`` + """ + params = {'Action': 'DeleteSubnet', 'SubnetId': subnet.id} + + res = self.connection.request(self.path, params=params).object + + return self._get_boolean(res) + + def ex_list_security_groups(self): + """ + List existing Security Groups. 
+ + @note: This is a non-standard extension API, and only works for EC2. + + :rtype: ``list`` of ``str`` + """ + params = {'Action': 'DescribeSecurityGroups'} + response = self.connection.request(self.path, params=params).object + + groups = [] + for group in findall(element=response, xpath='securityGroupInfo/item', + namespace=NAMESPACE): + name = findtext(element=group, xpath='groupName', + namespace=NAMESPACE) + groups.append(name) + + return groups + + def ex_get_security_groups(self, group_ids=None, + group_names=None, filters=None): + """ + Return a list of :class:`EC2SecurityGroup` objects for the + current region. + + :param group_ids: Return only groups matching the provided + group IDs. + :type group_ids: ``list`` + + :param group_names: Return only groups matching the provided + group names. + :type group_ids: ``list`` + + :param filters: The filters so that the response includes + information for only specific security groups. + :type filters: ``dict`` + + :rtype: ``list`` of :class:`EC2SecurityGroup` + """ + + params = {'Action': 'DescribeSecurityGroups'} + + if group_ids: + params.update(self._pathlist('GroupId', group_ids)) + + if group_names: + for name_idx, group_name in enumerate(group_names): + name_idx += 1 # We want 1-based indexes + name_key = 'GroupName.%s' % (name_idx) + params[name_key] = group_name + + if filters: + params.update(self._build_filters(filters)) + + response = self.connection.request(self.path, params=params) + return self._to_security_groups(response.object) + + def ex_create_security_group(self, name, description, vpc_id=None): + """ + Creates a new Security Group in EC2-Classic or a targeted VPC. + + :param name: The name of the security group to Create. + This must be unique. + :type name: ``str`` + + :param description: Human readable description of a Security + Group. 
+ :type description: ``str`` + + :param vpc_id: Optional identifier for VPC networks + :type vpc_id: ``str`` + + :rtype: ``dict`` + """ + params = {'Action': 'CreateSecurityGroup', + 'GroupName': name, + 'GroupDescription': description} + + if vpc_id is not None: + params['VpcId'] = vpc_id + + response = self.connection.request(self.path, params=params).object + group_id = findattr(element=response, xpath='groupId', + namespace=NAMESPACE) + return { + 'group_id': group_id + } + + def ex_delete_security_group_by_id(self, group_id): + """ + Deletes a new Security Group using the group id. + + :param group_id: The ID of the security group + :type group_id: ``str`` + + :rtype: ``bool`` + """ + params = {'Action': 'DeleteSecurityGroup', 'GroupId': group_id} + + res = self.connection.request(self.path, params=params).object + + return self._get_boolean(res) + + def ex_delete_security_group_by_name(self, group_name): + """ + Deletes a new Security Group using the group name. + + :param group_name: The name of the security group + :type group_name: ``str`` + + :rtype: ``bool`` + """ + params = {'Action': 'DeleteSecurityGroup', 'GroupName': group_name} + + res = self.connection.request(self.path, params=params).object + + return self._get_boolean(res) + + def ex_delete_security_group(self, name): + """ + Wrapper method which calls ex_delete_security_group_by_name. + + :param name: The name of the security group + :type name: ``str`` + + :rtype: ``bool`` + """ + return self.ex_delete_security_group_by_name(name) + + def ex_authorize_security_group(self, name, from_port, to_port, cidr_ip, + protocol='tcp'): + """ + Edit a Security Group to allow specific traffic. + + @note: This is a non-standard extension API, and only works for EC2. 
+ + :param name: The name of the security group to edit + :type name: ``str`` + + :param from_port: The beginning of the port range to open + :type from_port: ``str`` + + :param to_port: The end of the port range to open + :type to_port: ``str`` + + :param cidr_ip: The ip to allow traffic for. + :type cidr_ip: ``str`` + + :param protocol: tcp/udp/icmp + :type protocol: ``str`` + + :rtype: ``bool`` + """ + + params = {'Action': 'AuthorizeSecurityGroupIngress', + 'GroupName': name, + 'IpProtocol': protocol, + 'FromPort': str(from_port), + 'ToPort': str(to_port), + 'CidrIp': cidr_ip} + try: + res = self.connection.request( + self.path, params=params.copy()).object + return self._get_boolean(res) + except Exception: + e = sys.exc_info()[1] + if e.args[0].find('InvalidPermission.Duplicate') == -1: + raise e + + def ex_authorize_security_group_ingress(self, id, from_port, to_port, + cidr_ips=None, group_pairs=None, + protocol='tcp'): + """ + Edit a Security Group to allow specific ingress traffic using + CIDR blocks or either a group ID, group name or user ID (account). + + :param id: The id of the security group to edit + :type id: ``str`` + + :param from_port: The beginning of the port range to open + :type from_port: ``int`` + + :param to_port: The end of the port range to open + :type to_port: ``int`` + + :param cidr_ips: The list of ip ranges to allow traffic for. + :type cidr_ips: ``list`` + + :param group_pairs: Source user/group pairs to allow traffic for. 
+ More info can be found at http://goo.gl/stBHJF + + EC2 Classic Example: To allow access from any system + associated with the default group on account 1234567890 + + [{'group_name': 'default', 'user_id': '1234567890'}] + + VPC Example: Allow access from any system associated with + security group sg-47ad482e on your own account + + [{'group_id': ' sg-47ad482e'}] + :type group_pairs: ``list`` of ``dict`` + + :param protocol: tcp/udp/icmp + :type protocol: ``str`` + + :rtype: ``bool`` + """ + + params = self._get_common_security_group_params(id, + protocol, + from_port, + to_port, + cidr_ips, + group_pairs) + + params["Action"] = 'AuthorizeSecurityGroupIngress' + + res = self.connection.request(self.path, params=params).object + + return self._get_boolean(res) + + def ex_authorize_security_group_egress(self, id, from_port, to_port, + cidr_ips, group_pairs=None, + protocol='tcp'): + """ + Edit a Security Group to allow specific egress traffic using + CIDR blocks or either a group ID, group name or user ID (account). + This call is not supported for EC2 classic and only works for VPC + groups. + + :param id: The id of the security group to edit + :type id: ``str`` + + :param from_port: The beginning of the port range to open + :type from_port: ``int`` + + :param to_port: The end of the port range to open + :type to_port: ``int`` + + :param cidr_ips: The list of ip ranges to allow traffic for. + :type cidr_ips: ``list`` + + :param group_pairs: Source user/group pairs to allow traffic for. 
+ More info can be found at http://goo.gl/stBHJF + + EC2 Classic Example: To allow access from any system + associated with the default group on account 1234567890 + + [{'group_name': 'default', 'user_id': '1234567890'}] + + VPC Example: Allow access from any system associated with + security group sg-47ad482e on your own account + + [{'group_id': ' sg-47ad482e'}] + :type group_pairs: ``list`` of ``dict`` + + :param protocol: tcp/udp/icmp + :type protocol: ``str`` + + :rtype: ``bool`` + """ + + params = self._get_common_security_group_params(id, + protocol, + from_port, + to_port, + cidr_ips, + group_pairs) + + params["Action"] = 'AuthorizeSecurityGroupEgress' + + res = self.connection.request(self.path, params=params).object + + return self._get_boolean(res) + + def ex_revoke_security_group_ingress(self, id, from_port, to_port, + cidr_ips=None, group_pairs=None, + protocol='tcp'): + """ + Edit a Security Group to revoke specific ingress traffic using + CIDR blocks or either a group ID, group name or user ID (account). + + :param id: The id of the security group to edit + :type id: ``str`` + + :param from_port: The beginning of the port range to open + :type from_port: ``int`` + + :param to_port: The end of the port range to open + :type to_port: ``int`` + + :param cidr_ips: The list of ip ranges to allow traffic for. + :type cidr_ips: ``list`` + + :param group_pairs: Source user/group pairs to allow traffic for. 
+ More info can be found at http://goo.gl/stBHJF + + EC2 Classic Example: To allow access from any system + associated with the default group on account 1234567890 + + [{'group_name': 'default', 'user_id': '1234567890'}] + + VPC Example: Allow access from any system associated with + security group sg-47ad482e on your own account + + [{'group_id': ' sg-47ad482e'}] + :type group_pairs: ``list`` of ``dict`` + + :param protocol: tcp/udp/icmp + :type protocol: ``str`` + + :rtype: ``bool`` + """ + + params = self._get_common_security_group_params(id, + protocol, + from_port, + to_port, + cidr_ips, + group_pairs) + + params["Action"] = 'RevokeSecurityGroupIngress' + + res = self.connection.request(self.path, params=params).object + + return self._get_boolean(res) + + def ex_revoke_security_group_egress(self, id, from_port, to_port, + cidr_ips=None, group_pairs=None, + protocol='tcp'): + """ + Edit a Security Group to revoke specific egress traffic using + CIDR blocks or either a group ID, group name or user ID (account). + This call is not supported for EC2 classic and only works for + VPC groups. + + :param id: The id of the security group to edit + :type id: ``str`` + + :param from_port: The beginning of the port range to open + :type from_port: ``int`` + + :param to_port: The end of the port range to open + :type to_port: ``int`` + + :param cidr_ips: The list of ip ranges to allow traffic for. + :type cidr_ips: ``list`` + + :param group_pairs: Source user/group pairs to allow traffic for. 
+ More info can be found at http://goo.gl/stBHJF + + EC2 Classic Example: To allow access from any system + associated with the default group on account 1234567890 + + [{'group_name': 'default', 'user_id': '1234567890'}] + + VPC Example: Allow access from any system associated with + security group sg-47ad482e on your own account + + [{'group_id': ' sg-47ad482e'}] + :type group_pairs: ``list`` of ``dict`` + + :param protocol: tcp/udp/icmp + :type protocol: ``str`` + + :rtype: ``bool`` + """ + + params = self._get_common_security_group_params(id, + protocol, + from_port, + to_port, + cidr_ips, + group_pairs) + + params['Action'] = 'RevokeSecurityGroupEgress' + + res = self.connection.request(self.path, params=params).object + + return self._get_boolean(res) + + def ex_authorize_security_group_permissive(self, name): + """ + Edit a Security Group to allow all traffic. + + @note: This is a non-standard extension API, and only works for EC2. + + :param name: The name of the security group to edit + :type name: ``str`` + + :rtype: ``list`` of ``str`` + """ + + results = [] + params = {'Action': 'AuthorizeSecurityGroupIngress', + 'GroupName': name, + 'IpProtocol': 'tcp', + 'FromPort': '0', + 'ToPort': '65535', + 'CidrIp': '0.0.0.0/0'} + try: + results.append( + self.connection.request(self.path, params=params.copy()).object + ) + except Exception: + e = sys.exc_info()[1] + if e.args[0].find("InvalidPermission.Duplicate") == -1: + raise e + params['IpProtocol'] = 'udp' + + try: + results.append( + self.connection.request(self.path, params=params.copy()).object + ) + except Exception: + e = sys.exc_info()[1] + if e.args[0].find("InvalidPermission.Duplicate") == -1: + raise e + + params.update({'IpProtocol': 'icmp', 'FromPort': '-1', 'ToPort': '-1'}) + + try: + results.append( + self.connection.request(self.path, params=params.copy()).object + ) + except Exception: + e = sys.exc_info()[1] + + if e.args[0].find("InvalidPermission.Duplicate") == -1: + raise e + return results 
+ + def ex_list_availability_zones(self, only_available=True): + """ + Return a list of :class:`ExEC2AvailabilityZone` objects for the + current region. + + Note: This is an extension method and is only available for EC2 + driver. + + :keyword only_available: If true, return only availability zones + with state 'available' + :type only_available: ``str`` + + :rtype: ``list`` of :class:`ExEC2AvailabilityZone` + """ + params = {'Action': 'DescribeAvailabilityZones'} + + filters = {'region-name': self.region_name} + if only_available: + filters['state'] = 'available' + + params.update(self._build_filters(filters)) + + result = self.connection.request(self.path, + params=params.copy()).object + + availability_zones = [] + for element in findall(element=result, + xpath='availabilityZoneInfo/item', + namespace=NAMESPACE): + name = findtext(element=element, xpath='zoneName', + namespace=NAMESPACE) + zone_state = findtext(element=element, xpath='zoneState', + namespace=NAMESPACE) + region_name = findtext(element=element, xpath='regionName', + namespace=NAMESPACE) + + availability_zone = ExEC2AvailabilityZone( + name=name, + zone_state=zone_state, + region_name=region_name + ) + availability_zones.append(availability_zone) + + return availability_zones + + def ex_describe_tags(self, resource): + """ + Return a dictionary of tags for a resource (Node or StorageVolume). + + :param resource: resource which should be used + :type resource: :class:`Node` or :class:`StorageVolume` + + :return: dict Node tags + :rtype: ``dict`` + """ + params = {'Action': 'DescribeTags'} + + filters = { + 'resource-id': resource.id, + 'resource-type': 'instance' + } + + params.update(self._build_filters(filters)) + + result = self.connection.request(self.path, params=params).object + + return self._get_resource_tags(result) + + def ex_create_tags(self, resource, tags): + """ + Create tags for a resource (Node or StorageVolume). 
+ + :param resource: Resource to be tagged + :type resource: :class:`Node` or :class:`StorageVolume` + + :param tags: A dictionary or other mapping of strings to strings, + associating tag names with tag values. + :type tags: ``dict`` + + :rtype: ``bool`` + """ + if not tags: + return + + params = {'Action': 'CreateTags', + 'ResourceId.0': resource.id} + for i, key in enumerate(tags): + params['Tag.%d.Key' % i] = key + params['Tag.%d.Value' % i] = tags[key] + + res = self.connection.request(self.path, + params=params.copy()).object + + return self._get_boolean(res) + + def ex_delete_tags(self, resource, tags): + """ + Delete tags from a resource. + + :param resource: Resource to be tagged + :type resource: :class:`Node` or :class:`StorageVolume` + + :param tags: A dictionary or other mapping of strings to strings, + specifying the tag names and tag values to be deleted. + :type tags: ``dict`` + + :rtype: ``bool`` + """ + if not tags: + return + + params = {'Action': 'DeleteTags', + 'ResourceId.0': resource.id} + for i, key in enumerate(tags): + params['Tag.%d.Key' % i] = key + params['Tag.%d.Value' % i] = tags[key] + + res = self.connection.request(self.path, + params=params.copy()).object + + return self._get_boolean(res) + + def ex_get_metadata_for_node(self, node): + """ + Return the metadata associated with the node. + + :param node: Node instance + :type node: :class:`Node` + + :return: A dictionary or other mapping of strings to strings, + associating tag names with tag values. 
+ :rtype tags: ``dict`` + """ + return node.extra['tags'] + + def ex_allocate_address(self, domain='standard'): + """ + Allocate a new Elastic IP address for EC2 classic or VPC + + :param domain: The domain to allocate the new address in + (standard/vpc) + :type domain: ``str`` + + :return: Instance of ElasticIP + :rtype: :class:`ElasticIP` + """ + params = {'Action': 'AllocateAddress'} + + if domain == 'vpc': + params['Domain'] = domain + + response = self.connection.request(self.path, params=params).object + + return self._to_address(response, only_associated=False) + + def ex_release_address(self, elastic_ip, domain=None): + """ + Release an Elastic IP address using the IP (EC2-Classic) or + using the allocation ID (VPC) + + :param elastic_ip: Elastic IP instance + :type elastic_ip: :class:`ElasticIP` + + :param domain: The domain where the IP resides (vpc only) + :type domain: ``str`` + + :return: True on success, False otherwise. + :rtype: ``bool`` + """ + params = {'Action': 'ReleaseAddress'} + + if domain is not None and domain != 'vpc': + raise AttributeError('Domain can only be set to vpc') + + if domain is None: + params['PublicIp'] = elastic_ip.ip + else: + params['AllocationId'] = elastic_ip.extra['allocation_id'] + + response = self.connection.request(self.path, params=params).object + return self._get_boolean(response) + + def ex_describe_all_addresses(self, only_associated=False): + """ + Return all the Elastic IP addresses for this account + optionally, return only addresses associated with nodes + + :param only_associated: If true, return only those addresses + that are associated with an instance. + :type only_associated: ``bool`` + + :return: List of ElasticIP instances. 
+ :rtype: ``list`` of :class:`ElasticIP` + """ + params = {'Action': 'DescribeAddresses'} + + response = self.connection.request(self.path, params=params).object + + # We will send our only_associated boolean over to + # shape how the return data is sent back + return self._to_addresses(response, only_associated) + + def ex_associate_address_with_node(self, node, elastic_ip, domain=None): + """ + Associate an Elastic IP address with a particular node. + + :param node: Node instance + :type node: :class:`Node` + + :param elastic_ip: Elastic IP instance + :type elastic_ip: :class:`ElasticIP` + + :param domain: The domain where the IP resides (vpc only) + :type domain: ``str`` + + :return: A string representation of the association ID which is + required for VPC disassociation. EC2/standard + addresses return None + :rtype: ``None`` or ``str`` + """ + params = {'Action': 'AssociateAddress', 'InstanceId': node.id} + + if domain is not None and domain != 'vpc': + raise AttributeError('Domain can only be set to vpc') + + if domain is None: + params.update({'PublicIp': elastic_ip.ip}) + else: + params.update({'AllocationId': elastic_ip.extra['allocation_id']}) + + response = self.connection.request(self.path, params=params).object + association_id = findtext(element=response, + xpath='associationId', + namespace=NAMESPACE) + return association_id + + def ex_associate_addresses(self, node, elastic_ip, domain=None): + """ + Note: This method has been deprecated in favor of + the ex_associate_address_with_node method. 
+ """ + + return self.ex_associate_address_with_node(node=node, + elastic_ip=elastic_ip, + domain=domain) + + def ex_disassociate_address(self, elastic_ip, domain=None): + """ + Disassociate an Elastic IP address using the IP (EC2-Classic) + or the association ID (VPC) + + :param elastic_ip: ElasticIP instance + :type elastic_ip: :class:`ElasticIP` + + :param domain: The domain where the IP resides (vpc only) + :type domain: ``str`` + + :return: True on success, False otherwise. + :rtype: ``bool`` + """ + params = {'Action': 'DisassociateAddress'} + + if domain is not None and domain != 'vpc': + raise AttributeError('Domain can only be set to vpc') + + if domain is None: + params['PublicIp'] = elastic_ip.ip + + else: + params['AssociationId'] = elastic_ip.extra['association_id'] + + res = self.connection.request(self.path, params=params).object + return self._get_boolean(res) + + def ex_describe_addresses(self, nodes): + """ + Return Elastic IP addresses for all the nodes in the provided list. + + :param nodes: List of :class:`Node` instances + :type nodes: ``list`` of :class:`Node` + + :return: Dictionary where a key is a node ID and the value is a + list with the Elastic IP addresses associated with + this node. 
+ :rtype: ``dict`` + """ + if not nodes: + return {} + + params = {'Action': 'DescribeAddresses'} + + if len(nodes) == 1: + self._add_instance_filter(params, nodes[0]) + + result = self.connection.request(self.path, params=params).object + + node_instance_ids = [node.id for node in nodes] + nodes_elastic_ip_mappings = {} + + # We will set only_associated to True so that we only get back + # IPs which are associated with instances + only_associated = True + + for node_id in node_instance_ids: + nodes_elastic_ip_mappings.setdefault(node_id, []) + for addr in self._to_addresses(result, + only_associated): + + instance_id = addr.instance_id + + if node_id == instance_id: + nodes_elastic_ip_mappings[instance_id].append( + addr.ip) + + return nodes_elastic_ip_mappings + + def ex_describe_addresses_for_node(self, node): + """ + Return a list of Elastic IP addresses associated with this node. + + :param node: Node instance + :type node: :class:`Node` + + :return: list Elastic IP addresses attached to this node. + :rtype: ``list`` of ``str`` + """ + node_elastic_ips = self.ex_describe_addresses([node]) + return node_elastic_ips[node.id] + + # Network interface management methods + + def ex_list_network_interfaces(self): + """ + Return all network interfaces + + :return: List of EC2NetworkInterface instances + :rtype: ``list`` of :class `EC2NetworkInterface` + """ + params = {'Action': 'DescribeNetworkInterfaces'} + + return self._to_interfaces( + self.connection.request(self.path, params=params).object + ) + + def ex_create_network_interface(self, subnet, name=None, + description=None, + private_ip_address=None): + """ + Create a network interface within a VPC subnet. 
+ + :param subnet: EC2NetworkSubnet instance + :type subnet: :class:`EC2NetworkSubnet` + + :param name: Optional name of the interface + :type name: ``str`` + + :param description: Optional description of the network interface + :type description: ``str`` + + :param private_ip_address: Optional address to assign as the + primary private IP address of the + interface. If one is not provided then + Amazon will automatically auto-assign + an available IP. EC2 allows assignment + of multiple IPs, but this will be + the primary. + :type private_ip_address: ``str`` + + :return: EC2NetworkInterface instance + :rtype: :class `EC2NetworkInterface` + """ + params = {'Action': 'CreateNetworkInterface', + 'SubnetId': subnet.id} + + if description: + params['Description'] = description + + if private_ip_address: + params['PrivateIpAddress'] = private_ip_address + + response = self.connection.request(self.path, params=params).object + + element = response.findall(fixxpath(xpath='networkInterface', + namespace=NAMESPACE))[0] + + interface = self._to_interface(element, name) + + if name and self.ex_create_tags(interface, {'Name': name}): + interface.extra['tags']['Name'] = name + + return interface + + def ex_delete_network_interface(self, network_interface): + """ + Deletes a network interface. + + :param network_interface: EC2NetworkInterface instance + :type network_interface: :class:`EC2NetworkInterface` + + :rtype: ``bool`` + """ + params = {'Action': 'DeleteNetworkInterface', + 'NetworkInterfaceId': network_interface.id} + + res = self.connection.request(self.path, params=params).object + + return self._get_boolean(res) + + def ex_attach_network_interface_to_node(self, network_interface, + node, device_index): + """ + Attatch a network interface to an instance. 
+ + :param network_interface: EC2NetworkInterface instance + :type network_interface: :class:`EC2NetworkInterface` + + :param node: Node instance + :type node: :class:`Node` + + :param device_index: The interface device index + :type device_index: ``int`` + + :return: String representation of the attachment id. + This is required to detach the interface. + :rtype: ``str`` + """ + params = {'Action': 'AttachNetworkInterface', + 'NetworkInterfaceId': network_interface.id, + 'InstanceId': node.id, + 'DeviceIndex': device_index} + + response = self.connection.request(self.path, params=params).object + attachment_id = findattr(element=response, xpath='attachmentId', + namespace=NAMESPACE) + + return attachment_id + + def ex_detach_network_interface(self, attachment_id, force=False): + """ + Detatch a network interface from an instance. + + :param attachment_id: The attachment ID associated with the + interface + :type attachment_id: ``str`` + + :param force: Forces the detachment. + :type force: ``bool`` + + :return: ``True`` on successful detachment, ``False`` otherwise. + :rtype: ``bool`` + """ + params = {'Action': 'DetachNetworkInterface', + 'AttachmentId': attachment_id} + + if force: + params['Force'] = True + + res = self.connection.request(self.path, params=params).object + + return self._get_boolean(res) + + def ex_modify_instance_attribute(self, node, attributes): + """ + Modify node attributes. + A list of valid attributes can be found at http://goo.gl/gxcj8 + + :param node: Node instance + :type node: :class:`Node` + + :param attributes: Dictionary with node attributes + :type attributes: ``dict`` + + :return: True on success, False otherwise. 
+        :rtype: ``bool``
+        """
+        attributes = attributes or {}
+        attributes.update({'InstanceId': node.id})
+
+        params = {'Action': 'ModifyInstanceAttribute'}
+        params.update(attributes)
+
+        res = self.connection.request(self.path,
+                                      params=params.copy()).object
+
+        return self._get_boolean(res)
+
+    def ex_modify_image_attribute(self, image, attributes):
+        """
+        Modify image attributes.
+
+        :param image: NodeImage instance
+        :type image: :class:`NodeImage`
+
+        :param attributes: Dictionary with node attributes
+        :type attributes: ``dict``
+
+        :return: True on success, False otherwise.
+        :rtype: ``bool``
+        """
+        attributes = attributes or {}
+        attributes.update({'ImageId': image.id})
+
+        params = {'Action': 'ModifyImageAttribute'}
+        params.update(attributes)
+
+        res = self.connection.request(self.path,
+                                      params=params.copy()).object
+
+        return self._get_boolean(res)
+
+    def ex_change_node_size(self, node, new_size):
+        """
+        Change the node size.
+        Note: Node must be turned off before changing the size.
+
+        :param node: Node instance
+        :type node: :class:`Node`
+
+        :param new_size: NodeSize instance
+        :type new_size: :class:`NodeSize`
+
+        :return: True on success, False otherwise.
+ :rtype: ``bool`` + """ + if 'instancetype' in node.extra: + current_instance_type = node.extra['instancetype'] + + if current_instance_type == new_size.id: + raise ValueError('New instance size is the same as' + + 'the current one') + + attributes = {'InstanceType.Value': new_size.id} + return self.ex_modify_instance_attribute(node, attributes) + + def ex_start_node(self, node): + """ + Start the node by passing in the node object, does not work with + instance store backed instances + + :param node: Node which should be used + :type node: :class:`Node` + + :rtype: ``bool`` + """ + params = {'Action': 'StartInstances'} + params.update(self._pathlist('InstanceId', [node.id])) + res = self.connection.request(self.path, params=params).object + return self._get_state_boolean(res) + + def ex_stop_node(self, node): + """ + Stop the node by passing in the node object, does not work with + instance store backed instances + + :param node: Node which should be used + :type node: :class:`Node` + + :rtype: ``bool`` + """ + params = {'Action': 'StopInstances'} + params.update(self._pathlist('InstanceId', [node.id])) + res = self.connection.request(self.path, params=params).object + return self._get_state_boolean(res) + + def ex_get_console_output(self, node): + """ + Get console output for the node. 
+ + :param node: Node which should be used + :type node: :class:`Node` + + :return: Dictionary with the following keys: + - instance_id (``str``) + - timestamp (``datetime.datetime``) - ts of the last output + - output (``str``) - console output + :rtype: ``dict`` + """ + params = { + 'Action': 'GetConsoleOutput', + 'InstanceId': node.id + } + + response = self.connection.request(self.path, params=params).object + + timestamp = findattr(element=response, + xpath='timestamp', + namespace=NAMESPACE) + + encoded_string = findattr(element=response, + xpath='output', + namespace=NAMESPACE) + + timestamp = parse_date(timestamp) + output = base64.b64decode(b(encoded_string)).decode('utf-8') + + return {'instance_id': node.id, + 'timestamp': timestamp, + 'output': output} + + def ex_list_reserved_nodes(self): + """ + List all reserved instances/nodes which can be purchased from Amazon + for one or three year terms. Reservations are made at a region level + and reduce the hourly charge for instances. + + More information can be found at http://goo.gl/ulXCC7. + + :rtype: ``list`` of :class:`.EC2ReservedNode` + """ + params = {'Action': 'DescribeReservedInstances'} + + response = self.connection.request(self.path, params=params).object + + return self._to_reserved_nodes(response, 'reservedInstancesSet/item') + + # Account specific methods + + def ex_get_limits(self): + """ + Retrieve account resource limits. 
+ + :rtype: ``dict`` + """ + attributes = ['max-instances', 'max-elastic-ips', + 'vpc-max-elastic-ips'] + params = {} + params['Action'] = 'DescribeAccountAttributes' + + for index, attribute in enumerate(attributes): + params['AttributeName.%s' % (index)] = attribute + + response = self.connection.request(self.path, params=params) + data = response.object + + elems = data.findall(fixxpath(xpath='accountAttributeSet/item', + namespace=NAMESPACE)) + + result = {'resource': {}} + + for elem in elems: + name = findtext(element=elem, xpath='attributeName', + namespace=NAMESPACE) + value = findtext(element=elem, + xpath='attributeValueSet/item/attributeValue', + namespace=NAMESPACE) + + result['resource'][name] = int(value) + + return result + + # Deprecated extension methods + + def ex_list_keypairs(self): + """ + Lists all the keypair names and fingerprints. + + :rtype: ``list`` of ``dict`` + """ + warnings.warn('This method has been deprecated in favor of ' + 'list_key_pairs method') + + key_pairs = self.list_key_pairs() + + result = [] + + for key_pair in key_pairs: + item = { + 'keyName': key_pair.name, + 'keyFingerprint': key_pair.fingerprint, + } + result.append(item) + + return result + + def ex_describe_all_keypairs(self): + """ + Return names for all the available key pairs. + + @note: This is a non-standard extension API, and only works for EC2. + + :rtype: ``list`` of ``str`` + """ + names = [key_pair.name for key_pair in self.list_key_pairs()] + return names + + def ex_describe_keypairs(self, name): + """ + Here for backward compatibility. + """ + return self.ex_describe_keypair(name=name) + + def ex_describe_keypair(self, name): + """ + Describes a keypair by name. + + @note: This is a non-standard extension API, and only works for EC2. + + :param name: The name of the keypair to describe. 
+ :type name: ``str`` + + :rtype: ``dict`` + """ + + params = { + 'Action': 'DescribeKeyPairs', + 'KeyName.1': name + } + + response = self.connection.request(self.path, params=params).object + key_name = findattr(element=response, xpath='keySet/item/keyName', + namespace=NAMESPACE) + fingerprint = findattr(element=response, + xpath='keySet/item/keyFingerprint', + namespace=NAMESPACE).strip() + return { + 'keyName': key_name, + 'keyFingerprint': fingerprint + } + + def ex_create_keypair(self, name): + """ + Creates a new keypair + + @note: This is a non-standard extension API, and only works for EC2. + + :param name: The name of the keypair to Create. This must be + unique, otherwise an InvalidKeyPair.Duplicate exception is raised. + :type name: ``str`` + + :rtype: ``dict`` + """ + warnings.warn('This method has been deprecated in favor of ' + 'create_key_pair method') + + key_pair = self.create_key_pair(name=name) + + result = { + 'keyMaterial': key_pair.private_key, + 'keyFingerprint': key_pair.fingerprint + } + + return result + + def ex_delete_keypair(self, keypair): + """ + Delete a key pair by name. + + @note: This is a non-standard extension API, and only works with EC2. + + :param keypair: The name of the keypair to delete. + :type keypair: ``str`` + + :rtype: ``bool`` + """ + warnings.warn('This method has been deprecated in favor of ' + 'delete_key_pair method') + + keypair = KeyPair(name=keypair, public_key=None, fingerprint=None, + driver=self) + + return self.delete_key_pair(keypair) + + def ex_import_keypair_from_string(self, name, key_material): + """ + imports a new public key where the public key is passed in as a string + + @note: This is a non-standard extension API, and only works for EC2. + + :param name: The name of the public key to import. This must be + unique, otherwise an InvalidKeyPair.Duplicate exception is raised. + :type name: ``str`` + + :param key_material: The contents of a public key file. 
+ :type key_material: ``str`` + + :rtype: ``dict`` + """ + warnings.warn('This method has been deprecated in favor of ' + 'import_key_pair_from_string method') + + key_pair = self.import_key_pair_from_string(name=name, + key_material=key_material) + + result = { + 'keyName': key_pair.name, + 'keyFingerprint': key_pair.fingerprint + } + return result + + def ex_import_keypair(self, name, keyfile): + """ + imports a new public key where the public key is passed via a filename + + @note: This is a non-standard extension API, and only works for EC2. + + :param name: The name of the public key to import. This must be + unique, otherwise an InvalidKeyPair.Duplicate exception is raised. + :type name: ``str`` + + :param keyfile: The filename with path of the public key to import. + :type keyfile: ``str`` + + :rtype: ``dict`` + """ + warnings.warn('This method has been deprecated in favor of ' + 'import_key_pair_from_file method') + + key_pair = self.import_key_pair_from_file(name=name, + key_file_path=keyfile) + + result = { + 'keyName': key_pair.name, + 'keyFingerprint': key_pair.fingerprint + } + return result + + def ex_find_or_import_keypair_by_key_material(self, pubkey): + """ + Given a public key, look it up in the EC2 KeyPair database. If it + exists, return any information we have about it. Otherwise, create it. + + Keys that are created are named based on their comment and fingerprint. 
+ + :rtype: ``dict`` + """ + key_fingerprint = get_pubkey_ssh2_fingerprint(pubkey) + key_comment = get_pubkey_comment(pubkey, default='unnamed') + key_name = '%s-%s' % (key_comment, key_fingerprint) + + key_pairs = self.list_key_pairs() + key_pairs = [key_pair for key_pair in key_pairs if + key_pair.fingerprint == key_fingerprint] + + if len(key_pairs) >= 1: + key_pair = key_pairs[0] + result = { + 'keyName': key_pair.name, + 'keyFingerprint': key_pair.fingerprint + } + else: + result = self.ex_import_keypair_from_string(key_name, pubkey) + + return result + + def ex_list_internet_gateways(self, gateway_ids=None, filters=None): + """ + Describes available Internet gateways and whether or not they are + attached to a VPC. These are required for VPC nodes to communicate + over the Internet. + + :param gateway_ids: Return only intenet gateways matching the + provided internet gateway IDs. If not + specified, a list of all the internet + gateways in the corresponding region is + returned. + :type gateway_ids: ``list`` + + :param filters: The filters so that the response includes + information for only certain gateways. 
+        :type filters: ``dict``
+
+        :rtype: ``list`` of :class:`.VPCInternetGateway`
+        """
+        params = {'Action': 'DescribeInternetGateways'}
+
+        if gateway_ids:
+            params.update(self._pathlist('InternetGatewayId', gateway_ids))
+
+        if filters:
+            params.update(self._build_filters(filters))
+
+        response = self.connection.request(self.path, params=params).object
+
+        return self._to_internet_gateways(response, 'internetGatewaySet/item')
+
+    def ex_create_internet_gateway(self, name=None):
+        """
+        Create a VPC Internet gateway
+
+        :rtype: :class:`.VPCInternetGateway`
+        """
+        params = {'Action': 'CreateInternetGateway'}
+
+        resp = self.connection.request(self.path, params=params).object
+
+        element = resp.findall(fixxpath(xpath='internetGateway',
+                                        namespace=NAMESPACE))
+
+        gateway = self._to_internet_gateway(element[0], name)
+
+        if name and self.ex_create_tags(gateway, {'Name': name}):
+            gateway.extra['tags']['Name'] = name
+
+        return gateway
+
+    def ex_delete_internet_gateway(self, gateway):
+        """
+        Delete a VPC Internet gateway
+
+        :param gateway: The gateway to delete
+        :type gateway: :class:`.VPCInternetGateway`
+
+        :rtype: ``bool``
+        """
+        params = {'Action': 'DeleteInternetGateway',
+                  'InternetGatewayId': gateway.id}
+
+        res = self.connection.request(self.path, params=params).object
+
+        return self._get_boolean(res)
+
+    def ex_attach_internet_gateway(self, gateway, network):
+        """
+        Attach an Internet gateway to a VPC
+
+        :param gateway: The gateway to attach
+        :type gateway: :class:`.VPCInternetGateway`
+
+        :param network: The VPC network to attach to
+        :type network: :class:`.EC2Network`
+
+        :rtype: ``bool``
+        """
+        params = {'Action': 'AttachInternetGateway',
+                  'InternetGatewayId': gateway.id,
+                  'VpcId': network.id}
+
+        res = self.connection.request(self.path, params=params).object
+
+        return self._get_boolean(res)
+
+    def ex_detach_internet_gateway(self, gateway, network):
+        """
+        Detach an Internet gateway from a VPC
+
+        :param gateway: The gateway to detach
+        :type gateway: 
:class:`.VPCInternetGateway` + + :param network: The VPC network to detach from + :type network: :class:`.EC2Network` + + :rtype: ``bool`` + """ + params = {'Action': 'DetachInternetGateway', + 'InternetGatewayId': gateway.id, + 'VpcId': network.id} + + res = self.connection.request(self.path, params=params).object + + return self._get_boolean(res) + + def ex_list_route_tables(self, route_table_ids=None, filters=None): + """ + Describes one or more of a VPC's route tables. + These are used to determine where network traffic is directed. + + :param route_table_ids: Return only route tables matching the + provided route table IDs. If not specified, + a list of all the route tables in the + corresponding region is returned. + :type route_table_ids: ``list`` + + :param filters: The filters so that the response includes + information for only certain route tables. + :type filters: ``dict`` + + :rtype: ``list`` of :class:`.EC2RouteTable` + """ + params = {'Action': 'DescribeRouteTables'} + + if route_table_ids: + params.update(self._pathlist('RouteTableId', route_table_ids)) + + if filters: + params.update(self._build_filters(filters)) + + response = self.connection.request(self.path, params=params) + + return self._to_route_tables(response.object) + + def ex_create_route_table(self, network, name=None): + """ + Create a route table within a VPC. + + :param network: The VPC that the route table should be created in. 
+ :type network: :class:`.EC2Network` + + :rtype: :class:`.EC2RouteTable` + """ + params = {'Action': 'CreateRouteTable', + 'VpcId': network.id} + + response = self.connection.request(self.path, params=params).object + element = response.findall(fixxpath(xpath='routeTable', + namespace=NAMESPACE))[0] + + route_table = self._to_route_table(element, name=name) + + if name and self.ex_create_tags(route_table, {'Name': name}): + route_table.extra['tags']['Name'] = name + + return route_table + + def ex_delete_route_table(self, route_table): + """ + Deletes a VPC route table. + + :param route_table: The route table to delete. + :type route_table: :class:`.EC2RouteTable` + + :rtype: ``bool`` + """ + + params = {'Action': 'DeleteRouteTable', + 'RouteTableId': route_table.id} + + res = self.connection.request(self.path, params=params).object + + return self._get_boolean(res) + + def ex_associate_route_table(self, route_table, subnet): + """ + Associates a route table with a subnet within a VPC. + + Note: A route table can be associated with multiple subnets. + + :param route_table: The route table to associate. + :type route_table: :class:`.EC2RouteTable` + + :param subnet: The subnet to associate with. + :type subnet: :class:`.EC2Subnet` + + :return: Route table association ID. + :rtype: ``str`` + """ + + params = {'Action': 'AssociateRouteTable', + 'RouteTableId': route_table.id, + 'SubnetId': subnet.id} + + result = self.connection.request(self.path, params=params).object + association_id = findtext(element=result, + xpath='associationId', + namespace=NAMESPACE) + + return association_id + + def ex_dissociate_route_table(self, subnet_association): + """ + Dissociates a subnet from a route table. + + :param subnet_association: The subnet association object or + subnet association ID. 
+ :type subnet_association: :class:`.EC2SubnetAssociation` or + ``str`` + + :rtype: ``bool`` + """ - def _get_terminate_boolean(self, element): - status = element.findtext(".//{%s}%s" % (NAMESPACE, 'name')) - return any([ term_status == status - for term_status - in ('shutting-down', 'terminated') ]) - - def _to_nodes(self, object, xpath, groups=None): - return [ self._to_node(el, groups=groups) - for el in object.findall(fixxpath(xpath=xpath, namespace=NAMESPACE)) ] + if isinstance(subnet_association, EC2SubnetAssociation): + subnet_association_id = subnet_association.id + else: + subnet_association_id = subnet_association + + params = {'Action': 'DisassociateRouteTable', + 'AssociationId': subnet_association_id} + + res = self.connection.request(self.path, params=params).object + + return self._get_boolean(res) + + def ex_replace_route_table_association(self, subnet_association, + route_table): + """ + Changes the route table associated with a given subnet in a VPC. + + Note: This method can be used to change which table is the main route + table in the VPC (Specify the main route table's association ID + and the route table to be the new main route table). + + :param subnet_association: The subnet association object or + subnet association ID. + :type subnet_association: :class:`.EC2SubnetAssociation` or + ``str`` + + :param route_table: The new route table to associate. + :type route_table: :class:`.EC2RouteTable` + + :return: New route table association ID. 
+ :rtype: ``str`` + """ + + if isinstance(subnet_association, EC2SubnetAssociation): + subnet_association_id = subnet_association.id + else: + subnet_association_id = subnet_association + + params = {'Action': 'ReplaceRouteTableAssociation', + 'AssociationId': subnet_association_id, + 'RouteTableId': route_table.id} + + result = self.connection.request(self.path, params=params).object + new_association_id = findtext(element=result, + xpath='newAssociationId', + namespace=NAMESPACE) + + return new_association_id + + def ex_create_route(self, route_table, cidr, + internet_gateway=None, node=None, + network_interface=None, vpc_peering_connection=None): + """ + Creates a route entry in the route table. + + :param route_table: The route table to create the route in. + :type route_table: :class:`.EC2RouteTable` + + :param cidr: The CIDR block used for the destination match. + :type cidr: ``str`` + + :param internet_gateway: The internet gateway to route + traffic through. + :type internet_gateway: :class:`.VPCInternetGateway` + + :param node: The NAT instance to route traffic through. + :type node: :class:`Node` + + :param network_interface: The network interface of the node + to route traffic through. + :type network_interface: :class:`.EC2NetworkInterface` + + :param vpc_peering_connection: The VPC peering connection. + :type vpc_peering_connection: :class:`.VPCPeeringConnection` + + :rtype: ``bool`` + + Note: You must specify one of the following: internet_gateway, + node, network_interface, vpc_peering_connection. 
+ """ + + params = {'Action': 'CreateRoute', + 'RouteTableId': route_table.id, + 'DestinationCidrBlock': cidr} + + if internet_gateway: + params['GatewayId'] = internet_gateway.id + + if node: + params['InstanceId'] = node.id + + if network_interface: + params['NetworkInterfaceId'] = network_interface.id + + if vpc_peering_connection: + params['VpcPeeringConnectionId'] = vpc_peering_connection.id + + res = self.connection.request(self.path, params=params).object + + return self._get_boolean(res) + + def ex_delete_route(self, route_table, cidr): + """ + Deletes a route entry from the route table. + + :param route_table: The route table to delete the route from. + :type route_table: :class:`.EC2RouteTable` + + :param cidr: The CIDR block used for the destination match. + :type cidr: ``str`` + + :rtype: ``bool`` + """ + + params = {'Action': 'DeleteRoute', + 'RouteTableId': route_table.id, + 'DestinationCidrBlock': cidr} + + res = self.connection.request(self.path, params=params).object + + return self._get_boolean(res) + + def ex_replace_route(self, route_table, cidr, + internet_gateway=None, node=None, + network_interface=None, vpc_peering_connection=None): + """ + Replaces an existing route entry within a route table in a VPC. + + :param route_table: The route table to replace the route in. + :type route_table: :class:`.EC2RouteTable` + + :param cidr: The CIDR block used for the destination match. + :type cidr: ``str`` + + :param internet_gateway: The new internet gateway to route + traffic through. + :type internet_gateway: :class:`.VPCInternetGateway` + + :param node: The new NAT instance to route traffic through. + :type node: :class:`Node` + + :param network_interface: The new network interface of the node + to route traffic through. + :type network_interface: :class:`.EC2NetworkInterface` + + :param vpc_peering_connection: The new VPC peering connection. 
+ :type vpc_peering_connection: :class:`.VPCPeeringConnection` + + :rtype: ``bool`` + + Note: You must specify one of the following: internet_gateway, + node, network_interface, vpc_peering_connection. + """ + + params = {'Action': 'ReplaceRoute', + 'RouteTableId': route_table.id, + 'DestinationCidrBlock': cidr} + + if internet_gateway: + params['GatewayId'] = internet_gateway.id - def _to_node(self, element, groups=None): + if node: + params['InstanceId'] = node.id + + if network_interface: + params['NetworkInterfaceId'] = network_interface.id + + if vpc_peering_connection: + params['VpcPeeringConnectionId'] = vpc_peering_connection.id + + res = self.connection.request(self.path, params=params).object + + return self._get_boolean(res) + + def _to_nodes(self, object, xpath): + return [self._to_node(el) + for el in object.findall(fixxpath(xpath=xpath, + namespace=NAMESPACE))] + + def _to_node(self, element): try: - state = self.NODE_STATE_MAP[ - findattr(element=element, xpath="instanceState/name", - namespace=NAMESPACE) - ] + state = self.NODE_STATE_MAP[findattr(element=element, + xpath="instanceState/name", + namespace=NAMESPACE) + ] except KeyError: state = NodeState.UNKNOWN - n = Node( - id=findtext(element=element, xpath='instanceId', - namespace=NAMESPACE), - name=findtext(element=element, xpath='instanceId', - namespace=NAMESPACE), - state=state, - public_ip=[findtext(element=element, xpath='ipAddress', - namespace=NAMESPACE)], - private_ip=[findtext(element=element, xpath='privateIpAddress', - namespace=NAMESPACE)], - driver=self.connection.driver, - extra={ - 'dns_name': findattr(element=element, xpath="dnsName", - namespace=NAMESPACE), - 'instanceId': findattr(element=element, xpath="instanceId", - namespace=NAMESPACE), - 'imageId': findattr(element=element, xpath="imageId", - namespace=NAMESPACE), - 'private_dns': findattr(element=element, xpath="privateDnsName", - namespace=NAMESPACE), - 'status': findattr(element=element, xpath="instanceState/name", - 
namespace=NAMESPACE), - 'keyname': findattr(element=element, xpath="keyName", - namespace=NAMESPACE), - 'launchindex': findattr(element=element, xpath="amiLaunchIndex", - namespace=NAMESPACE), - 'productcode': - [p.text for p in findall(element=element, - xpath="productCodesSet/item/productCode", - namespace=NAMESPACE - )], - 'instancetype': findattr(element=element, xpath="instanceType", - namespace=NAMESPACE), - 'launchdatetime': findattr(element=element, xpath="launchTime", - namespace=NAMESPACE), - 'availability': findattr(element, xpath="placement/availabilityZone", - namespace=NAMESPACE), - 'kernelid': findattr(element=element, xpath="kernelId", - namespace=NAMESPACE), - 'ramdiskid': findattr(element=element, xpath="ramdiskId", - namespace=NAMESPACE), - 'clienttoken' : findattr(element=element, xpath="clientToken", - namespace=NAMESPACE), - 'groups': groups - } - ) - return n + instance_id = findtext(element=element, xpath='instanceId', + namespace=NAMESPACE) + public_ip = findtext(element=element, xpath='ipAddress', + namespace=NAMESPACE) + public_ips = [public_ip] if public_ip else [] + private_ip = findtext(element=element, xpath='privateIpAddress', + namespace=NAMESPACE) + private_ips = [private_ip] if private_ip else [] + product_codes = [] + for p in findall(element=element, + xpath="productCodesSet/item/productCode", + namespace=NAMESPACE): + product_codes.append(p) + + # Get our tags + tags = self._get_resource_tags(element) + name = tags.get('Name', instance_id) + + # Get our extra dictionary + extra = self._get_extra_dict( + element, RESOURCE_EXTRA_ATTRIBUTES_MAP['node']) + + # Add additional properties to our extra dictionary + extra['block_device_mapping'] = self._to_device_mappings(element) + extra['groups'] = self._get_security_groups(element) + extra['network_interfaces'] = self._to_interfaces(element) + extra['product_codes'] = product_codes + extra['tags'] = tags + + return Node(id=instance_id, name=name, state=state, + public_ips=public_ips, 
private_ips=private_ips, + driver=self.connection.driver, extra=extra) def _to_images(self, object): - return [ self._to_image(el) - for el in object.findall( - fixxpath(xpath='imagesSet/item', namespace=NAMESPACE) - ) ] + return [self._to_image(el) for el in object.findall( + fixxpath(xpath='imagesSet/item', namespace=NAMESPACE)) + ] def _to_image(self, element): - n = NodeImage(id=findtext(element=element, xpath='imageId', - namespace=NAMESPACE), - name=findtext(element=element, xpath='imageLocation', - namespace=NAMESPACE), - driver=self.connection.driver) - return n - - def list_nodes(self): - params = {'Action': 'DescribeInstances' } - elem=self.connection.request(self.path, params=params).object - nodes=[] - for rs in findall(element=elem, xpath='reservationSet/item', - namespace=NAMESPACE): - groups=[g.findtext('') - for g in findall(element=rs, xpath='groupSet/item/groupId', - namespace=NAMESPACE)] - nodes += self._to_nodes(rs, 'instancesSet/item', groups) - nodes_elastic_ips_mappings = self.ex_describe_addresses(nodes) - for node in nodes: - node.public_ip.extend(nodes_elastic_ips_mappings[node.id]) - return nodes + id = findtext(element=element, xpath='imageId', namespace=NAMESPACE) + name = findtext(element=element, xpath='name', namespace=NAMESPACE) - def list_sizes(self, location=None): - # Cluster instances are currently only available in the US - N. 
Virginia Region - include_cluser_instances = self.region_name == 'us-east-1' - sizes = self._get_sizes(include_cluser_instances = - include_cluser_instances) + # Build block device mapping + block_device_mapping = self._to_device_mappings(element) - return sizes + # Get our tags + tags = self._get_resource_tags(element) - def _get_sizes(self, include_cluser_instances=False): - sizes = [] - for key, values in self._instance_types.iteritems(): - if not include_cluser_instances and \ - key in CLUSTER_INSTANCES_IDS: - continue - attributes = copy.deepcopy(values) - attributes.update({'price': self._get_size_price(size_id=key)}) - sizes.append(NodeSize(driver=self, **attributes)) - return sizes + # Get our extra dictionary + extra = self._get_extra_dict( + element, RESOURCE_EXTRA_ATTRIBUTES_MAP['image']) - def list_images(self, location=None): - params = {'Action': 'DescribeImages'} - images = self._to_images( - self.connection.request(self.path, params=params).object - ) - return images + # Add our tags and block device mapping + extra['tags'] = tags + extra['block_device_mapping'] = block_device_mapping - def list_locations(self): - locations = [] - for index, availability_zone in enumerate(self.ex_list_availability_zones()): - locations.append(EC2NodeLocation(index, - self.friendly_name, - self.country, - self, - availability_zone)) - return locations + return NodeImage(id=id, name=name, driver=self, extra=extra) - def ex_create_keypair(self, name): - """Creates a new keypair + def _to_volume(self, element, name=None): + """ + Parse the XML element and return a StorageVolume object. - @note: This is a non-standard extension API, and - only works for EC2. + :param name: An optional name for the volume. If not provided + then either tag with a key "Name" or volume ID + will be used (which ever is available first in that + order). + :type name: ``str`` - @type name: C{str} - @param name: The name of the keypair to Create. 
This must be - unique, otherwise an InvalidKeyPair.Duplicate - exception is raised. + :rtype: :class:`StorageVolume` """ - params = { - 'Action': 'CreateKeyPair', - 'KeyName': name, - } - response = self.connection.request(self.path, params=params).object - key_material = findtext(element=response, xpath='keyMaterial', - namespace=NAMESPACE) - key_fingerprint = findtext(element=response, xpath='keyFingerprint', - namespace=NAMESPACE) - return { - 'keyMaterial': key_material, - 'keyFingerprint': key_fingerprint, - } + volId = findtext(element=element, xpath='volumeId', + namespace=NAMESPACE) + size = findtext(element=element, xpath='size', namespace=NAMESPACE) - def ex_import_keypair(self, name, keyfile): - """imports a new public key + # Get our tags + tags = self._get_resource_tags(element) - @note: This is a non-standard extension API, and only works for EC2. + # If name was not passed into the method then + # fall back then use the volume id + name = name if name else tags.get('Name', volId) + + # Get our extra dictionary + extra = self._get_extra_dict( + element, RESOURCE_EXTRA_ATTRIBUTES_MAP['volume']) + + extra['tags'] = tags + + return StorageVolume(id=volId, + name=name, + size=int(size), + driver=self, + extra=extra) + + def _to_snapshots(self, response): + return [self._to_snapshot(el) for el in response.findall( + fixxpath(xpath='snapshotSet/item', namespace=NAMESPACE)) + ] + + def _to_snapshot(self, element, name=None): + snapId = findtext(element=element, xpath='snapshotId', + namespace=NAMESPACE) + size = findtext(element=element, xpath='volumeSize', + namespace=NAMESPACE) + + # Get our tags + tags = self._get_resource_tags(element) + + # If name was not passed into the method then + # fall back then use the snapshot id + name = name if name else tags.get('Name', snapId) + + # Get our extra dictionary + extra = self._get_extra_dict( + element, RESOURCE_EXTRA_ATTRIBUTES_MAP['snapshot']) + + # Add tags and name to the extra dict + extra['tags'] = tags + 
extra['name'] = name + + return VolumeSnapshot(snapId, size=int(size), + driver=self, extra=extra) + + def _to_key_pairs(self, elems): + key_pairs = [self._to_key_pair(elem=elem) for elem in elems] + return key_pairs + + def _to_key_pair(self, elem): + name = findtext(element=elem, xpath='keyName', namespace=NAMESPACE) + fingerprint = findtext(element=elem, xpath='keyFingerprint', + namespace=NAMESPACE).strip() + private_key = findtext(element=elem, xpath='keyMaterial', + namespace=NAMESPACE) + + key_pair = KeyPair(name=name, + public_key=None, + fingerprint=fingerprint, + private_key=private_key, + driver=self) + return key_pair + + def _to_security_groups(self, response): + return [self._to_security_group(el) for el in response.findall( + fixxpath(xpath='securityGroupInfo/item', namespace=NAMESPACE)) + ] + + def _to_security_group(self, element): + # security group id + sg_id = findtext(element=element, + xpath='groupId', + namespace=NAMESPACE) + + # security group name + name = findtext(element=element, + xpath='groupName', + namespace=NAMESPACE) + + # Get our tags + tags = self._get_resource_tags(element) + + # Get our extra dictionary + extra = self._get_extra_dict( + element, RESOURCE_EXTRA_ATTRIBUTES_MAP['security_group']) + + # Add tags to the extra dict + extra['tags'] = tags + + # Get ingress rules + ingress_rules = self._to_security_group_rules( + element, 'ipPermissions/item' + ) - @type name: C{str} - @param name: The name of the public key to import. This must be unique, - otherwise an InvalidKeyPair.Duplicate exception is raised. + # Get egress rules + egress_rules = self._to_security_group_rules( + element, 'ipPermissionsEgress/item' + ) + + return EC2SecurityGroup(sg_id, name, ingress_rules, + egress_rules, extra=extra) - @type keyfile: C{str} - @param keyfile: The filename with path of the public key to import. 
+ def _to_security_group_rules(self, element, xpath): + return [self._to_security_group_rule(el) for el in element.findall( + fixxpath(xpath=xpath, namespace=NAMESPACE)) + ] + def _to_security_group_rule(self, element): """ + Parse the XML element and return a SecurityGroup object. - base64key = base64.b64encode(open(os.path.expanduser(keyfile)).read()) + :rtype: :class:`EC2SecurityGroup` + """ - params = {'Action': 'ImportKeyPair', - 'KeyName': name, - 'PublicKeyMaterial': base64key - } + rule = {} + rule['protocol'] = findtext(element=element, + xpath='ipProtocol', + namespace=NAMESPACE) + + rule['from_port'] = findtext(element=element, + xpath='fromPort', + namespace=NAMESPACE) - response = self.connection.request(self.path, params=params).object - key_name = findtext(element=response, xpath='keyName', namespace=NAMESPACE) - key_fingerprint = findtext(element=response, xpath='keyFingerprint', + rule['to_port'] = findtext(element=element, + xpath='toPort', namespace=NAMESPACE) - return { - 'keyName': key_name, - 'keyFingerprint': key_fingerprint, - } - def ex_describe_keypairs(self, name): - """Describes a keypiar by name + # get security groups + elements = element.findall(fixxpath( + xpath='groups/item', + namespace=NAMESPACE + )) + + rule['group_pairs'] = [] + + for element in elements: + item = { + 'user_id': findtext( + element=element, + xpath='userId', + namespace=NAMESPACE), + 'group_id': findtext( + element=element, + xpath='groupId', + namespace=NAMESPACE), + 'group_name': findtext( + element=element, + xpath='groupName', + namespace=NAMESPACE) + } + rule['group_pairs'].append(item) - @note: This is a non-standard extension API, and only works for EC2. 
+ # get ip ranges + elements = element.findall(fixxpath( + xpath='ipRanges/item', + namespace=NAMESPACE + )) + + rule['cidr_ips'] = [ + findtext( + element=element, + xpath='cidrIp', + namespace=NAMESPACE + ) for element in elements] + + return rule + + def _to_networks(self, response): + return [self._to_network(el) for el in response.findall( + fixxpath(xpath='vpcSet/item', namespace=NAMESPACE)) + ] + + def _to_network(self, element, name=None): + # Get the network id + vpc_id = findtext(element=element, + xpath='vpcId', + namespace=NAMESPACE) + + # Get our tags + tags = self._get_resource_tags(element) + + # Set our name if the Name key/value if available + # If we don't get anything back then use the vpc_id + name = name if name else tags.get('Name', vpc_id) + + cidr_block = findtext(element=element, + xpath='cidrBlock', + namespace=NAMESPACE) + + # Get our extra dictionary + extra = self._get_extra_dict( + element, RESOURCE_EXTRA_ATTRIBUTES_MAP['network']) - @type name: C{str} - @param name: The name of the keypair to describe. + # Add tags to the extra dict + extra['tags'] = tags + return EC2Network(vpc_id, name, cidr_block, extra=extra) + + def _to_addresses(self, response, only_associated): """ + Builds a list of dictionaries containing elastic IP properties. - params = {'Action': 'DescribeKeyPairs', - 'KeyName.1': name - } + :param only_associated: If true, return only those addresses + that are associated with an instance. + If false, return all addresses. 
+ :type only_associated: ``bool`` - response = self.connection.request(self.path, params=params).object - key_name = findattr(element=response, xpath='keySet/item/keyName', - namespace=NAMESPACE) - return { - 'keyName': key_name - } + :rtype: ``list`` of :class:`ElasticIP` + """ + addresses = [] + for el in response.findall(fixxpath(xpath='addressesSet/item', + namespace=NAMESPACE)): + addr = self._to_address(el, only_associated) + if addr is not None: + addresses.append(addr) + + return addresses + + def _to_address(self, element, only_associated): + instance_id = findtext(element=element, xpath='instanceId', + namespace=NAMESPACE) + + public_ip = findtext(element=element, + xpath='publicIp', + namespace=NAMESPACE) + + domain = findtext(element=element, + xpath='domain', + namespace=NAMESPACE) + + # Build our extra dict + extra = self._get_extra_dict( + element, RESOURCE_EXTRA_ATTRIBUTES_MAP['elastic_ip']) + + # Return NoneType if only associated IPs are requested + if only_associated and not instance_id: + return None + + return ElasticIP(public_ip, domain, instance_id, extra=extra) + + def _to_subnets(self, response): + return [self._to_subnet(el) for el in response.findall( + fixxpath(xpath='subnetSet/item', namespace=NAMESPACE)) + ] + + def _to_subnet(self, element, name=None): + # Get the subnet ID + subnet_id = findtext(element=element, + xpath='subnetId', + namespace=NAMESPACE) - def ex_create_security_group(self, name, description): - """Creates a new Security Group + # Get our tags + tags = self._get_resource_tags(element) - @note: This is a non-standard extension API, and only works for EC2. + # If we don't get anything back then use the subnet_id + name = name if name else tags.get('Name', subnet_id) - @type name: C{str} - @param name: The name of the security group to Create. This must be unique. 
+ state = findtext(element=element, + xpath='state', + namespace=NAMESPACE) - @type description: C{str} - @param description: Human readable description of a Security Group. - """ - params = {'Action': 'CreateSecurityGroup', - 'GroupName': name, - 'GroupDescription': description} - return self.connection.request(self.path, params=params).object + # Get our extra dictionary + extra = self._get_extra_dict( + element, RESOURCE_EXTRA_ATTRIBUTES_MAP['subnet']) - def ex_authorize_security_group_permissive(self, name): - """Edit a Security Group to allow all traffic. + # Also include our tags + extra['tags'] = tags - @note: This is a non-standard extension API, and only works for EC2. + return EC2NetworkSubnet(subnet_id, name, state, extra=extra) - @type name: C{str} - @param name: The name of the security group to edit + def _to_interfaces(self, response): + return [self._to_interface(el) for el in response.findall( + fixxpath(xpath='networkInterfaceSet/item', namespace=NAMESPACE)) + ] + + def _to_interface(self, element, name=None): """ + Parse the XML element and return a EC2NetworkInterface object. - results = [] - params = {'Action': 'AuthorizeSecurityGroupIngress', - 'GroupName': name, - 'IpProtocol': 'tcp', - 'FromPort': '0', - 'ToPort': '65535', - 'CidrIp': '0.0.0.0/0'} - try: - results.append( - self.connection.request(self.path, params=params.copy()).object - ) - except Exception, e: - if e.args[0].find("InvalidPermission.Duplicate") == -1: - raise e - params['IpProtocol'] = 'udp' + :param name: An optional name for the interface. If not provided + then either tag with a key "Name" or the interface ID + will be used (whichever is available first in that + order). 
+ :type name: ``str`` - try: - results.append( - self.connection.request(self.path, params=params.copy()).object - ) - except Exception, e: - if e.args[0].find("InvalidPermission.Duplicate") == -1: - raise e + :rtype: :class: `EC2NetworkInterface` + """ - params.update({'IpProtocol': 'icmp', 'FromPort': '-1', 'ToPort': '-1'}) + interface_id = findtext(element=element, + xpath='networkInterfaceId', + namespace=NAMESPACE) - try: - results.append( - self.connection.request(self.path, params=params.copy()).object - ) - except Exception, e: - if e.args[0].find("InvalidPermission.Duplicate") == -1: - raise e - return results + state = findtext(element=element, + xpath='status', + namespace=NAMESPACE) - def ex_list_availability_zones(self, only_available=True): + # Get tags + tags = self._get_resource_tags(element) + + name = name if name else tags.get('Name', interface_id) + + # Build security groups + groups = self._get_security_groups(element) + + # Build private IPs + priv_ips = [] + for item in findall(element=element, + xpath='privateIpAddressesSet/item', + namespace=NAMESPACE): + + priv_ips.append({'private_ip': findtext(element=item, + xpath='privateIpAddress', + namespace=NAMESPACE), + 'private_dns': findtext(element=item, + xpath='privateDnsName', + namespace=NAMESPACE), + 'primary': findtext(element=item, + xpath='primary', + namespace=NAMESPACE)}) + + # Build our attachment dictionary which we will add into extra later + attributes_map = \ + RESOURCE_EXTRA_ATTRIBUTES_MAP['network_interface_attachment'] + attachment = self._get_extra_dict(element, attributes_map) + + # Build our extra dict + attributes_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['network_interface'] + extra = self._get_extra_dict(element, attributes_map) + + # Include our previously built items as well + extra['tags'] = tags + extra['attachment'] = attachment + extra['private_ips'] = priv_ips + extra['groups'] = groups + + return EC2NetworkInterface(interface_id, name, state, extra=extra) + + def 
_to_reserved_nodes(self, object, xpath): + return [self._to_reserved_node(el) + for el in object.findall(fixxpath(xpath=xpath, + namespace=NAMESPACE))] + + def _to_reserved_node(self, element): + """ + Build an EC2ReservedNode object using the reserved instance properties. + Information on these properties can be found at http://goo.gl/ulXCC7. """ - Return a list of L{ExEC2AvailabilityZone} objects for the - current region. - Note: This is an extension method and is only available for EC2 - driver. + # Get our extra dictionary + extra = self._get_extra_dict( + element, RESOURCE_EXTRA_ATTRIBUTES_MAP['reserved_node']) - @keyword only_available: If true, return only availability zones - with state 'available' - @type only_available: C{string} + try: + size = [size for size in self.list_sizes() if + size.id == extra['instance_type']][0] + except IndexError: + size = None + + return EC2ReservedNode(id=findtext(element=element, + xpath='reservedInstancesId', + namespace=NAMESPACE), + state=findattr(element=element, + xpath='state', + namespace=NAMESPACE), + driver=self, + size=size, + extra=extra) + + def _to_device_mappings(self, object): + return [self._to_device_mapping(el) for el in object.findall( + fixxpath(xpath='blockDeviceMapping/item', namespace=NAMESPACE)) + ] + + def _to_device_mapping(self, element): """ - params = {'Action': 'DescribeAvailabilityZones'} + Parse the XML element and return a dictionary of device properties. + Additional information can be found at http://goo.gl/GjWYBf. - if only_available: - params.update({'Filter.0.Name': 'state'}) - params.update({'Filter.0.Value.0': 'available'}) + @note: EBS volumes do not have a virtual name. Only ephemeral + disks use this property. 
+ :rtype: ``dict`` + """ + mapping = {} - params.update({'Filter.1.Name': 'region-name'}) - params.update({'Filter.1.Value.0': self.region_name}) + mapping['device_name'] = findattr(element=element, + xpath='deviceName', + namespace=NAMESPACE) + + mapping['virtual_name'] = findattr(element=element, + xpath='virtualName', + namespace=NAMESPACE) + + # If virtual name does not exist then this is an EBS volume. + # Build the EBS dictionary leveraging the _get_extra_dict method. + if mapping['virtual_name'] is None: + mapping['ebs'] = self._get_extra_dict( + element, RESOURCE_EXTRA_ATTRIBUTES_MAP['ebs_volume']) + + return mapping + + def _to_internet_gateways(self, object, xpath): + return [self._to_internet_gateway(el) + for el in object.findall(fixxpath(xpath=xpath, + namespace=NAMESPACE))] + + def _to_internet_gateway(self, element, name=None): + id = findtext(element=element, + xpath='internetGatewayId', + namespace=NAMESPACE) + + vpc_id = findtext(element=element, + xpath='attachmentSet/item/vpcId', + namespace=NAMESPACE) - result = self.connection.request(self.path, - params=params.copy()).object + state = findtext(element=element, + xpath='attachmentSet/item/state', + namespace=NAMESPACE) - availability_zones = [] - for element in findall(element=result, xpath='availabilityZoneInfo/item', - namespace=NAMESPACE): - name = findtext(element=element, xpath='zoneName', - namespace=NAMESPACE) - zone_state = findtext(element=element, xpath='zoneState', + # If there's no attachment state, let's + # set it to available + if not state: + state = 'available' + + # Get our tags + tags = self._get_resource_tags(element) + + # If name was not passed into the method then + # fall back then use the gateway id + name = name if name else tags.get('Name', id) + + return VPCInternetGateway(id=id, name=name, vpc_id=vpc_id, + state=state, driver=self.connection.driver, + extra={'tags': tags}) + + def _to_route_tables(self, response): + return [self._to_route_table(el) for el in 
response.findall( + fixxpath(xpath='routeTableSet/item', namespace=NAMESPACE)) + ] + + def _to_route_table(self, element, name=None): + # route table id + route_table_id = findtext(element=element, + xpath='routeTableId', namespace=NAMESPACE) - region_name = findtext(element=element, xpath='regionName', - namespace=NAMESPACE) - availability_zone = ExEC2AvailabilityZone( - name=name, - zone_state=zone_state, - region_name=region_name - ) - availability_zones.append(availability_zone) + # Get our tags + tags = self._get_resource_tags(element) - return availability_zones + # Get our extra dictionary + extra = self._get_extra_dict( + element, RESOURCE_EXTRA_ATTRIBUTES_MAP['route_table']) + + # Add tags to the extra dict + extra['tags'] = tags + + # Get routes + routes = self._to_routes(element, 'routeSet/item') + + # Get subnet associations + subnet_associations = self._to_subnet_associations( + element, 'associationSet/item') + + # Get propagating routes virtual private gateways (VGW) IDs + propagating_gateway_ids = [] + for el in element.findall(fixxpath(xpath='propagatingVgwSet/item', + namespace=NAMESPACE)): + propagating_gateway_ids.append(findtext(element=el, + xpath='gatewayId', + namespace=NAMESPACE)) + + name = name if name else tags.get('Name', id) + + return EC2RouteTable(route_table_id, name, routes, subnet_associations, + propagating_gateway_ids, extra=extra) + + def _to_routes(self, element, xpath): + return [self._to_route(el) for el in element.findall( + fixxpath(xpath=xpath, namespace=NAMESPACE)) + ] - def ex_describe_tags(self, node): + def _to_route(self, element): """ - Return a dictionary of tags for this instance. 
- - @type node: C{Node} - @param node: Node instance + Parse the XML element and return a route object - @return dict Node tags + :rtype: :class: `EC2Route` """ - params = { 'Action': 'DescribeTags', - 'Filter.0.Name': 'resource-id', - 'Filter.0.Value.0': node.id, - 'Filter.1.Name': 'resource-type', - 'Filter.1.Value.0': 'instance', - } - result = self.connection.request(self.path, - params=params.copy()).object + destination_cidr = findtext(element=element, + xpath='destinationCidrBlock', + namespace=NAMESPACE) + + gateway_id = findtext(element=element, + xpath='gatewayId', + namespace=NAMESPACE) + + instance_id = findtext(element=element, + xpath='instanceId', + namespace=NAMESPACE) - tags = {} - for element in findall(element=result, xpath='tagSet/item', - namespace=NAMESPACE): - key = findtext(element=element, xpath='key', namespace=NAMESPACE) - value = findtext(element=element, xpath='value', namespace=NAMESPACE) + owner_id = findtext(element=element, + xpath='instanceOwnerId', + namespace=NAMESPACE) - tags[key] = value - return tags + interface_id = findtext(element=element, + xpath='networkInterfaceId', + namespace=NAMESPACE) + + state = findtext(element=element, + xpath='state', + namespace=NAMESPACE) + + origin = findtext(element=element, + xpath='origin', + namespace=NAMESPACE) + + vpc_peering_connection_id = findtext(element=element, + xpath='vpcPeeringConnectionId', + namespace=NAMESPACE) + + return EC2Route(destination_cidr, gateway_id, instance_id, owner_id, + interface_id, state, origin, vpc_peering_connection_id) + + def _to_subnet_associations(self, element, xpath): + return [self._to_subnet_association(el) for el in element.findall( + fixxpath(xpath=xpath, namespace=NAMESPACE)) + ] - def ex_create_tags(self, node, tags): + def _to_subnet_association(self, element): """ - Create tags for an instance. 
+ Parse the XML element and return a route table association object - @type node: C{Node} - @param node: Node instance - @param tags: A dictionary or other mapping of strings to strings, - associating tag names with tag values. + :rtype: :class: `EC2SubnetAssociation` """ - if not tags: - return - params = { 'Action': 'CreateTags', - 'ResourceId.0': node.id } - for i, key in enumerate(tags): - params['Tag.%d.Key' % i] = key - params['Tag.%d.Value' % i] = tags[key] + association_id = findtext(element=element, + xpath='routeTableAssociationId', + namespace=NAMESPACE) - self.connection.request(self.path, - params=params.copy()).object + route_table_id = findtext(element=element, + xpath='routeTableId', + namespace=NAMESPACE) - def ex_delete_tags(self, node, tags): - """ - Delete tags from an instance. + subnet_id = findtext(element=element, + xpath='subnetId', + namespace=NAMESPACE) + + main = findtext(element=element, + xpath='main', + namespace=NAMESPACE) - @type node: C{Node} - @param node: Node instance - @param tags: A dictionary or other mapping of strings to strings, - specifying the tag names and tag values to be deleted. + main = True if main else False + + return EC2SubnetAssociation(association_id, route_table_id, + subnet_id, main) + + def _pathlist(self, key, arr): """ - if not tags: - return + Converts a key and an array of values into AWS query param format. + """ + params = {} + i = 0 - params = { 'Action': 'DeleteTags', - 'ResourceId.0': node.id } - for i, key in enumerate(tags): - params['Tag.%d.Key' % i] = key - params['Tag.%d.Value' % i] = tags[key] + for value in arr: + i += 1 + params['%s.%s' % (key, i)] = value - self.connection.request(self.path, - params=params.copy()).object + return params - def ex_describe_addresses(self, nodes): - """ - Return Elastic IP addresses for all the nodes in the provided list. 
+ def _get_boolean(self, element): + tag = '{%s}%s' % (NAMESPACE, 'return') + return element.findtext(tag) == 'true' - @type nodes: C{list} - @param nodes: List of C{Node} instances + def _get_terminate_boolean(self, element): + status = element.findtext(".//{%s}%s" % (NAMESPACE, 'name')) + return any([term_status == status + for term_status + in ('shutting-down', 'terminated')]) - @return dict Dictionary where a key is a node ID and the value is a - list with the Elastic IP addresses associated with this node. + def _add_instance_filter(self, params, node): """ - if not nodes: - return {} - - params = { 'Action': 'DescribeAddresses' } + Add instance filter to the provided params dictionary. + """ + filters = {'instance-id': node.id} + params.update(self._build_filters(filters)) - if len(nodes) == 1: - params.update({ - 'Filter.0.Name': 'instance-id', - 'Filter.0.Value.0': nodes[0].id - }) + return params - result = self.connection.request(self.path, - params=params.copy()).object + def _get_state_boolean(self, element): + """ + Checks for the instances's state + """ + state = findall(element=element, + xpath='instancesSet/item/currentState/name', + namespace=NAMESPACE)[0].text - node_instance_ids = [ node.id for node in nodes ] - nodes_elastic_ip_mappings = {} + return state in ('stopping', 'pending', 'starting') - for node_id in node_instance_ids: - nodes_elastic_ip_mappings.setdefault(node_id, []) - for element in findall(element=result, xpath='addressesSet/item', - namespace=NAMESPACE): - instance_id = findtext(element=element, xpath='instanceId', - namespace=NAMESPACE) - ip_address = findtext(element=element, xpath='publicIp', - namespace=NAMESPACE) + def _get_extra_dict(self, element, mapping): + """ + Extract attributes from the element based on rules provided in the + mapping dictionary. - if instance_id not in node_instance_ids: - continue + :param element: Element to parse the values from. + :type element: xml.etree.ElementTree.Element. 
- nodes_elastic_ip_mappings[instance_id].append(ip_address) - return nodes_elastic_ip_mappings + :param mapping: Dictionary with the extra layout + :type node: :class:`Node` - def ex_describe_addresses_for_node(self, node): + :rtype: ``dict`` """ - Return a list of Elastic IP addresses associated with this node. + extra = {} + for attribute, values in mapping.items(): + transform_func = values['transform_func'] + value = findattr(element=element, + xpath=values['xpath'], + namespace=NAMESPACE) + if value is not None: + extra[attribute] = transform_func(value) + else: + extra[attribute] = None - @type node: C{Node} - @param node: Node instance + return extra - @return list Elastic IP addresses attached to this node. + def _get_resource_tags(self, element): """ - node_elastic_ips = self.ex_describe_addresses([node]) - return node_elastic_ips[node.id] + Parse tags from the provided element and return a dictionary with + key/value pairs. - def ex_modify_instance_attribute(self, node, attributes): + :rtype: ``dict`` """ - Modify node attributes. - A list of valid attributes can be found at http://goo.gl/gxcj8 + tags = {} - @type node: C{Node} - @param node: Node instance + # Get our tag set by parsing the element + tag_set = findall(element=element, + xpath='tagSet/item', + namespace=NAMESPACE) + + for tag in tag_set: + key = findtext(element=tag, + xpath='key', + namespace=NAMESPACE) - @type attributes: C{dict} - @param attributes: Dictionary with node attributes + value = findtext(element=tag, + xpath='value', + namespace=NAMESPACE) - @return bool True on success, False otherwise. 
- """ - attributes = attributes or {} - attributes.update({'InstanceId': node.id}) + tags[key] = value - params = { 'Action': 'ModifyInstanceAttribute' } - params.update(attributes) + return tags - result = self.connection.request(self.path, - params=params.copy()).object - element = findtext(element=result, xpath='return', - namespace=NAMESPACE) - return element == 'true' + def _get_block_device_mapping_params(self, block_device_mapping): + """ + Return a list of dictionaries with query parameters for + a valid block device mapping. - def ex_change_node_size(self, node, new_size): + :param mapping: List of dictionaries with the drive layout + :type mapping: ``list`` or ``dict`` + + :return: Dictionary representation of the drive mapping + :rtype: ``dict`` """ - Change the node size. - Note: Node must be turned of before changing the size. - @type node: C{Node} - @param node: Node instance + if not isinstance(block_device_mapping, (list, tuple)): + raise AttributeError( + 'block_device_mapping not list or tuple') - @type new_size: C{NodeSize} - @param new_size: NodeSize intance + params = {} + + for idx, mapping in enumerate(block_device_mapping): + idx += 1 # We want 1-based indexes + if not isinstance(mapping, dict): + raise AttributeError( + 'mapping %s in block_device_mapping ' + 'not a dict' % mapping) + for k, v in mapping.items(): + if not isinstance(v, dict): + params['BlockDeviceMapping.%d.%s' % (idx, k)] = str(v) + else: + for key, value in v.items(): + params['BlockDeviceMapping.%d.%s.%s' + % (idx, k, key)] = str(value) + return params - @return bool True on success, False otherwise. + def _get_common_security_group_params(self, group_id, protocol, + from_port, to_port, cidr_ips, + group_pairs): """ - if 'instancetype' in node.extra: - current_instance_type = node.extra['instancetype'] + Return a dictionary with common query parameters which are used when + operating on security groups. 
- if current_instance_type == new_size.id: - raise ValueError('New instance size is the same as the current one') + :rtype: ``dict`` + """ + params = {'GroupId': group_id, + 'IpPermissions.1.IpProtocol': protocol, + 'IpPermissions.1.FromPort': from_port, + 'IpPermissions.1.ToPort': to_port} + + if cidr_ips is not None: + ip_ranges = {} + for index, cidr_ip in enumerate(cidr_ips): + index += 1 + + ip_ranges['IpPermissions.1.IpRanges.%s.CidrIp' + % (index)] = cidr_ip + + params.update(ip_ranges) + + if group_pairs is not None: + user_groups = {} + for index, group_pair in enumerate(group_pairs): + index += 1 + + if 'group_id' in group_pair.keys(): + user_groups['IpPermissions.1.Groups.%s.GroupId' + % (index)] = group_pair['group_id'] + + if 'group_name' in group_pair.keys(): + user_groups['IpPermissions.1.Groups.%s.GroupName' + % (index)] = group_pair['group_name'] + + if 'user_id' in group_pair.keys(): + user_groups['IpPermissions.1.Groups.%s.UserId' + % (index)] = group_pair['user_id'] - attributes = { 'InstanceType.Value': new_size.id } - return self.ex_modify_instance_attribute(node, attributes) + params.update(user_groups) - def create_node(self, **kwargs): - """Create a new EC2 node + return params - See L{NodeDriver.create_node} for more keyword args. - Reference: http://bit.ly/8ZyPSy [docs.amazonwebservices.com] + def _get_security_groups(self, element): + """ + Parse security groups from the provided element and return a + list of security groups with the id ane name key/value pairs. 
- @keyword ex_mincount: Minimum number of instances to launch - @type ex_mincount: C{int} + :rtype: ``list`` of ``dict`` + """ + groups = [] - @keyword ex_maxcount: Maximum number of instances to launch - @type ex_maxcount: C{int} + for item in findall(element=element, + xpath='groupSet/item', + namespace=NAMESPACE): + groups.append({ + 'group_id': findtext(element=item, + xpath='groupId', + namespace=NAMESPACE), + 'group_name': findtext(element=item, + xpath='groupName', + namespace=NAMESPACE) + }) - @keyword ex_securitygroup: Name of security group - @type ex_securitygroup: C{str} + return groups - @keyword ex_keyname: The name of the key pair - @type ex_keyname: C{str} + def _build_filters(self, filters): + """ + Return a dictionary with filter query parameters which are used when + listing networks, security groups, etc. - @keyword ex_userdata: User data - @type ex_userdata: C{str} + :param filters: Dict of filter names and filter values + :type filters: ``dict`` - @keyword ex_clienttoken: Unique identifier to ensure idempotency - @type ex_clienttoken: C{str} + :rtype: ``dict`` """ - image = kwargs["image"] - size = kwargs["size"] - params = { - 'Action': 'RunInstances', - 'ImageId': image.id, - 'MinCount': kwargs.get('ex_mincount','1'), - 'MaxCount': kwargs.get('ex_maxcount','1'), - 'InstanceType': size.id - } - if 'ex_securitygroup' in kwargs: - if not isinstance(kwargs['ex_securitygroup'], list): - kwargs['ex_securitygroup'] = [kwargs['ex_securitygroup']] - for sig in range(len(kwargs['ex_securitygroup'])): - params['SecurityGroup.%d' % (sig+1,)] = kwargs['ex_securitygroup'][sig] - - if 'location' in kwargs: - availability_zone = getattr(kwargs['location'], 'availability_zone', - None) - if availability_zone: - if availability_zone.region_name != self.region_name: - raise AttributeError('Invalid availability zone: %s' - % (availability_zone.name)) - params['Placement.AvailabilityZone'] = availability_zone.name + filter_entries = {} - if 'ex_keyname' in 
kwargs: - params['KeyName'] = kwargs['ex_keyname'] + for filter_idx, filter_data in enumerate(filters.items()): + filter_idx += 1 # We want 1-based indexes + filter_name, filter_values = filter_data + filter_key = 'Filter.%s.Name' % (filter_idx) + filter_entries[filter_key] = filter_name + + if isinstance(filter_values, list): + for value_idx, value in enumerate(filter_values): + value_idx += 1 # We want 1-based indexes + value_key = 'Filter.%s.Value.%s' % (filter_idx, + value_idx) + filter_entries[value_key] = value + else: + value_key = 'Filter.%s.Value.1' % (filter_idx) + filter_entries[value_key] = filter_values - if 'ex_userdata' in kwargs: - params['UserData'] = base64.b64encode(kwargs['ex_userdata']) + return filter_entries - if 'ex_clienttoken' in kwargs: - params['ClientToken'] = kwargs['ex_clienttoken'] - object = self.connection.request(self.path, params=params).object - nodes = self._to_nodes(object, 'instancesSet/item') +class EC2NodeDriver(BaseEC2NodeDriver): + """ + Amazon EC2 node driver. 
+ """ - for node in nodes: - self.ex_create_tags(node=node, tags={'Name': kwargs['name']}) + connectionCls = EC2Connection + type = Provider.EC2 + name = 'Amazon EC2' + website = 'http://aws.amazon.com/ec2/' + path = '/' - if len(nodes) == 1: - return nodes[0] - else: - return nodes + NODE_STATE_MAP = { + 'pending': NodeState.PENDING, + 'running': NodeState.RUNNING, + 'shutting-down': NodeState.UNKNOWN, + 'terminated': NodeState.TERMINATED, + 'stopped': NodeState.STOPPED + } - def reboot_node(self, node): - """ - Reboot the node by passing in the node object - """ - params = {'Action': 'RebootInstances'} - params.update(self._pathlist('InstanceId', [node.id])) - res = self.connection.request(self.path, params=params).object - return self._get_boolean(res) + def __init__(self, key, secret=None, secure=True, host=None, port=None, + region='us-east-1', **kwargs): + if hasattr(self, '_region'): + region = self._region + + if region not in VALID_EC2_REGIONS: + raise ValueError('Invalid region: %s' % (region)) + + details = REGION_DETAILS[region] + self.region_name = region + self.api_name = details['api_name'] + self.country = details['country'] + + self.connectionCls.host = details['endpoint'] + + super(EC2NodeDriver, self).__init__(key=key, secret=secret, + secure=secure, host=host, + port=port, **kwargs) - def destroy_node(self, node): - """ - Destroy node by passing in the node object - """ - params = {'Action': 'TerminateInstances'} - params.update(self._pathlist('InstanceId', [node.id])) - res = self.connection.request(self.path, params=params).object - return self._get_terminate_boolean(res) class IdempotentParamError(LibcloudError): """ - Request used the same client token as a previous, but non-identical request. + Request used the same client token as a previous, + but non-identical request. 
""" + def __str__(self): return repr(self.value) -class EC2EUConnection(EC2Connection): - """ - Connection class for EC2 in the Western Europe Region - """ - host = EC2_EU_WEST_HOST class EC2EUNodeDriver(EC2NodeDriver): """ - Driver class for EC2 in the Western Europe Region + Driver class for EC2 in the Western Europe Region. """ - - api_name = 'ec2_eu_west' name = 'Amazon EC2 (eu-west-1)' - friendly_name = 'Amazon Europe Ireland' - country = 'IE' - region_name = 'eu-west-1' - connectionCls = EC2EUConnection - _instance_types = EC2_EU_WEST_INSTANCE_TYPES + _region = 'eu-west-1' -class EC2USWestConnection(EC2Connection): + +class EC2USWestNodeDriver(EC2NodeDriver): """ - Connection class for EC2 in the Western US Region + Driver class for EC2 in the Western US Region """ + name = 'Amazon EC2 (us-west-1)' + _region = 'us-west-1' - host = EC2_US_WEST_HOST -class EC2USWestNodeDriver(EC2NodeDriver): +class EC2USWestOregonNodeDriver(EC2NodeDriver): """ - Driver class for EC2 in the Western US Region + Driver class for EC2 in the US West Oregon region. """ + name = 'Amazon EC2 (us-west-2)' + _region = 'us-west-2' - api_name = 'ec2_us_west' - name = 'Amazon EC2 (us-west-1)' - friendly_name = 'Amazon US N. California' - country = 'US' - region_name = 'us-west-1' - connectionCls = EC2USWestConnection - _instance_types = EC2_US_WEST_INSTANCE_TYPES -class EC2APSEConnection(EC2Connection): +class EC2APSENodeDriver(EC2NodeDriver): """ - Connection class for EC2 in the Southeast Asia Pacific Region + Driver class for EC2 in the Southeast Asia Pacific Region. """ + name = 'Amazon EC2 (ap-southeast-1)' + _region = 'ap-southeast-1' - host = EC2_AP_SOUTHEAST_HOST -class EC2APNEConnection(EC2Connection): +class EC2APNENodeDriver(EC2NodeDriver): """ - Connection class for EC2 in the Northeast Asia Pacific Region + Driver class for EC2 in the Northeast Asia Pacific Region. 
""" + name = 'Amazon EC2 (ap-northeast-1)' + _region = 'ap-northeast-1' - host = EC2_AP_NORTHEAST_HOST -class EC2APSENodeDriver(EC2NodeDriver): +class EC2SAEastNodeDriver(EC2NodeDriver): """ - Driver class for EC2 in the Southeast Asia Pacific Region + Driver class for EC2 in the South America (Sao Paulo) Region. """ + name = 'Amazon EC2 (sa-east-1)' + _region = 'sa-east-1' - api_name = 'ec2_ap_southeast' - name = 'Amazon EC2 (ap-southeast-1)' - friendly_name = 'Amazon Asia-Pacific Singapore' - country = 'SG' - region_name = 'ap-southeast-1' - connectionCls = EC2APSEConnection - _instance_types = EC2_AP_SOUTHEAST_INSTANCE_TYPES -class EC2APNENodeDriver(EC2NodeDriver): +class EC2APSESydneyNodeDriver(EC2NodeDriver): """ - Driver class for EC2 in the Northeast Asia Pacific Region + Driver class for EC2 in the Southeast Asia Pacific (Sydney) Region. """ + name = 'Amazon EC2 (ap-southeast-2)' + _region = 'ap-southeast-2' - api_name = 'ec2_ap_northeast' - name = 'Amazon EC2 (ap-northeast-1)' - friendly_name = 'Amazon Asia-Pacific Tokyo' - country = 'JP' - region_name = 'ap-northeast-1' - connectionCls = EC2APNEConnection - _instance_types = EC2_AP_NORTHEAST_INSTANCE_TYPES class EucConnection(EC2Connection): """ @@ -940,50 +5360,90 @@ host = None -class EucNodeDriver(EC2NodeDriver): + +class EucNodeDriver(BaseEC2NodeDriver): """ Driver class for Eucalyptus """ name = 'Eucalyptus' + website = 'http://www.eucalyptus.com/' + api_name = 'ec2_us_east' + region_name = 'us-east-1' connectionCls = EucConnection - _instance_types = EC2_US_WEST_INSTANCE_TYPES - def __init__(self, key, secret=None, secure=True, host=None, path=None, port=None): + def __init__(self, key, secret=None, secure=True, host=None, + path=None, port=None, api_version=DEFAULT_EUCA_API_VERSION): + """ + @inherits: :class:`EC2NodeDriver.__init__` + + :param path: The host where the API can be reached. 
+ :type path: ``str`` + + :param api_version: The API version to extend support for + Eucalyptus proprietary API calls + :type api_version: ``str`` + """ super(EucNodeDriver, self).__init__(key, secret, secure, host, port) + if path is None: - path = "/services/Eucalyptus" + path = '/services/Eucalyptus' + self.path = path + self.EUCA_NAMESPACE = 'http://msgs.eucalyptus.com/%s' % (api_version) def list_locations(self): - raise NotImplementedError, \ - 'list_locations not implemented for this driver' + raise NotImplementedError( + 'list_locations not implemented for this driver') + + def _to_sizes(self, response): + return [self._to_size(el) for el in response.findall( + fixxpath(xpath='instanceTypeDetails/item', + namespace=self.EUCA_NAMESPACE))] + + def _to_size(self, el): + name = findtext(element=el, + xpath='name', + namespace=self.EUCA_NAMESPACE) + cpu = findtext(element=el, + xpath='cpu', + namespace=self.EUCA_NAMESPACE) + disk = findtext(element=el, + xpath='disk', + namespace=self.EUCA_NAMESPACE) + memory = findtext(element=el, + xpath='memory', + namespace=self.EUCA_NAMESPACE) + + return NodeSize(id=name, + name=name, + ram=int(memory), + disk=int(disk), + bandwidth=None, + price=None, + driver=EucNodeDriver, + extra={ + 'cpu': int(cpu) + }) + + def list_sizes(self): + """ + List available instance flavors/sizes + + :rtype: ``list`` of :class:`NodeSize` + """ + params = {'Action': 'DescribeInstanceTypes'} + response = self.connection.request(self.path, params=params).object + + return self._to_sizes(response) + + def _add_instance_filter(self, params, node): + """ + Eucalyptus driver doesn't support filtering on instance id so this is a + no-op. 
+ """ + pass -# Nimbus clouds have 3 EC2-style instance types but their particular RAM -# allocations are configured by the admin -NIMBUS_INSTANCE_TYPES = { - 'm1.small': { - 'id' : 'm1.small', - 'name': 'Small Instance', - 'ram': None, - 'disk': None, - 'bandwidth': None, - }, - 'm1.large': { - 'id' : 'm1.large', - 'name': 'Large Instance', - 'ram': None, - 'disk': None, - 'bandwidth': None, - }, - 'm1.xlarge': { - 'id' : 'm1.xlarge', - 'name': 'Extra Large Instance', - 'ram': None, - 'disk': None, - 'bandwidth': None, - }, -} class NimbusConnection(EC2Connection): """ @@ -992,24 +5452,319 @@ host = None -class NimbusNodeDriver(EC2NodeDriver): + +class NimbusNodeDriver(BaseEC2NodeDriver): """ Driver class for Nimbus """ type = Provider.NIMBUS name = 'Nimbus' + website = 'http://www.nimbusproject.org/' + country = 'Private' api_name = 'nimbus' region_name = 'nimbus' friendly_name = 'Nimbus Private Cloud' connectionCls = NimbusConnection - _instance_types = NIMBUS_INSTANCE_TYPES def ex_describe_addresses(self, nodes): - """Nimbus doesn't support elastic IPs, so this is a passthrough + """ + Nimbus doesn't support elastic IPs, so this is a pass-through. + + @inherits: :class:`EC2NodeDriver.ex_describe_addresses` """ nodes_elastic_ip_mappings = {} for node in nodes: # empty list per node nodes_elastic_ip_mappings[node.id] = [] return nodes_elastic_ip_mappings + + def ex_create_tags(self, resource, tags): + """ + Nimbus doesn't support creating tags, so this is a pass-through. + + @inherits: :class:`EC2NodeDriver.ex_create_tags` + """ + pass + + +class OutscaleConnection(EC2Connection): + """ + Connection class for Outscale + """ + + host = None + + +class OutscaleNodeDriver(BaseEC2NodeDriver): + """ + Base Outscale FCU node driver. + + Outscale per provider driver classes inherit from it. 
+ """ + + connectionCls = OutscaleConnection + name = 'Outscale' + website = 'http://www.outscale.com' + path = '/' + + NODE_STATE_MAP = { + 'pending': NodeState.PENDING, + 'running': NodeState.RUNNING, + 'shutting-down': NodeState.UNKNOWN, + 'terminated': NodeState.TERMINATED, + 'stopped': NodeState.STOPPED + } + + def __init__(self, key, secret=None, secure=True, host=None, port=None, + region='us-east-1', region_details=None, **kwargs): + if hasattr(self, '_region'): + region = self._region + + if region_details is None: + raise ValueError('Invalid region_details argument') + + if region not in region_details.keys(): + raise ValueError('Invalid region: %s' % (region)) + + self.region_name = region + self.region_details = region_details + details = self.region_details[region] + self.api_name = details['api_name'] + self.country = details['country'] + + self.connectionCls.host = details['endpoint'] + + self._not_implemented_msg =\ + 'This method is not supported in the Outscale driver' + + super(BaseEC2NodeDriver, self).__init__(key=key, secret=secret, + secure=secure, host=host, + port=port, **kwargs) + + def create_node(self, **kwargs): + """ + Create a new Outscale node. The ex_iamprofile keyword is not supported. + + @inherits: :class:`BaseEC2NodeDriver.create_node` + + :keyword ex_keyname: The name of the key pair + :type ex_keyname: ``str`` + + :keyword ex_userdata: User data + :type ex_userdata: ``str`` + + :keyword ex_security_groups: A list of names of security groups to + assign to the node. 
+ :type ex_security_groups: ``list`` + + :keyword ex_metadata: Key/Value metadata to associate with a node + :type ex_metadata: ``dict`` + + :keyword ex_mincount: Minimum number of instances to launch + :type ex_mincount: ``int`` + + :keyword ex_maxcount: Maximum number of instances to launch + :type ex_maxcount: ``int`` + + :keyword ex_clienttoken: Unique identifier to ensure idempotency + :type ex_clienttoken: ``str`` + + :keyword ex_blockdevicemappings: ``list`` of ``dict`` block device + mappings. + :type ex_blockdevicemappings: ``list`` of ``dict`` + + :keyword ex_ebs_optimized: EBS-Optimized if True + :type ex_ebs_optimized: ``bool`` + """ + if 'ex_iamprofile' in kwargs: + raise NotImplementedError("ex_iamprofile not implemented") + return super(OutscaleNodeDriver, self).create_node(**kwargs) + + def ex_create_network(self, cidr_block, name=None): + """ + Create a network/VPC. Outscale does not support instance_tenancy. + + :param cidr_block: The CIDR block assigned to the network + :type cidr_block: ``str`` + + :param name: An optional name for the network + :type name: ``str`` + + :return: Dictionary of network properties + :rtype: ``dict`` + """ + return super(OutscaleNodeDriver, self).ex_create_network(cidr_block, + name=name) + + def ex_modify_instance_attribute(self, node, disable_api_termination=None, + ebs_optimized=None, group_id=None, + source_dest_check=None, user_data=None, + instance_type=None): + """ + Modify node attributes. + Ouscale support the following attributes: + 'DisableApiTermination.Value', 'EbsOptimized', 'GroupId.n', + 'SourceDestCheck.Value', 'UserData.Value', + 'InstanceType.Value' + + :param node: Node instance + :type node: :class:`Node` + + :param attributes: Dictionary with node attributes + :type attributes: ``dict`` + + :return: True on success, False otherwise. 
+ :rtype: ``bool`` + """ + attributes = {} + + if disable_api_termination is not None: + attributes['DisableApiTermination.Value'] = disable_api_termination + if ebs_optimized is not None: + attributes['EbsOptimized'] = ebs_optimized + if group_id is not None: + attributes['GroupId.n'] = group_id + if source_dest_check is not None: + attributes['SourceDestCheck.Value'] = source_dest_check + if user_data is not None: + attributes['UserData.Value'] = user_data + if instance_type is not None: + attributes['InstanceType.Value'] = instance_type + + return super(OutscaleNodeDriver, self).ex_modify_instance_attribute( + node, attributes) + + def ex_register_image(self, name, description=None, architecture=None, + root_device_name=None, block_device_mapping=None): + """ + Registers a Machine Image based off of an EBS-backed instance. + Can also be used to create images from snapshots. + + Outscale does not support image_location, kernel_id and ramdisk_id. + + :param name: The name for the AMI being registered + :type name: ``str`` + + :param description: The description of the AMI (optional) + :type description: ``str`` + + :param architecture: The architecture of the AMI (i386/x86_64) + (optional) + :type architecture: ``str`` + + :param root_device_name: The device name for the root device + Required if registering a EBS-backed AMI + :type root_device_name: ``str`` + + :param block_device_mapping: A dictionary of the disk layout + (optional) + :type block_device_mapping: ``dict`` + + :rtype: :class:`NodeImage` + """ + return super(OutscaleNodeDriver, self).ex_register_image( + name, description=description, architecture=architecture, + root_device_name=root_device_name, + block_device_mapping=block_device_mapping) + + def ex_copy_image(self, source_region, image, name=None, description=None): + """ + Outscale does not support copying images. 
+ + @inherits: :class:`EC2NodeDriver.ex_copy_image` + """ + raise NotImplementedError(self._not_implemented_msg) + + def ex_get_limits(self): + """ + Outscale does not support getting limits. + + @inherits: :class:`EC2NodeDriver.ex_get_limits` + """ + raise NotImplementedError(self._not_implemented_msg) + + def ex_create_network_interface(self, subnet, name=None, + description=None, + private_ip_address=None): + """ + Outscale does not support creating a network interface within a VPC. + + @inherits: :class:`EC2NodeDriver.ex_create_network_interface` + """ + raise NotImplementedError(self._not_implemented_msg) + + def ex_delete_network_interface(self, network_interface): + """ + Outscale does not support deleting a network interface within a VPC. + + @inherits: :class:`EC2NodeDriver.ex_delete_network_interface` + """ + raise NotImplementedError(self._not_implemented_msg) + + def ex_attach_network_interface_to_node(self, network_interface, + node, device_index): + """ + Outscale does not support attaching a network interface. + + @inherits: :class:`EC2NodeDriver.ex_attach_network_interface_to_node` + """ + raise NotImplementedError(self._not_implemented_msg) + + def ex_detach_network_interface(self, attachment_id, force=False): + """ + Outscale does not support detaching a network interface + + @inherits: :class:`EC2NodeDriver.ex_detach_network_interface` + """ + raise NotImplementedError(self._not_implemented_msg) + + def list_sizes(self, location=None): + """ + List available instance flavors/sizes + + This override the EC2 default method in order to use Outscale infos. 
+ + :rtype: ``list`` of :class:`NodeSize` + """ + available_types =\ + self.region_details[self.region_name]['instance_types'] + sizes = [] + + for instance_type in available_types: + attributes = OUTSCALE_INSTANCE_TYPES[instance_type] + attributes = copy.deepcopy(attributes) + price = self._get_size_price(size_id=instance_type) + attributes.update({'price': price}) + sizes.append(NodeSize(driver=self, **attributes)) + return sizes + + +class OutscaleSASNodeDriver(OutscaleNodeDriver): + """ + Outscale SAS node driver + """ + name = 'Outscale SAS' + type = Provider.OUTSCALE_SAS + + def __init__(self, key, secret=None, secure=True, host=None, port=None, + region='us-east-1', region_details=None, **kwargs): + super(OutscaleSASNodeDriver, self).__init__( + key=key, secret=secret, secure=secure, host=host, port=port, + region=region, region_details=OUTSCALE_SAS_REGION_DETAILS, + **kwargs) + + +class OutscaleINCNodeDriver(OutscaleNodeDriver): + """ + Outscale INC node driver + """ + name = 'Outscale INC' + type = Provider.OUTSCALE_INC + + def __init__(self, key, secret=None, secure=True, host=None, port=None, + region='us-east-1', region_details=None, **kwargs): + super(OutscaleINCNodeDriver, self).__init__( + key=key, secret=secret, secure=secure, host=host, port=port, + region=region, region_details=OUTSCALE_INC_REGION_DETAILS, + **kwargs) diff -Nru libcloud-0.5.0/libcloud/compute/drivers/ecp.py libcloud-0.15.1/libcloud/compute/drivers/ecp.py --- libcloud-0.5.0/libcloud/compute/drivers/ecp.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/ecp.py 2014-06-11 14:27:59.000000000 +0000 @@ -18,29 +18,32 @@ """ import time import base64 -import httplib -import socket import os +import socket +import binascii + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import b # JSON is included in the standard library starting with Python 2.6. 
For 2.5 # and 2.4, there's a simplejson egg at: http://pypi.python.org/pypi/simplejson try: - import json -except: import simplejson as json +except ImportError: + import json from libcloud.common.base import Response, ConnectionUserAndKey from libcloud.compute.base import NodeDriver, NodeSize, NodeLocation from libcloud.compute.base import NodeImage, Node from libcloud.compute.types import Provider, NodeState, InvalidCredsError -from libcloud.compute.base import is_private_subnet +from libcloud.utils.networking import is_private_subnet -#Defaults +# Defaults API_HOST = '' -API_PORT = (80,443) +API_PORT = (80, 443) -class ECPResponse(Response): +class ECPResponse(Response): def success(self): if self.status == httplib.OK or self.status == httplib.CREATED: try: @@ -62,13 +65,14 @@ def parse_error(self): return self.error - #Interpret the json responses - no error checking required + # Interpret the json responses - no error checking required def parse_body(self): return json.loads(self.body) def getheaders(self): return self.headers + class ECPConnection(ConnectionUserAndKey): """ Connection class for the Enomaly ECP driver @@ -79,13 +83,13 @@ port = API_PORT def add_default_headers(self, headers): - #Authentication + # Authentication username = self.user_id password = self.key - base64string = base64.encodestring( - '%s:%s' % (username, password))[:-1] - authheader = "Basic %s" % base64string - headers['Authorization']= authheader + base64string = base64.encodestring( + b('%s:%s' % (username, password)))[:-1] + authheader = "Basic %s" % base64string + headers['Authorization'] = authheader return headers @@ -94,10 +98,10 @@ Based on Wade Leftwich's function: http://code.activestate.com/recipes/146306/ """ - #use a random boundary that does not appear in the fields + # use a random boundary that does not appear in the fields boundary = '' while boundary in ''.join(fields): - boundary = os.urandom(16).encode('hex') + boundary = 
binascii.hexlify(os.urandom(16)).decode('utf-8') L = [] for i in fields: L.append('--' + boundary) @@ -108,7 +112,7 @@ L.append('') body = '\r\n'.join(L) content_type = 'multipart/form-data; boundary=%s' % boundary - header = {'Content-Type':content_type} + header = {'Content-Type': content_type} return header, body @@ -118,40 +122,43 @@ """ name = "Enomaly Elastic Computing Platform" + website = 'http://www.enomaly.com/' type = Provider.ECP connectionCls = ECPConnection def list_nodes(self): """ Returns a list of all running Nodes + + :rtype: ``list`` of :class:`Node` """ - #Make the call + # Make the call res = self.connection.request('/rest/hosting/vm/list').parse_body() - #Put together a list of node objects - nodes=[] + # Put together a list of node objects + nodes = [] for vm in res['vms']: node = self._to_node(vm) - if not node == None: + if node is not None: nodes.append(node) - #And return it + # And return it return nodes - def _to_node(self, vm): """ Turns a (json) dictionary into a Node object. This returns only running VMs. """ - #Check state + # Check state if not vm['state'] == "running": return None - #IPs - iplist = [interface['ip'] for interface in vm['interfaces'] if interface['ip'] != '127.0.0.1'] + # IPs + iplist = [interface['ip'] for interface in vm['interfaces'] if + interface['ip'] != '127.0.0.1'] public_ips = [] private_ips = [] @@ -166,14 +173,14 @@ else: public_ips.append(ip) - #Create the node object + # Create the node object n = Node( - id=vm['uuid'], - name=vm['name'], - state=NodeState.RUNNING, - public_ip=public_ips, - private_ip=private_ips, - driver=self, + id=vm['uuid'], + name=vm['name'], + state=NodeState.RUNNING, + public_ips=public_ips, + private_ips=private_ips, + driver=self, ) return n @@ -181,34 +188,35 @@ def reboot_node(self, node): """ Shuts down a VM and then starts it again. 
+ + @inherits: :class:`NodeDriver.reboot_node` """ - #Turn the VM off - #Black magic to make the POST requests work - d = self.connection._encode_multipart_formdata({'action':'stop'}) + # Turn the VM off + # Black magic to make the POST requests work + d = self.connection._encode_multipart_formdata({'action': 'stop'}) self.connection.request( - '/rest/hosting/vm/%s' % node.id, - method='POST', - headers=d[0], - data=d[1] + '/rest/hosting/vm/%s' % node.id, + method='POST', + headers=d[0], + data=d[1] ).parse_body() node.state = NodeState.REBOOTING - #Wait for it to turn off and then continue (to turn it on again) + # Wait for it to turn off and then continue (to turn it on again) while node.state == NodeState.REBOOTING: - #Check if it's off. + # Check if it's off. response = self.connection.request( - '/rest/hosting/vm/%s' % node.id - ).parse_body() + '/rest/hosting/vm/%s' % node.id + ).parse_body() if response['vm']['state'] == 'off': node.state = NodeState.TERMINATED else: time.sleep(5) - - #Turn the VM back on. - #Black magic to make the POST requests work - d = self.connection._encode_multipart_formdata({'action':'start'}) + # Turn the VM back on. + # Black magic to make the POST requests work + d = self.connection._encode_multipart_formdata({'action': 'start'}) self.connection.request( '/rest/hosting/vm/%s' % node.id, method='POST', @@ -222,34 +230,36 @@ def destroy_node(self, node): """ Shuts down and deletes a VM. 
+ + @inherits: :class:`NodeDriver.destroy_node` """ - #Shut down first - #Black magic to make the POST requests work - d = self.connection._encode_multipart_formdata({'action':'stop'}) + # Shut down first + # Black magic to make the POST requests work + d = self.connection._encode_multipart_formdata({'action': 'stop'}) self.connection.request( '/rest/hosting/vm/%s' % node.id, - method = 'POST', + method='POST', headers=d[0], data=d[1] ).parse_body() - #Ensure there was no applicationl level error + # Ensure there was no applicationl level error node.state = NodeState.PENDING - #Wait for the VM to turn off before continuing + # Wait for the VM to turn off before continuing while node.state == NodeState.PENDING: - #Check if it's off. + # Check if it's off. response = self.connection.request( - '/rest/hosting/vm/%s' % node.id - ).parse_body() + '/rest/hosting/vm/%s' % node.id + ).parse_body() if response['vm']['state'] == 'off': node.state = NodeState.TERMINATED else: time.sleep(5) - #Delete the VM - #Black magic to make the POST requests work - d = self.connection._encode_multipart_formdata({'action':'delete'}) + # Delete the VM + # Black magic to make the POST requests work + d = self.connection._encode_multipart_formdata({'action': 'delete'}) self.connection.request( '/rest/hosting/vm/%s' % (node.id), method='POST', @@ -261,99 +271,114 @@ def list_images(self, location=None): """ - Returns a list of all package templates aka appiances aka images + Returns a list of all package templates aka appiances aka images. 
+ + @inherits: :class:`NodeDriver.list_images` """ - #Make the call + # Make the call response = self.connection.request( '/rest/hosting/ptemplate/list').parse_body() - #Turn the response into an array of NodeImage objects + # Turn the response into an array of NodeImage objects images = [] for ptemplate in response['packages']: images.append(NodeImage( - id = ptemplate['uuid'], - name= '%s: %s' % (ptemplate['name'], ptemplate['description']), - driver = self, - )) + id=ptemplate['uuid'], + name='%s: %s' % (ptemplate['name'], ptemplate['description']), + driver=self,) + ) return images - def list_sizes(self, location=None): """ Returns a list of all hardware templates + + @inherits: :class:`NodeDriver.list_sizes` """ - #Make the call + # Make the call response = self.connection.request( '/rest/hosting/htemplate/list').parse_body() - #Turn the response into an array of NodeSize objects + # Turn the response into an array of NodeSize objects sizes = [] for htemplate in response['templates']: sizes.append(NodeSize( - id = htemplate['uuid'], - name = htemplate['name'], - ram = htemplate['memory'], - disk = 0, #Disk is independent of hardware template - bandwidth = 0, #There is no way to keep track of bandwidth - price = 0, #The billing system is external - driver = self, - )) + id=htemplate['uuid'], + name=htemplate['name'], + ram=htemplate['memory'], + disk=0, # Disk is independent of hardware template. + bandwidth=0, # There is no way to keep track of bandwidth. + price=0, # The billing system is external. + driver=self,) + ) return sizes def list_locations(self): """ This feature does not exist in ECP. Returns hard coded dummy location. + + :rtype: ``list`` of :class:`NodeLocation` """ - return [ - NodeLocation(id=1, - name="Cloud", - country='', - driver=self), - ] + return [NodeLocation(id=1, + name="Cloud", + country='', + driver=self), + ] def create_node(self, **kwargs): """ Creates a virtual machine. 
- Parameters: name (string), image (NodeImage), size (NodeSize) + :keyword name: String with a name for this new node (required) + :type name: ``str`` + + :keyword size: The size of resources allocated to this node . + (required) + :type size: :class:`NodeSize` + + :keyword image: OS Image to boot on node. (required) + :type image: :class:`NodeImage` + + :rtype: :class:`Node` """ - #Find out what network to put the VM on. - res = self.connection.request('/rest/hosting/network/list').parse_body() + # Find out what network to put the VM on. + res = self.connection.request( + '/rest/hosting/network/list').parse_body() - #Use the first / default network because there is no way to specific - #which one + # Use the first / default network because there is no way to specific + # which one network = res['networks'][0]['uuid'] - #Prepare to make the VM + # Prepare to make the VM data = { - 'name' : str(kwargs['name']), - 'package' : str(kwargs['image'].id), - 'hardware' : str(kwargs['size'].id), - 'network_uuid' : str(network), - 'disk' : '' + 'name': str(kwargs['name']), + 'package': str(kwargs['image'].id), + 'hardware': str(kwargs['size'].id), + 'network_uuid': str(network), + 'disk': '' } - #Black magic to make the POST requests work + # Black magic to make the POST requests work d = self.connection._encode_multipart_formdata(data) response = self.connection.request( '/rest/hosting/vm/', method='PUT', - headers = d[0], + headers=d[0], data=d[1] ).parse_body() - #Create a node object and return it. + # Create a node object and return it. 
n = Node( id=response['machine_id'], name=data['name'], state=NodeState.PENDING, - public_ip=[], - private_ip=[], + public_ips=[], + private_ips=[], driver=self, ) diff -Nru libcloud-0.5.0/libcloud/compute/drivers/elastichosts.py libcloud-0.15.1/libcloud/compute/drivers/elastichosts.py --- libcloud-0.5.0/libcloud/compute/drivers/elastichosts.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/elastichosts.py 2013-11-29 12:35:04.000000000 +0000 @@ -12,106 +12,62 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + """ ElasticHosts Driver """ -import re -import time -import base64 -import httplib - -try: - import json -except: - import simplejson as json - -from libcloud.common.base import ConnectionUserAndKey, Response -from libcloud.common.types import InvalidCredsError, MalformedResponseError -from libcloud.compute.types import Provider, NodeState -from libcloud.compute.base import NodeDriver, NodeSize, Node -from libcloud.compute.base import NodeImage -from libcloud.compute.deployment import ScriptDeployment, SSHKeyDeployment, MultiStepDeployment + +from libcloud.compute.types import Provider +from libcloud.compute.drivers.elasticstack import ElasticStackBaseNodeDriver + # API end-points API_ENDPOINTS = { - 'uk-1': { + 'lon-p': { 'name': 'London Peer 1', 'country': 'United Kingdom', - 'host': 'api.lon-p.elastichosts.com' + 'host': 'api-lon-p.elastichosts.com' }, - 'uk-2': { + 'lon-b': { 'name': 'London BlueSquare', 'country': 'United Kingdom', - 'host': 'api.lon-b.elastichosts.com' + 'host': 'api-lon-b.elastichosts.com' }, - 'us-1': { + 'sat-p': { 'name': 'San Antonio Peer 1', 'country': 'United States', - 'host': 'api.sat-p.elastichosts.com' + 'host': 'api-sat-p.elastichosts.com' }, -} - -# Default API end-point for the base connection clase. 
-DEFAULT_ENDPOINT = 'us-1' - -# ElasticHosts doesn't specify special instance types, so I just specified -# some plans based on the pricing page -# (http://www.elastichosts.com/cloud-hosting/pricing) -# and other provides. -# -# Basically for CPU any value between 500Mhz and 20000Mhz should work, -# 256MB to 8192MB for ram and 1GB to 2TB for disk. -INSTANCE_TYPES = { - 'small': { - 'id': 'small', - 'name': 'Small instance', - 'cpu': 2000, - 'memory': 1700, - 'disk': 160, - 'bandwidth': None, - }, - 'medium': { - 'id': 'medium', - 'name': 'Medium instance', - 'cpu': 3000, - 'memory': 4096, - 'disk': 500, - 'bandwidth': None, - }, - 'large': { - 'id': 'large', - 'name': 'Large instance', - 'cpu': 4000, - 'memory': 7680, - 'disk': 850, - 'bandwidth': None, - }, - 'extra-large': { - 'id': 'extra-large', - 'name': 'Extra Large instance', - 'cpu': 8000, - 'memory': 8192, - 'disk': 1690, - 'bandwidth': None, - }, - 'high-cpu-medium': { - 'id': 'high-cpu-medium', - 'name': 'High-CPU Medium instance', - 'cpu': 5000, - 'memory': 1700, - 'disk': 350, - 'bandwidth': None, - }, - 'high-cpu-extra-large': { - 'id': 'high-cpu-extra-large', - 'name': 'High-CPU Extra Large instance', - 'cpu': 20000, - 'memory': 7168, - 'disk': 1690, - 'bandwidth': None, + 'lax-p': { + 'name': 'Los Angeles Peer 1', + 'country': 'United States', + 'host': 'api-lax-p.elastichosts.com' }, + 'sjc-c': { + 'name': 'San Jose (Silicon Valley)', + 'country': 'United States', + 'host': 'api-sjc-c.elastichosts.com' + }, + 'tor-p': { + 'name': 'Toronto Peer 1', + 'country': 'Canada', + 'host': 'api-tor-p.elastichosts.com' + }, + 'syd-y': { + 'name': 'Sydney', + 'country': 'Australia', + 'host': 'api-syd-v.elastichosts.com' + }, + 'cn-1': { + 'name': 'Hong Kong', + 'country': 'China', + 'host': 'api-hkg-e.elastichosts.com' + } } +# Default API end-point for the base connection class. 
+DEFAULT_REGION = 'sat-p' + # Retrieved from http://www.elastichosts.com/cloud-hosting/api STANDARD_DRIVES = { '38df0986-4d85-4b76-b502-3878ffc80161': { @@ -132,6 +88,12 @@ 'size_gunzipped': '1GB', 'supports_deployment': True, }, + '62f512cd-82c7-498e-88d8-a09ac2ef20e7': { + 'uuid': '62f512cd-82c7-498e-88d8-a09ac2ef20e7', + 'description': 'Ubuntu Linux 12.04', + 'size_gunzipped': '1GB', + 'supports_deployment': True, + }, 'b9d0eb72-d273-43f1-98e3-0d4b87d372c0': { 'uuid': 'b9d0eb72-d273-43f1-98e3-0d4b87d372c0', 'description': 'Windows Web Server 2008', @@ -164,416 +126,111 @@ }, } -NODE_STATE_MAP = { - 'active': NodeState.RUNNING, - 'dead': NodeState.TERMINATED, - 'dumped': NodeState.TERMINATED, -} - -# Default timeout (in seconds) for the drive imaging process -IMAGING_TIMEOUT = 10 * 60 class ElasticHostsException(Exception): - """ - Exception class for ElasticHosts driver - """ - def __str__(self): return self.args[0] def __repr__(self): return "" % (self.args[0]) -class ElasticHostsResponse(Response): - def success(self): - if self.status == 401: - raise InvalidCredsError() - - return self.status >= 200 and self.status <= 299 - - def parse_body(self): - if not self.body: - return self.body - - try: - data = json.loads(self.body) - except: - raise MalformedResponseError("Failed to parse JSON", - body=self.body, - driver=ElasticHostsBaseNodeDriver) - - return data - - def parse_error(self): - error_header = self.headers.get('x-elastic-error', '') - return 'X-Elastic-Error: %s (%s)' % (error_header, self.body.strip()) - -class ElasticHostsNodeSize(NodeSize): - def __init__(self, id, name, cpu, ram, disk, bandwidth, price, driver): - self.id = id - self.name = name - self.cpu = cpu - self.ram = ram - self.disk = disk - self.bandwidth = bandwidth - self.price = price - self.driver = driver - - def __repr__(self): - return (('') - % (self.id, self.name, self.cpu, self.ram, - self.disk, self.bandwidth, self.price, self.driver.name)) - -class 
ElasticHostsBaseConnection(ConnectionUserAndKey): - """ - Base connection class for the ElasticHosts driver - """ - - host = API_ENDPOINTS[DEFAULT_ENDPOINT]['host'] - responseCls = ElasticHostsResponse - - def add_default_headers(self, headers): - headers['Accept'] = 'application/json' - headers['Content-Type'] = 'application/json' - headers['Authorization'] = ('Basic %s' - % (base64.b64encode('%s:%s' - % (self.user_id, - self.key)))) - return headers -class ElasticHostsBaseNodeDriver(NodeDriver): +class ElasticHostsNodeDriver(ElasticStackBaseNodeDriver): """ - Base ElasticHosts node driver + Node Driver class for ElasticHosts """ - type = Provider.ELASTICHOSTS api_name = 'elastichosts' name = 'ElasticHosts' - connectionCls = ElasticHostsBaseConnection + website = 'http://www.elastichosts.com/' features = {"create_node": ["generates_password"]} + _standard_drives = STANDARD_DRIVES - def reboot_node(self, node): - # Reboots the node - response = self.connection.request( - action='/servers/%s/reset' % (node.id), - method='POST' - ) - return response.status == 204 - - def destroy_node(self, node): - # Kills the server immediately - response = self.connection.request( - action='/servers/%s/destroy' % (node.id), - method='POST' - ) - return response.status == 204 - - def list_images(self, location=None): - # Returns a list of available pre-installed system drive images - images = [] - for key, value in STANDARD_DRIVES.iteritems(): - image = NodeImage( - id=value['uuid'], - name=value['description'], - driver=self.connection.driver, - extra={ - 'size_gunzipped': value['size_gunzipped'] - } - ) - images.append(image) - - return images - - def list_sizes(self, location=None): - sizes = [] - for key, value in INSTANCE_TYPES.iteritems(): - size = ElasticHostsNodeSize( - id=value['id'], - name=value['name'], cpu=value['cpu'], ram=value['memory'], - disk=value['disk'], bandwidth=value['bandwidth'], - price=self._get_size_price(size_id=value['id']), - 
driver=self.connection.driver - ) - sizes.append(size) - - return sizes - - def list_nodes(self): - # Returns a list of active (running) nodes - response = self.connection.request(action='/servers/info').object - - nodes = [] - for data in response: - node = self._to_node(data) - nodes.append(node) - - return nodes - - def create_node(self, **kwargs): - """Creates a ElasticHosts instance - - See L{NodeDriver.create_node} for more keyword args. - - @keyword name: String with a name for this new node (required) - @type name: C{string} - - @keyword smp: Number of virtual processors or None to calculate - based on the cpu speed - @type smp: C{int} - - @keyword nic_model: e1000, rtl8139 or virtio - (if not specified, e1000 is used) - @type nic_model: C{string} - - @keyword vnc_password: If set, the same password is also used for - SSH access with user toor, - otherwise VNC access is disabled and - no SSH login is possible. - @type vnc_password: C{string} - """ - size = kwargs['size'] - image = kwargs['image'] - smp = kwargs.get('smp', 'auto') - nic_model = kwargs.get('nic_model', 'e1000') - vnc_password = ssh_password = kwargs.get('vnc_password', None) - - if nic_model not in ('e1000', 'rtl8139', 'virtio'): - raise ElasticHostsException('Invalid NIC model specified') - - # check that drive size is not smaller then pre installed image size - - # First we create a drive with the specified size - drive_data = {} - drive_data.update({'name': kwargs['name'], - 'size': '%sG' % (kwargs['size'].disk)}) - - response = self.connection.request(action='/drives/create', - data=json.dumps(drive_data), - method='POST').object - - if not response: - raise ElasticHostsException('Drive creation failed') - - drive_uuid = response['drive'] - - # Then we image the selected pre-installed system drive onto it - response = self.connection.request( - action='/drives/%s/image/%s/gunzip' % (drive_uuid, image.id), - method='POST' - ) - - if response.status != 204: - raise 
ElasticHostsException('Drive imaging failed') - - # We wait until the drive is imaged and then boot up the node - # (in most cases, the imaging process shouldn't take longer - # than a few minutes) - response = self.connection.request( - action='/drives/%s/info' % (drive_uuid) - ).object - imaging_start = time.time() - while response.has_key('imaging'): - response = self.connection.request( - action='/drives/%s/info' % (drive_uuid) - ).object - elapsed_time = time.time() - imaging_start - if (response.has_key('imaging') - and elapsed_time >= IMAGING_TIMEOUT): - raise ElasticHostsException('Drive imaging timed out') - time.sleep(1) - - node_data = {} - node_data.update({'name': kwargs['name'], - 'cpu': size.cpu, - 'mem': size.ram, - 'ide:0:0': drive_uuid, - 'boot': 'ide:0:0', - 'smp': smp}) - node_data.update({'nic:0:model': nic_model, 'nic:0:dhcp': 'auto'}) - - if vnc_password: - node_data.update({'vnc:ip': 'auto', 'vnc:password': vnc_password}) - - response = self.connection.request( - action='/servers/create', data=json.dumps(node_data), - method='POST' - ).object - - if isinstance(response, list): - nodes = [self._to_node(node, ssh_password) for node in response] - else: - nodes = self._to_node(response, ssh_password) - - return nodes - - # Extension methods - def ex_set_node_configuration(self, node, **kwargs): - # Changes the configuration of the running server - valid_keys = ('^name$', '^parent$', '^cpu$', '^smp$', '^mem$', - '^boot$', '^nic:0:model$', '^nic:0:dhcp', - '^nic:1:model$', '^nic:1:vlan$', '^nic:1:mac$', - '^vnc:ip$', '^vnc:password$', '^vnc:tls', - '^ide:[0-1]:[0-1](:media)?$', - '^scsi:0:[0-7](:media)?$', '^block:[0-7](:media)?$') - - invalid_keys = [] - for key in kwargs.keys(): - matches = False - for regex in valid_keys: - if re.match(regex, key): - matches = True - break - if not matches: - invalid_keys.append(key) - - if invalid_keys: - raise ElasticHostsException( - 'Invalid configuration key specified: %s' - % (',' .join(invalid_keys)) - 
) - - response = self.connection.request( - action='/servers/%s/set' % (node.id), data=json.dumps(kwargs), - method='POST' - ) + def __init__(self, key, secret=None, secure=True, host=None, port=None, + region=DEFAULT_REGION, **kwargs): - return (response.status == httplib.OK and response.body != '') + if hasattr(self, '_region'): + region = self._region - def deploy_node(self, **kwargs): - """ - Create a new node, and start deployment. + if region not in API_ENDPOINTS: + raise ValueError('Invalid region: %s' % (region)) - @keyword enable_root: If true, root password will be set to - vnc_password (this will enable SSH access) - and default 'toor' account will be deleted. - @type enable_root: C{bool} + self._host_argument_set = host is not None + super(ElasticHostsNodeDriver, self).__init__(key=key, secret=secret, + secure=secure, host=host, + port=port, + region=region, **kwargs) - For detailed description and keywords args, see - L{NodeDriver.deploy_node}. + def _ex_connection_class_kwargs(self): + """ + Return the host value based on the user supplied region. 
""" - image = kwargs['image'] - vnc_password = kwargs.get('vnc_password', None) - enable_root = kwargs.get('enable_root', False) - - if not vnc_password: - raise ValueError('You need to provide vnc_password argument ' - 'if you want to use deployment') - - if (image in STANDARD_DRIVES - and STANDARD_DRIVES[image]['supports_deployment']): - raise ValueError('Image %s does not support deployment' - % (image.id)) - - if enable_root: - script = ("unset HISTFILE;" - "echo root:%s | chpasswd;" - "sed -i '/^toor.*$/d' /etc/passwd /etc/shadow;" - "history -c") % vnc_password - root_enable_script = ScriptDeployment(script=script, - delete=True) - deploy = kwargs.get('deploy', None) - if deploy: - if (isinstance(deploy, ScriptDeployment) - or isinstance(deploy, SSHKeyDeployment)): - deployment = MultiStepDeployment([deploy, - root_enable_script]) - elif isinstance(deploy, MultiStepDeployment): - deployment = deploy - deployment.add(root_enable_script) - else: - deployment = root_enable_script - - kwargs['deploy'] = deployment - - if not kwargs.get('ssh_username', None): - kwargs['ssh_username'] = 'toor' - - return super(ElasticHostsBaseNodeDriver, self).deploy_node(**kwargs) - - def ex_shutdown_node(self, node): - # Sends the ACPI power-down event - response = self.connection.request( - action='/servers/%s/shutdown' % (node.id), - method='POST' - ) - return response.status == 204 - - def ex_destroy_drive(self, drive_uuid): - # Deletes a drive - response = self.connection.request( - action='/drives/%s/destroy' % (drive_uuid), - method='POST' - ) - return response.status == 204 - - # Helper methods - def _to_node(self, data, ssh_password=None): - try: - state = NODE_STATE_MAP[data['status']] - except KeyError: - state = NodeState.UNKNOWN - - if isinstance(data['nic:0:dhcp'], list): - public_ip = data['nic:0:dhcp'] - else: - public_ip = [data['nic:0:dhcp']] - - extra = {'cpu': data['cpu'], - 'smp': data['smp'], - 'mem': data['mem'], - 'started': data['started']} - - if 
data.has_key('vnc:ip') and data.has_key('vnc:password'): - extra.update({'vnc_ip': data['vnc:ip'], - 'vnc_password': data['vnc:password']}) - - if ssh_password: - extra.update({'password': ssh_password}) - - node = Node(id=data['server'], name=data['name'], state=state, - public_ip=public_ip, private_ip=None, - driver=self.connection.driver, - extra=extra) - - return node - -class ElasticHostsUK1Connection(ElasticHostsBaseConnection): + kwargs = {} + if not self._host_argument_set: + kwargs['host'] = API_ENDPOINTS[self.region]['host'] + + return kwargs + + +class ElasticHostsUK1NodeDriver(ElasticHostsNodeDriver): """ - Connection class for the ElasticHosts driver for - the London Peer 1 end-point + ElasticHosts node driver for the London Peer 1 end-point """ + name = 'ElasticHosts (lon-p)' + _region = 'lon-p' - host = API_ENDPOINTS['uk-1']['host'] -class ElasticHostsUK1NodeDriver(ElasticHostsBaseNodeDriver): +class ElasticHostsUK2NodeDriver(ElasticHostsNodeDriver): """ - ElasticHosts node driver for the London Peer 1 end-point + ElasticHosts node driver for the London Bluesquare end-point """ - connectionCls = ElasticHostsUK1Connection + name = 'ElasticHosts (lon-b)' + _region = 'lon-b' -class ElasticHostsUK2Connection(ElasticHostsBaseConnection): + +class ElasticHostsUS1NodeDriver(ElasticHostsNodeDriver): """ - Connection class for the ElasticHosts driver for - the London Bluesquare end-point + ElasticHosts node driver for the San Antonio Peer 1 end-point """ - host = API_ENDPOINTS['uk-2']['host'] + name = 'ElasticHosts (sat-p)' + _region = 'sat-p' + -class ElasticHostsUK2NodeDriver(ElasticHostsBaseNodeDriver): +class ElasticHostsUS2NodeDriver(ElasticHostsNodeDriver): """ - ElasticHosts node driver for the London Bluesquare end-point + ElasticHosts node driver for the Los Angeles Peer 1 end-point """ - connectionCls = ElasticHostsUK2Connection + name = 'ElasticHosts (lax-p)' + _region = 'lax-p' + -class ElasticHostsUS1Connection(ElasticHostsBaseConnection): +class 
ElasticHostsUS3NodeDriver(ElasticHostsNodeDriver): """ - Connection class for the ElasticHosts driver for - the San Antonio Peer 1 end-point + ElasticHosts node driver for the San Jose (Silicon Valley) end-point """ - host = API_ENDPOINTS['us-1']['host'] + name = 'ElasticHosts (sjc-c)' + _region = 'sjc-c' + -class ElasticHostsUS1NodeDriver(ElasticHostsBaseNodeDriver): +class ElasticHostsCA1NodeDriver(ElasticHostsNodeDriver): """ - ElasticHosts node driver for the San Antonio Peer 1 end-point + ElasticHosts node driver for the Toronto Peer 1 end-point + """ + name = 'ElasticHosts (tor-p)' + _region = 'tor-p' + + +class ElasticHostsAU1NodeDriver(ElasticHostsNodeDriver): + """ + ElasticHosts node driver for the Sydney end-point + """ + name = 'ElasticHosts (syd-y)' + _region = 'syd-y' + + +class ElasticHostsCN1NodeDriver(ElasticHostsNodeDriver): + """ + ElasticHosts node driver for the Hong Kong end-point """ - connectionCls = ElasticHostsUS1Connection + name = 'ElasticHosts (cn-1)' + _region = 'cn-1' diff -Nru libcloud-0.5.0/libcloud/compute/drivers/elasticstack.py libcloud-0.15.1/libcloud/compute/drivers/elasticstack.py --- libcloud-0.5.0/libcloud/compute/drivers/elasticstack.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/elasticstack.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,488 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Base driver for the providers based on the ElasticStack platform - +http://www.elasticstack.com. +""" + +import re +import time +import base64 + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import b + +try: + import simplejson as json +except ImportError: + import json + +from libcloud.common.base import ConnectionUserAndKey, JsonResponse +from libcloud.common.types import InvalidCredsError +from libcloud.compute.types import NodeState +from libcloud.compute.base import NodeDriver, NodeSize, Node +from libcloud.compute.base import NodeImage +from libcloud.compute.deployment import ScriptDeployment, SSHKeyDeployment +from libcloud.compute.deployment import MultiStepDeployment + + +NODE_STATE_MAP = { + 'active': NodeState.RUNNING, + 'dead': NodeState.TERMINATED, + 'dumped': NodeState.TERMINATED, +} + +# Default timeout (in seconds) for the drive imaging process +IMAGING_TIMEOUT = 10 * 60 + +# ElasticStack doesn't specify special instance types, so I just specified +# some plans based on the other provider offerings. +# +# Basically for CPU any value between 500Mhz and 20000Mhz should work, +# 256MB to 8192MB for ram and 1GB to 2TB for disk. 
+INSTANCE_TYPES = { + 'small': { + 'id': 'small', + 'name': 'Small instance', + 'cpu': 2000, + 'memory': 1700, + 'disk': 160, + 'bandwidth': None, + }, + 'medium': { + 'id': 'medium', + 'name': 'Medium instance', + 'cpu': 3000, + 'memory': 4096, + 'disk': 500, + 'bandwidth': None, + }, + 'large': { + 'id': 'large', + 'name': 'Large instance', + 'cpu': 4000, + 'memory': 7680, + 'disk': 850, + 'bandwidth': None, + }, + 'extra-large': { + 'id': 'extra-large', + 'name': 'Extra Large instance', + 'cpu': 8000, + 'memory': 8192, + 'disk': 1690, + 'bandwidth': None, + }, + 'high-cpu-medium': { + 'id': 'high-cpu-medium', + 'name': 'High-CPU Medium instance', + 'cpu': 5000, + 'memory': 1700, + 'disk': 350, + 'bandwidth': None, + }, + 'high-cpu-extra-large': { + 'id': 'high-cpu-extra-large', + 'name': 'High-CPU Extra Large instance', + 'cpu': 20000, + 'memory': 7168, + 'disk': 1690, + 'bandwidth': None, + }, +} + + +class ElasticStackException(Exception): + def __str__(self): + return self.args[0] + + def __repr__(self): + return "" % (self.args[0]) + + +class ElasticStackResponse(JsonResponse): + def success(self): + if self.status == 401: + raise InvalidCredsError() + + return self.status >= 200 and self.status <= 299 + + def parse_error(self): + error_header = self.headers.get('x-elastic-error', '') + return 'X-Elastic-Error: %s (%s)' % (error_header, self.body.strip()) + + +class ElasticStackNodeSize(NodeSize): + def __init__(self, id, name, cpu, ram, disk, bandwidth, price, driver): + self.id = id + self.name = name + self.cpu = cpu + self.ram = ram + self.disk = disk + self.bandwidth = bandwidth + self.price = price + self.driver = driver + + def __repr__(self): + return (('') + % (self.id, self.name, self.cpu, self.ram, + self.disk, self.bandwidth, self.price, self.driver.name)) + + +class ElasticStackBaseConnection(ConnectionUserAndKey): + """ + Base connection class for the ElasticStack driver + """ + + host = None + responseCls = ElasticStackResponse + + def 
add_default_headers(self, headers): + headers['Accept'] = 'application/json' + headers['Content-Type'] = 'application/json' + headers['Authorization'] = \ + ('Basic %s' % (base64.b64encode(b('%s:%s' % (self.user_id, + self.key)))) + .decode('utf-8')) + return headers + + +class ElasticStackBaseNodeDriver(NodeDriver): + website = 'http://www.elasticstack.com' + connectionCls = ElasticStackBaseConnection + features = {"create_node": ["generates_password"]} + + def reboot_node(self, node): + # Reboots the node + response = self.connection.request( + action='/servers/%s/reset' % (node.id), + method='POST' + ) + return response.status == 204 + + def destroy_node(self, node): + # Kills the server immediately + response = self.connection.request( + action='/servers/%s/destroy' % (node.id), + method='POST' + ) + return response.status == 204 + + def list_images(self, location=None): + # Returns a list of available pre-installed system drive images + images = [] + for key, value in self._standard_drives.items(): + image = NodeImage( + id=value['uuid'], + name=value['description'], + driver=self.connection.driver, + extra={ + 'size_gunzipped': value['size_gunzipped'] + } + ) + images.append(image) + + return images + + def list_sizes(self, location=None): + sizes = [] + for key, value in INSTANCE_TYPES.items(): + size = ElasticStackNodeSize( + id=value['id'], + name=value['name'], cpu=value['cpu'], ram=value['memory'], + disk=value['disk'], bandwidth=value['bandwidth'], + price=self._get_size_price(size_id=value['id']), + driver=self.connection.driver + ) + sizes.append(size) + + return sizes + + def list_nodes(self): + # Returns a list of active (running) nodes + response = self.connection.request(action='/servers/info').object + + nodes = [] + for data in response: + node = self._to_node(data) + nodes.append(node) + + return nodes + + def create_node(self, **kwargs): + """Creates a ElasticStack instance + + @inherits: :class:`NodeDriver.create_node` + + :keyword name: 
String with a name for this new node (required) + :type name: ``str`` + + :keyword smp: Number of virtual processors or None to calculate + based on the cpu speed + :type smp: ``int`` + + :keyword nic_model: e1000, rtl8139 or virtio + (if not specified, e1000 is used) + :type nic_model: ``str`` + + :keyword vnc_password: If set, the same password is also used for + SSH access with user toor, + otherwise VNC access is disabled and + no SSH login is possible. + :type vnc_password: ``str`` + """ + size = kwargs['size'] + image = kwargs['image'] + smp = kwargs.get('smp', 'auto') + nic_model = kwargs.get('nic_model', 'e1000') + vnc_password = ssh_password = kwargs.get('vnc_password', None) + + if nic_model not in ('e1000', 'rtl8139', 'virtio'): + raise ElasticStackException('Invalid NIC model specified') + + # check that drive size is not smaller than pre installed image size + + # First we create a drive with the specified size + drive_data = {} + drive_data.update({'name': kwargs['name'], + 'size': '%sG' % (kwargs['size'].disk)}) + + response = self.connection.request(action='/drives/create', + data=json.dumps(drive_data), + method='POST').object + + if not response: + raise ElasticStackException('Drive creation failed') + + drive_uuid = response['drive'] + + # Then we image the selected pre-installed system drive onto it + response = self.connection.request( + action='/drives/%s/image/%s/gunzip' % (drive_uuid, image.id), + method='POST' + ) + + if response.status not in (200, 204): + raise ElasticStackException('Drive imaging failed') + + # We wait until the drive is imaged and then boot up the node + # (in most cases, the imaging process shouldn't take longer + # than a few minutes) + response = self.connection.request( + action='/drives/%s/info' % (drive_uuid) + ).object + + imaging_start = time.time() + while 'imaging' in response: + response = self.connection.request( + action='/drives/%s/info' % (drive_uuid) + ).object + + elapsed_time = time.time() - 
imaging_start + if ('imaging' in response and elapsed_time >= IMAGING_TIMEOUT): + raise ElasticStackException('Drive imaging timed out') + + time.sleep(1) + + node_data = {} + node_data.update({'name': kwargs['name'], + 'cpu': size.cpu, + 'mem': size.ram, + 'ide:0:0': drive_uuid, + 'boot': 'ide:0:0', + 'smp': smp}) + node_data.update({'nic:0:model': nic_model, 'nic:0:dhcp': 'auto'}) + + if vnc_password: + node_data.update({'vnc': 'auto', 'vnc:password': vnc_password}) + + response = self.connection.request( + action='/servers/create', data=json.dumps(node_data), + method='POST' + ).object + + if isinstance(response, list): + nodes = [self._to_node(node, ssh_password) for node in response] + else: + nodes = self._to_node(response, ssh_password) + + return nodes + + # Extension methods + def ex_set_node_configuration(self, node, **kwargs): + """ + Changes the configuration of the running server + + :param node: Node which should be used + :type node: :class:`Node` + + :param kwargs: keyword arguments + :type kwargs: ``dict`` + + :rtype: ``bool`` + """ + valid_keys = ('^name$', '^parent$', '^cpu$', '^smp$', '^mem$', + '^boot$', '^nic:0:model$', '^nic:0:dhcp', + '^nic:1:model$', '^nic:1:vlan$', '^nic:1:mac$', + '^vnc:ip$', '^vnc:password$', '^vnc:tls', + '^ide:[0-1]:[0-1](:media)?$', + '^scsi:0:[0-7](:media)?$', '^block:[0-7](:media)?$') + + invalid_keys = [] + keys = list(kwargs.keys()) + for key in keys: + matches = False + for regex in valid_keys: + if re.match(regex, key): + matches = True + break + if not matches: + invalid_keys.append(key) + + if invalid_keys: + raise ElasticStackException( + 'Invalid configuration key specified: %s' + % (',' .join(invalid_keys)) + ) + + response = self.connection.request( + action='/servers/%s/set' % (node.id), data=json.dumps(kwargs), + method='POST' + ) + + return (response.status == httplib.OK and response.body != '') + + def deploy_node(self, **kwargs): + """ + Create a new node, and start deployment. 
+ + @inherits: :class:`NodeDriver.deploy_node` + + :keyword enable_root: If true, root password will be set to + vnc_password (this will enable SSH access) + and default 'toor' account will be deleted. + :type enable_root: ``bool`` + """ + image = kwargs['image'] + vnc_password = kwargs.get('vnc_password', None) + enable_root = kwargs.get('enable_root', False) + + if not vnc_password: + raise ValueError('You need to provide vnc_password argument ' + 'if you want to use deployment') + + if (image in self._standard_drives and + not self._standard_drives[image]['supports_deployment']): + raise ValueError('Image %s does not support deployment' + % (image.id)) + + if enable_root: + script = ("unset HISTFILE;" + "echo root:%s | chpasswd;" + "sed -i '/^toor.*$/d' /etc/passwd /etc/shadow;" + "history -c") % vnc_password + root_enable_script = ScriptDeployment(script=script, + delete=True) + deploy = kwargs.get('deploy', None) + if deploy: + if (isinstance(deploy, ScriptDeployment) or + isinstance(deploy, SSHKeyDeployment)): + deployment = MultiStepDeployment([deploy, + root_enable_script]) + elif isinstance(deploy, MultiStepDeployment): + deployment = deploy + deployment.add(root_enable_script) + else: + deployment = root_enable_script + + kwargs['deploy'] = deployment + + if not kwargs.get('ssh_username', None): + kwargs['ssh_username'] = 'toor' + + return super(ElasticStackBaseNodeDriver, self).deploy_node(**kwargs) + + def ex_shutdown_node(self, node): + """ + Sends the ACPI power-down event + + :param node: Node which should be used + :type node: :class:`Node` + + :rtype: ``bool`` + """ + response = self.connection.request( + action='/servers/%s/shutdown' % (node.id), + method='POST' + ) + return response.status == 204 + + def ex_destroy_drive(self, drive_uuid): + """ + Deletes a drive + + :param drive_uuid: Drive uuid which should be used + :type drive_uuid: ``str`` + + :rtype: ``bool`` + """ + response = self.connection.request( + action='/drives/%s/destroy' % 
(drive_uuid), + method='POST' + ) + return response.status == 204 + + # Helper methods + def _to_node(self, data, ssh_password=None): + try: + state = NODE_STATE_MAP[data['status']] + except KeyError: + state = NodeState.UNKNOWN + + if isinstance(data['nic:0:dhcp'], list): + public_ip = data['nic:0:dhcp'] + else: + public_ip = [data['nic:0:dhcp']] + + extra = {'cpu': data['cpu'], + 'smp': data['smp'], + 'mem': data['mem'], + 'started': data['started']} + + if 'vnc:ip' in data: + extra['vnc:ip'] = data['vnc:ip'] + + if 'vnc:password' in data: + extra['vnc:password'] = data['vnc:password'] + + boot_device = data['boot'] + + if isinstance(boot_device, list): + for device in boot_device: + extra[device] = data[device] + else: + extra[boot_device] = data[boot_device] + + if ssh_password: + extra.update({'password': ssh_password}) + + node = Node(id=data['server'], name=data['name'], state=state, + public_ips=public_ip, private_ips=None, + driver=self.connection.driver, + extra=extra) + + return node diff -Nru libcloud-0.5.0/libcloud/compute/drivers/exoscale.py libcloud-0.15.1/libcloud/compute/drivers/exoscale.py --- libcloud-0.5.0/libcloud/compute/drivers/exoscale.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/exoscale.py 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,31 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.compute.providers import Provider +from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver + +__all__ = [ + 'ExoscaleNodeDriver' +] + + +class ExoscaleNodeDriver(CloudStackNodeDriver): + type = Provider.EXOSCALE + name = 'Exoscale' + website = 'https://www.exoscale.ch/' + + # API endpoint info + host = 'api.exoscale.ch' + path = '/compute' diff -Nru libcloud-0.5.0/libcloud/compute/drivers/gandi.py libcloud-0.15.1/libcloud/compute/drivers/gandi.py --- libcloud-0.5.0/libcloud/compute/drivers/gandi.py 2011-03-24 19:47:42.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/gandi.py 2013-11-29 12:35:04.000000000 +0000 @@ -13,152 +13,102 @@ # See the License for the specific language governing permissions and # limitations under the License. 
""" -Gandi driver +Gandi driver for compute """ +import sys +from datetime import datetime -import time -import xmlrpclib +from libcloud.common.gandi import BaseGandiDriver, GandiException,\ + NetworkInterface, IPAddress, Disk +from libcloud.compute.base import StorageVolume +from libcloud.compute.types import NodeState, Provider +from libcloud.compute.base import Node, NodeDriver +from libcloud.compute.base import NodeSize, NodeImage, NodeLocation -import libcloud -from libcloud.compute.types import Provider, NodeState -from libcloud.compute.base import NodeDriver, Node, NodeLocation, NodeSize, NodeImage - -# Global constants -API_VERSION = '2.0' -API_PREFIX = "https://rpc.gandi.net/xmlrpc/%s/" % API_VERSION - -DEFAULT_TIMEOUT = 600 # operation pooling max seconds -DEFAULT_INTERVAL = 20 # seconds between 2 operation.info NODE_STATE_MAP = { 'running': NodeState.RUNNING, 'halted': NodeState.TERMINATED, 'paused': NodeState.TERMINATED, - 'locked' : NodeState.TERMINATED, - 'being_created' : NodeState.PENDING, - 'invalid' : NodeState.UNKNOWN, - 'legally_locked' : NodeState.PENDING, - 'deleted' : NodeState.TERMINATED + 'locked': NodeState.TERMINATED, + 'being_created': NodeState.PENDING, + 'invalid': NodeState.UNKNOWN, + 'legally_locked': NodeState.PENDING, + 'deleted': NodeState.TERMINATED } NODE_PRICE_HOURLY_USD = 0.02 -class GandiException(Exception): - """ - Exception class for Gandi driver - """ - def __str__(self): - return "(%u) %s" % (self.args[0], self.args[1]) - def __repr__(self): - return "" % (self.args[0], self.args[1]) - -class GandiSafeTransport(xmlrpclib.SafeTransport): - pass - -class GandiTransport(xmlrpclib.Transport): - pass - -class GandiProxy(xmlrpclib.ServerProxy): - transportCls = (GandiTransport, GandiSafeTransport) - - def __init__(self,user_agent, verbose=0): - cls = self.transportCls[0] - if API_PREFIX.startswith("https://"): - cls = self.transportCls[1] - t = cls(use_datetime=0) - t.user_agent = user_agent - xmlrpclib.ServerProxy.__init__( - 
self, - uri="%s" % (API_PREFIX), - transport=t, - verbose=verbose, - allow_none=True - ) - -class GandiConnection(object): - """ - Connection class for the Gandi driver - """ - - proxyCls = GandiProxy - driver = 'gandi' - - def __init__(self, user, password=None): - self.ua = [] - - # Connect only with an api_key generated on website - self.api_key = user - - try: - self._proxy = self.proxyCls(self._user_agent()) - except xmlrpclib.Fault, e: - raise GandiException(1000, e) - - def _user_agent(self): - return 'libcloud/%s (%s)%s' % ( - libcloud.__version__, - self.driver, - "".join([" (%s)" % x for x in self.ua])) - - def user_agent_append(self, s): - self.ua.append(s) - - def request(self,method,*args): - """ Request xmlrpc method with given args""" - try: - return getattr(self._proxy, method)(self.api_key,*args) - except xmlrpclib.Fault, e: - raise GandiException(1001, e) +INSTANCE_TYPES = { + 'small': { + 'id': 'small', + 'name': 'Small instance', + 'cpu': 1, + 'memory': 256, + 'disk': 3, + 'bandwidth': 10240, + }, + 'medium': { + 'id': 'medium', + 'name': 'Medium instance', + 'cpu': 1, + 'memory': 1024, + 'disk': 20, + 'bandwidth': 10240, + }, + 'large': { + 'id': 'large', + 'name': 'Large instance', + 'cpu': 2, + 'memory': 2048, + 'disk': 50, + 'bandwidth': 10240, + }, + 'x-large': { + 'id': 'x-large', + 'name': 'Extra Large instance', + 'cpu': 4, + 'memory': 4096, + 'disk': 100, + 'bandwidth': 10240, + }, +} -class GandiNodeDriver(NodeDriver): +class GandiNodeDriver(BaseGandiDriver, NodeDriver): """ Gandi node driver """ - connectionCls = GandiConnection - name = 'Gandi' api_name = 'gandi' friendly_name = 'Gandi.net' + website = 'http://www.gandi.net/' country = 'FR' type = Provider.GANDI # TODO : which features to enable ? 
- features = { } - - def __init__(self, key, secret=None, secure=False): - self.key = key - self.secret = secret - self.connection = self.connectionCls(key, secret) - self.connection.driver = self - - # Specific methods for gandi - def _wait_operation(self, id, timeout=DEFAULT_TIMEOUT, check_interval=DEFAULT_INTERVAL): - """ Wait for an operation to succeed""" - - for i in range(0, timeout, check_interval): - try: - op = self.connection.request('operation.info', int(id)) - - if op['step'] == 'DONE': - return True - if op['step'] in ['ERROR','CANCEL']: - return False - except (KeyError, IndexError): - pass - except Exception, e: - raise GandiException(1002, e) + features = {} - time.sleep(check_interval) - return False + def __init__(self, *args, **kwargs): + """ + @inherits: :class:`NodeDriver.__init__` + """ + super(BaseGandiDriver, self).__init__(*args, **kwargs) - def _node_info(self,id): + def _resource_info(self, type, id): try: - obj = self.connection.request('vm.info',int(id)) - return obj - except Exception,e: + obj = self.connection.request('hosting.%s.info' % type, int(id)) + return obj.object + except Exception: + e = sys.exc_info()[1] raise GandiException(1003, e) return None + def _node_info(self, id): + return self._resource_info('vm', id) + + def _volume_info(self, id): + return self._resource_info('disk', id) + # Generic methods for driver def _to_node(self, vm): return Node( @@ -168,34 +118,49 @@ vm['state'], NodeState.UNKNOWN ), - public_ip=vm.get('ip'), - private_ip='', + public_ips=vm.get('ips', []), + private_ips=[], driver=self, extra={ - 'ai_active' : vm.get('ai_active'), - 'datacenter_id' : vm.get('datacenter_id'), - 'description' : vm.get('description') + 'ai_active': vm.get('ai_active'), + 'datacenter_id': vm.get('datacenter_id'), + 'description': vm.get('description') } ) def _to_nodes(self, vms): return [self._to_node(v) for v in vms] + def _to_volume(self, disk): + extra = {'can_snapshot': disk['can_snapshot']} + return StorageVolume( + 
id=disk['id'], + name=disk['name'], + size=int(disk['size']), + driver=self, + extra=extra) + + def _to_volumes(self, disks): + return [self._to_volume(d) for d in disks] + def list_nodes(self): - vms = self.connection.request('vm.list') - ips = self.connection.request('ip.list') + vms = self.connection.request('hosting.vm.list').object + ips = self.connection.request('hosting.ip.list').object for vm in vms: + vm['ips'] = [] for ip in ips: if vm['ifaces_id'][0] == ip['iface_id']: - vm['ip'] = ip.get('ip') + ip = ip.get('ip', None) + if ip: + vm['ips'].append(ip) nodes = self._to_nodes(vms) return nodes def reboot_node(self, node): - op = self.connection.request('vm.reboot',int(node.id)) - op_res = self._wait_operation(op['id']) - vm = self.connection.request('vm.info',int(node.id)) + op = self.connection.request('hosting.vm.reboot', int(node.id)) + self._wait_operation(op.object['id']) + vm = self._node_info(int(node.id)) if vm['state'] == 'running': return True return False @@ -204,65 +169,80 @@ vm = self._node_info(node.id) if vm['state'] == 'running': # Send vm_stop and wait for accomplish - op_stop = self.connection.request('vm.stop',int(node.id)) - if not self._wait_operation(op_stop['id']): + op_stop = self.connection.request('hosting.vm.stop', int(node.id)) + if not self._wait_operation(op_stop.object['id']): raise GandiException(1010, 'vm.stop failed') - # Delete - op = self.connection.request('vm.delete',int(node.id)) - if self._wait_operation(op['id']): + # Delete + op = self.connection.request('hosting.vm.delete', int(node.id)) + if self._wait_operation(op.object['id']): return True return False def deploy_node(self, **kwargs): - raise NotImplementedError, \ - 'deploy_node not implemented for gandi driver' + """ + deploy_node is not implemented for gandi driver + + :rtype: ``bool`` + """ + raise NotImplementedError( + 'deploy_node not implemented for gandi driver') def create_node(self, **kwargs): - """Create a new Gandi node + """ + Create a new Gandi 
node - @keyword name: String with a name for this new node (required) - @type name: str + :keyword name: String with a name for this new node (required) + :type name: ``str`` - @keyword image: OS Image to boot on node. (required) - @type image: L{NodeImage} + :keyword image: OS Image to boot on node. (required) + :type image: :class:`NodeImage` - @keyword location: Which data center to create a node in. If empty, - undefined behavoir will be selected. (optional) - @type location: L{NodeLocation} + :keyword location: Which data center to create a node in. If empty, + undefined behavior will be selected. (optional) + :type location: :class:`NodeLocation` - @keyword size: The size of resources allocated to this node. + :keyword size: The size of resources allocated to this node. (required) - @type size: L{NodeSize} + :type size: :class:`NodeSize` - @keyword login: user name to create for login on this machine (required) - @type login: String + :keyword login: user name to create for login on machine (required) + :type login: ``str`` - @keyword password: password for user that'll be created (required) - @type password: String + :keyword password: password for user that'll be created (required) + :type password: ``str`` - @keywork inet_family: version of ip to use, default 4 (optional) - @type inet_family: int + :keyword inet_family: version of ip to use, default 4 (optional) + :type inet_family: ``int`` + + :rtype: :class:`Node` """ if kwargs.get('login') is None or kwargs.get('password') is None: - raise GandiException(1020, 'login and password must be defined for node creation') + raise GandiException( + 1020, 'login and password must be defined for node creation') location = kwargs.get('location') - if location and isinstance(location,NodeLocation): + if location and isinstance(location, NodeLocation): dc_id = int(location.id) else: - raise GandiException(1021, 'location must be a subclass of NodeLocation') + raise GandiException( + 1021, 'location must be a 
subclass of NodeLocation') size = kwargs.get('size') - if not size and not isinstance(size,NodeSize): - raise GandiException(1022, 'size must be a subclass of NodeSize') + if not size and not isinstance(size, NodeSize): + raise GandiException( + 1022, 'size must be a subclass of NodeSize') + + # If size name is in INSTANCE_TYPE we use new rating model + instance = INSTANCE_TYPES.get(size.id) + cores = instance['cpu'] if instance else int(size.id) src_disk_id = int(kwargs['image'].id) disk_spec = { 'datacenter_id': dc_id, 'name': 'disk_%s' % kwargs['name'] - } + } vm_spec = { 'datacenter_id': dc_id, @@ -270,21 +250,22 @@ 'login': kwargs['login'], 'password': kwargs['password'], # TODO : use NodeAuthPassword 'memory': int(size.ram), - 'cores': int(size.id), - 'bandwidth' : int(size.bandwidth), - 'ip_version': kwargs.get('inet_family',4), - } + 'cores': cores, + 'bandwidth': int(size.bandwidth), + 'ip_version': kwargs.get('inet_family', 4), + } # Call create_from helper api. Return 3 operations : disk_create, # iface_create,vm_create - (op_disk,op_iface,op_vm) = self.connection.request( - 'vm.create_from', - vm_spec,disk_spec,src_disk_id - ) + (op_disk, op_iface, op_vm) = self.connection.request( + 'hosting.vm.create_from', + vm_spec, disk_spec, src_disk_id + ).object # We wait for vm_create to finish if self._wait_operation(op_vm['id']): - # after successful operation, get ip information thru first interface + # after successful operation, get ip information + # thru first interface node = self._node_info(op_vm['vm_id']) ifaces = node.get('ifaces') if len(ifaces) > 0: @@ -305,12 +286,13 @@ def list_images(self, location=None): try: if location: - filtering = { 'datacenter_id' : int(location.id) } + filtering = {'datacenter_id': int(location.id)} else: filtering = {} - images = self.connection.request('image.list', filtering ) - return [self._to_image(i) for i in images] - except Exception, e: + images = self.connection.request('hosting.image.list', filtering) + 
return [self._to_image(i) for i in images.object] + except Exception: + e = sys.exc_info()[1] raise GandiException(1011, e) def _to_size(self, id, size): @@ -324,8 +306,26 @@ driver=self.connection.driver, ) + def _instance_type_to_size(self, instance): + return NodeSize( + id=instance['id'], + name=instance['name'], + ram=instance['memory'], + disk=instance['disk'], + bandwidth=instance['bandwidth'], + price=self._get_size_price(size_id=instance['id']), + driver=self.connection.driver, + ) + + def list_instance_type(self, location=None): + return [self._instance_type_to_size(instance) + for name, instance in INSTANCE_TYPES.items()] + def list_sizes(self, location=None): - account = self.connection.request('account.info') + account = self.connection.request('hosting.account.info').object + if account.get('rating_enabled'): + # This account use new rating model + return self.list_instance_type(location) # Look for available shares, and return a list of share_definition available_res = account['resources']['available'] @@ -340,8 +340,8 @@ if available_res['servers'] < 1: # No server quota, no way return shares - for i in range(1,max_core + 1): - share = {id:i} + for i in range(1, max_core + 1): + share = {id: i} share_is_available = True for k in ['memory', 'disk', 'bandwidth']: if share_def[k] * i > available_res[k]: @@ -351,7 +351,7 @@ share[k] = share_def[k] * i if share_is_available: nb_core = i - shares.append(self._to_size(nb_core,share)) + shares.append(self._to_size(nb_core, share)) return shares def _to_loc(self, loc): @@ -363,5 +363,257 @@ ) def list_locations(self): - res = self.connection.request("datacenter.list") - return [self._to_loc(l) for l in res] + res = self.connection.request('hosting.datacenter.list') + return [self._to_loc(l) for l in res.object] + + def list_volumes(self): + """ + + :rtype: ``list`` of :class:`StorageVolume` + """ + res = self.connection.request('hosting.disk.list', {}) + return self._to_volumes(res.object) + + def 
create_volume(self, size, name, location=None, snapshot=None): + disk_param = { + 'name': name, + 'size': int(size), + 'datacenter_id': int(location.id) + } + if snapshot: + op = self.connection.request('hosting.disk.create_from', + disk_param, int(snapshot.id)) + else: + op = self.connection.request('hosting.disk.create', disk_param) + if self._wait_operation(op.object['id']): + disk = self._volume_info(op.object['disk_id']) + return self._to_volume(disk) + return None + + def attach_volume(self, node, volume, device=None): + op = self.connection.request('hosting.vm.disk_attach', + int(node.id), int(volume.id)) + if self._wait_operation(op.object['id']): + return True + return False + + def detach_volume(self, node, volume): + """ + Detaches a volume from a node. + + :param node: Node which should be used + :type node: :class:`Node` + + :param volume: Volume to be detached + :type volume: :class:`StorageVolume` + + :rtype: ``bool`` + """ + op = self.connection.request('hosting.vm.disk_detach', + int(node.id), int(volume.id)) + if self._wait_operation(op.object['id']): + return True + return False + + def destroy_volume(self, volume): + op = self.connection.request('hosting.disk.delete', int(volume.id)) + if self._wait_operation(op.object['id']): + return True + return False + + def _to_iface(self, iface): + ips = [] + for ip in iface.get('ips', []): + new_ip = IPAddress( + ip['id'], + NODE_STATE_MAP.get( + ip['state'], + NodeState.UNKNOWN + ), + ip['ip'], + self.connection.driver, + version=ip.get('version'), + extra={'reverse': ip['reverse']} + ) + ips.append(new_ip) + return NetworkInterface( + iface['id'], + NODE_STATE_MAP.get( + iface['state'], + NodeState.UNKNOWN + ), + mac_address=None, + driver=self.connection.driver, + ips=ips, + node_id=iface.get('vm_id'), + extra={'bandwidth': iface['bandwidth']}, + ) + + def _to_ifaces(self, ifaces): + return [self._to_iface(i) for i in ifaces] + + def ex_list_interfaces(self): + """ + Specific method to list network 
interfaces + + :rtype: ``list`` of :class:`GandiNetworkInterface` + """ + ifaces = self.connection.request('hosting.iface.list').object + ips = self.connection.request('hosting.ip.list').object + for iface in ifaces: + iface['ips'] = list( + filter(lambda i: i['iface_id'] == iface['id'], ips)) + return self._to_ifaces(ifaces) + + def _to_disk(self, element): + disk = Disk( + id=element['id'], + state=NODE_STATE_MAP.get( + element['state'], + NodeState.UNKNOWN + ), + name=element['name'], + driver=self.connection.driver, + size=element['size'], + extra={'can_snapshot': element['can_snapshot']} + ) + return disk + + def _to_disks(self, elements): + return [self._to_disk(el) for el in elements] + + def ex_list_disks(self): + """ + Specific method to list all disk + + :rtype: ``list`` of :class:`GandiDisk` + """ + res = self.connection.request('hosting.disk.list', {}) + return self._to_disks(res.object) + + def ex_node_attach_disk(self, node, disk): + """ + Specific method to attach a disk to a node + + :param node: Node which should be used + :type node: :class:`Node` + + :param disk: Disk which should be used + :type disk: :class:`GandiDisk` + + :rtype: ``bool`` + """ + op = self.connection.request('hosting.vm.disk_attach', + int(node.id), int(disk.id)) + if self._wait_operation(op.object['id']): + return True + return False + + def ex_node_detach_disk(self, node, disk): + """ + Specific method to detach a disk from a node + + :param node: Node which should be used + :type node: :class:`Node` + + :param disk: Disk which should be used + :type disk: :class:`GandiDisk` + + :rtype: ``bool`` + """ + op = self.connection.request('hosting.vm.disk_detach', + int(node.id), int(disk.id)) + if self._wait_operation(op.object['id']): + return True + return False + + def ex_node_attach_interface(self, node, iface): + """ + Specific method to attach an interface to a node + + :param node: Node which should be used + :type node: :class:`Node` + + + :param iface: Network interface 
which should be used + :type iface: :class:`GandiNetworkInterface` + + :rtype: ``bool`` + """ + op = self.connection.request('hosting.vm.iface_attach', + int(node.id), int(iface.id)) + if self._wait_operation(op.object['id']): + return True + return False + + def ex_node_detach_interface(self, node, iface): + """ + Specific method to detach an interface from a node + + :param node: Node which should be used + :type node: :class:`Node` + + + :param iface: Network interface which should be used + :type iface: :class:`GandiNetworkInterface` + + :rtype: ``bool`` + """ + op = self.connection.request('hosting.vm.iface_detach', + int(node.id), int(iface.id)) + if self._wait_operation(op.object['id']): + return True + return False + + def ex_snapshot_disk(self, disk, name=None): + """ + Specific method to make a snapshot of a disk + + :param disk: Disk which should be used + :type disk: :class:`GandiDisk` + + :param name: Name which should be used + :type name: ``str`` + + :rtype: ``bool`` + """ + if not disk.extra.get('can_snapshot'): + raise GandiException(1021, 'Disk %s can\'t snapshot' % disk.id) + if not name: + suffix = datetime.today().strftime('%Y%m%d') + name = 'snap_%s' % (suffix) + op = self.connection.request( + 'hosting.disk.create_from', + {'name': name, 'type': 'snapshot', }, + int(disk.id), + ) + if self._wait_operation(op.object['id']): + return True + return False + + def ex_update_disk(self, disk, new_size=None, new_name=None): + """Specific method to update size or name of a disk + WARNING: if a server is attached it'll be rebooted + + :param disk: Disk which should be used + :type disk: :class:`GandiDisk` + + :param new_size: New size + :type new_size: ``int`` + + :param new_name: New name + :type new_name: ``str`` + + :rtype: ``bool`` + """ + params = {} + if new_size: + params.update({'size': new_size}) + if new_name: + params.update({'name': new_name}) + op = self.connection.request('hosting.disk.update', + int(disk.id), + params) + if 
self._wait_operation(op.object['id']): + return True + return False diff -Nru libcloud-0.5.0/libcloud/compute/drivers/gce.py libcloud-0.15.1/libcloud/compute/drivers/gce.py --- libcloud-0.5.0/libcloud/compute/drivers/gce.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/gce.py 2014-06-11 14:28:05.000000000 +0000 @@ -0,0 +1,3346 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Module for Google Compute Engine Driver. 
+""" +from __future__ import with_statement + +import datetime +import time +import sys + +from libcloud.common.google import GoogleResponse +from libcloud.common.google import GoogleBaseConnection +from libcloud.common.google import GoogleBaseError +from libcloud.common.google import ResourceNotFoundError +from libcloud.common.google import ResourceExistsError + +from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeLocation +from libcloud.compute.base import NodeSize, StorageVolume, VolumeSnapshot +from libcloud.compute.base import UuidMixin +from libcloud.compute.providers import Provider +from libcloud.compute.types import NodeState + +API_VERSION = 'v1' +DEFAULT_TASK_COMPLETION_TIMEOUT = 180 + + +def timestamp_to_datetime(timestamp): + """ + Return a datetime object that corresponds to the time in an RFC3339 + timestamp. + + :param timestamp: RFC3339 timestamp string + :type timestamp: ``str`` + + :return: Datetime object corresponding to timestamp + :rtype: :class:`datetime.datetime` + """ + # We remove timezone offset and microseconds (Python 2.5 strptime doesn't + # support %f) + ts = datetime.datetime.strptime(timestamp[:-10], '%Y-%m-%dT%H:%M:%S') + tz_hours = int(timestamp[-5:-3]) + tz_mins = int(timestamp[-2:]) * int(timestamp[-6:-5] + '1') + tz_delta = datetime.timedelta(hours=tz_hours, minutes=tz_mins) + return ts + tz_delta + + +class GCEResponse(GoogleResponse): + pass + + +class GCEConnection(GoogleBaseConnection): + """Connection class for the GCE driver.""" + host = 'www.googleapis.com' + responseCls = GCEResponse + + def __init__(self, user_id, key, secure, auth_type=None, + credential_file=None, project=None, **kwargs): + super(GCEConnection, self).__init__(user_id, key, secure=secure, + auth_type=auth_type, + credential_file=credential_file, + **kwargs) + self.request_path = '/compute/%s/projects/%s' % (API_VERSION, + project) + + +class GCEAddress(UuidMixin): + """A GCE Static address.""" + def __init__(self, id, name, address, 
region, driver, extra=None): + self.id = str(id) + self.name = name + self.address = address + self.region = region + self.driver = driver + self.extra = extra + UuidMixin.__init__(self) + + def destroy(self): + """ + Destroy this address. + + :return: True if successful + :rtype: ``bool`` + """ + return self.driver.ex_destroy_address(address=self) + + def __repr__(self): + return '' % ( + self.id, self.name, self.address) + + +class GCEFailedDisk(object): + """Dummy Node object for disks that are not created.""" + def __init__(self, name, error, code): + self.name = name + self.error = error + self.code = code + + def __repr__(self): + return '' % ( + self.name, self.code) + + +class GCEFailedNode(object): + """Dummy Node object for nodes that are not created.""" + def __init__(self, name, error, code): + self.name = name + self.error = error + self.code = code + + def __repr__(self): + return '' % ( + self.name, self.code) + + +class GCEHealthCheck(UuidMixin): + """A GCE Http Health Check class.""" + def __init__(self, id, name, path, port, interval, timeout, + unhealthy_threshold, healthy_threshold, driver, extra=None): + self.id = str(id) + self.name = name + self.path = path + self.port = port + self.interval = interval + self.timeout = timeout + self.unhealthy_threshold = unhealthy_threshold + self.healthy_threshold = healthy_threshold + self.driver = driver + self.extra = extra + UuidMixin.__init__(self) + + def destroy(self): + """ + Destroy this Health Check. + + :return: True if successful + :rtype: ``bool`` + """ + return self.driver.ex_destroy_healthcheck(healthcheck=self) + + def update(self): + """ + Commit updated healthcheck values. 
class GCEHealthCheck(UuidMixin):
    """A GCE Http Health Check class."""
    def __init__(self, id, name, path, port, interval, timeout,
                 unhealthy_threshold, healthy_threshold, driver, extra=None):
        self.id = str(id)
        self.name = name
        self.path = path
        self.port = port
        self.interval = interval
        self.timeout = timeout
        self.unhealthy_threshold = unhealthy_threshold
        self.healthy_threshold = healthy_threshold
        self.driver = driver
        self.extra = extra
        UuidMixin.__init__(self)

    def destroy(self):
        """
        Destroy this Health Check.

        :return: True if successful
        :rtype:  ``bool``
        """
        return self.driver.ex_destroy_healthcheck(healthcheck=self)

    def update(self):
        """
        Commit updated healthcheck values.

        :return: Updated Healthcheck object
        :rtype:  :class:`GCEHealthcheck`
        """
        return self.driver.ex_update_healthcheck(healthcheck=self)

    def __repr__(self):
        # BUGFIX: restored stripped format placeholders.
        return '<GCEHealthCheck id="%s" name="%s" path="%s" port="%s">' % (
            self.id, self.name, self.path, self.port)


class GCEFirewall(UuidMixin):
    """A GCE Firewall rule class."""
    def __init__(self, id, name, allowed, network, source_ranges, source_tags,
                 target_tags, driver, extra=None):
        self.id = str(id)
        self.name = name
        self.network = network
        self.allowed = allowed
        self.source_ranges = source_ranges
        self.source_tags = source_tags
        self.target_tags = target_tags
        self.driver = driver
        self.extra = extra
        UuidMixin.__init__(self)

    def destroy(self):
        """
        Destroy this firewall.

        :return: True if successful
        :rtype:  ``bool``
        """
        return self.driver.ex_destroy_firewall(firewall=self)

    def update(self):
        """
        Commit updated firewall values.

        :return: Updated Firewall object
        :rtype:  :class:`GCEFirewall`
        """
        return self.driver.ex_update_firewall(firewall=self)

    def __repr__(self):
        return '<GCEFirewall id="%s" name="%s" network="%s">' % (
            self.id, self.name, self.network.name)


class GCEForwardingRule(UuidMixin):
    """A GCE Forwarding Rule, mapping an address/protocol to a target pool."""
    def __init__(self, id, name, region, address, protocol, targetpool, driver,
                 extra=None):
        self.id = str(id)
        self.name = name
        self.region = region
        self.address = address
        self.protocol = protocol
        self.targetpool = targetpool
        self.driver = driver
        self.extra = extra
        UuidMixin.__init__(self)

    def destroy(self):
        """
        Destroy this Forwarding Rule

        :return: True if successful
        :rtype:  ``bool``
        """
        return self.driver.ex_destroy_forwarding_rule(forwarding_rule=self)

    def __repr__(self):
        return '<GCEForwardingRule id="%s" name="%s" address="%s">' % (
            self.id, self.name, self.address)
class GCENodeImage(NodeImage):
    """A GCE Node Image class."""
    def __init__(self, id, name, driver, extra=None):
        super(GCENodeImage, self).__init__(id, name, driver, extra=extra)

    def delete(self):
        """
        Delete this image

        :return: True if successful
        :rtype:  ``bool``
        """
        return self.driver.ex_delete_image(image=self)

    def deprecate(self, replacement, state):
        """
        Deprecate this image

        :param  replacement: Image to use as a replacement
        :type   replacement: ``str`` or :class:`GCENodeImage`

        :param  state: Deprecation state of this image. Possible values
                       include 'DELETED', 'DEPRECATED' or 'OBSOLETE'.
        :type   state: ``str``

        :return: True if successful
        :rtype:  ``bool``
        """
        return self.driver.ex_deprecate_image(self, replacement, state)


class GCENetwork(UuidMixin):
    """A GCE Network object class."""
    def __init__(self, id, name, cidr, driver, extra=None):
        self.id = str(id)
        self.name = name
        self.cidr = cidr
        self.driver = driver
        self.extra = extra
        UuidMixin.__init__(self)

    def destroy(self):
        """
        Destroy this network

        :return: True if successful
        :rtype:  ``bool``
        """
        return self.driver.ex_destroy_network(network=self)

    def __repr__(self):
        # BUGFIX: restored stripped format placeholders.
        return '<GCENetwork id="%s" name="%s" cidr="%s">' % (
            self.id, self.name, self.cidr)


class GCENodeSize(NodeSize):
    """A GCE Node Size (MachineType) class."""
    def __init__(self, id, name, ram, disk, bandwidth, price, driver,
                 extra=None):
        self.extra = extra
        super(GCENodeSize, self).__init__(id, name, ram, disk, bandwidth,
                                          price, driver, extra=extra)


class GCEProject(UuidMixin):
    """GCE Project information."""
    def __init__(self, id, name, metadata, quotas, driver, extra=None):
        self.id = str(id)
        self.name = name
        self.metadata = metadata
        self.quotas = quotas
        self.driver = driver
        self.extra = extra
        UuidMixin.__init__(self)

    def __repr__(self):
        return '<GCEProject id="%s" name="%s">' % (self.id, self.name)
class GCERegion(UuidMixin):
    """A GCE Region: a geographic grouping of zones with its own quotas."""
    def __init__(self, id, name, status, zones, quotas, deprecated, driver,
                 extra=None):
        self.id = str(id)
        self.name = name
        self.status = status
        self.zones = zones
        self.quotas = quotas
        self.deprecated = deprecated
        self.driver = driver
        self.extra = extra
        UuidMixin.__init__(self)

    def __repr__(self):
        # BUGFIX: restored stripped format placeholders.
        return '<GCERegion id="%s" name="%s" status="%s">' % (
            self.id, self.name, self.status)


class GCESnapshot(VolumeSnapshot):
    """A GCE disk snapshot."""
    def __init__(self, id, name, size, status, driver, extra=None):
        self.name = name
        self.status = status
        super(GCESnapshot, self).__init__(id, driver, size, extra)
class GCETargetPool(UuidMixin):
    """A GCE Target Pool: a regional group of nodes behind forwarding rules."""
    def __init__(self, id, name, region, healthchecks, nodes, driver,
                 extra=None):
        self.id = str(id)
        self.name = name
        self.region = region
        self.healthchecks = healthchecks
        self.nodes = nodes
        self.driver = driver
        self.extra = extra
        UuidMixin.__init__(self)

    def add_node(self, node):
        """
        Add a node to this target pool.

        :param  node: Node to add
        :type   node: ``str`` or :class:`Node`

        :return: True if successful
        :rtype:  ``bool``
        """
        return self.driver.ex_targetpool_add_node(targetpool=self, node=node)

    def remove_node(self, node):
        """
        Remove a node from this target pool.

        :param  node: Node to remove
        :type   node: ``str`` or :class:`Node`

        :return: True if successful
        :rtype:  ``bool``
        """
        return self.driver.ex_targetpool_remove_node(targetpool=self,
                                                     node=node)

    def add_healthcheck(self, healthcheck):
        """
        Add a healthcheck to this target pool.

        :param  healthcheck: Healthcheck to add
        :type   healthcheck: ``str`` or :class:`GCEHealthCheck`

        :return: True if successful
        :rtype:  ``bool``
        """
        return self.driver.ex_targetpool_add_healthcheck(
            targetpool=self, healthcheck=healthcheck)

    def remove_healthcheck(self, healthcheck):
        """
        Remove a healthcheck from this target pool.

        :param  healthcheck: Healthcheck to remove
        :type   healthcheck: ``str`` or :class:`GCEHealthCheck`

        :return: True if successful
        :rtype:  ``bool``
        """
        return self.driver.ex_targetpool_remove_healthcheck(
            targetpool=self, healthcheck=healthcheck)

    def destroy(self):
        """
        Destroy this Target Pool

        :return: True if successful
        :rtype:  ``bool``
        """
        return self.driver.ex_destroy_targetpool(targetpool=self)

    def __repr__(self):
        # BUGFIX: restored stripped format placeholders.
        return '<GCETargetPool id="%s" name="%s" region="%s">' % (
            self.id, self.name, self.region.name)
class GCEZone(NodeLocation):
    """Subclass of NodeLocation to provide additional information."""
    def __init__(self, id, name, status, maintenance_windows, deprecated,
                 driver, extra=None):
        self.status = status
        self.maintenance_windows = maintenance_windows
        self.deprecated = deprecated
        self.extra = extra
        # Zone names look like 'us-central1-a'; the leading token is used
        # as the 'country' for NodeLocation compatibility.
        country = name.split('-')[0]
        super(GCEZone, self).__init__(id=str(id), name=name, country=country,
                                      driver=driver)

    @property
    def time_until_mw(self):
        """
        Returns the time until the next Maintenance Window as a
        datetime.timedelta object.
        """
        return self._get_time_until_mw()

    @property
    def next_mw_duration(self):
        """
        Returns the duration of the next Maintenance Window as a
        datetime.timedelta object.
        """
        return self._get_next_mw_duration()

    def _now(self):
        """
        Returns current UTC time.

        Can be overridden in unittests.
        """
        return datetime.datetime.utcnow()

    def _get_next_maint(self):
        """
        Returns the next Maintenance Window.

        :return: A dictionary containing maintenance window info (or None if
                 no maintenance windows are scheduled).
                 The dictionary contains 4 keys with values of type ``str``:
                 name, description, beginTime (RFC3339), endTime (RFC3339).
        :rtype: ``dict`` or ``None``
        """
        windows = self.maintenance_windows
        if not windows:
            return None
        if len(windows) == 1:
            return windows[0]
        # Choose the window with the earliest begin time.
        return min(windows,
                   key=lambda mw: timestamp_to_datetime(mw['beginTime']))

    def _get_time_until_mw(self):
        """
        Returns time until next maintenance window.

        :return: Time until next maintenance window (or None if no
                 maintenance windows are scheduled)
        :rtype: :class:`datetime.timedelta` or ``None``
        """
        next_window = self._get_next_maint()
        if not next_window:
            return None
        next_begin = timestamp_to_datetime(next_window['beginTime'])
        return next_begin - self._now()

    def _get_next_mw_duration(self):
        """
        Returns the duration of the next maintenance window.

        :return: Duration of next maintenance window (or None if no
                 maintenance windows are scheduled)
        :rtype: :class:`datetime.timedelta` or ``None``
        """
        next_window = self._get_next_maint()
        if not next_window:
            return None
        begin = timestamp_to_datetime(next_window['beginTime'])
        end = timestamp_to_datetime(next_window['endTime'])
        return end - begin

    def __repr__(self):
        # BUGFIX: restored stripped format placeholders.
        return '<GCEZone id="%s" name="%s" status="%s">' % (
            self.id, self.name, self.status)
class GCENodeDriver(NodeDriver):
    """
    GCE Node Driver class.

    This is the primary driver for interacting with Google Compute Engine.
    It contains all of the standard libcloud methods, plus additional ex_*
    methods for more features.

    Note that many methods allow either objects or strings (or lists of
    objects/strings).  In most cases, passing strings instead of objects
    will result in additional GCE API calls.
    """
    connectionCls = GCEConnection
    api_name = 'googleapis'
    name = "Google Compute Engine"
    type = Provider.GCE
    website = 'https://cloud.google.com/'

    # Map GCE instance statuses onto libcloud NodeState values.
    NODE_STATE_MAP = {
        "PROVISIONING": NodeState.PENDING,
        "STAGING": NodeState.PENDING,
        "RUNNING": NodeState.RUNNING,
        "STOPPED": NodeState.TERMINATED,
        "STOPPING": NodeState.TERMINATED,
        "TERMINATED": NodeState.TERMINATED
    }

    def __init__(self, user_id, key, datacenter=None, project=None,
                 auth_type=None, scopes=None, **kwargs):
        """
        :param  user_id: The email address (for service accounts) or Client
                         ID (for installed apps) to be used for
                         authentication.
        :type   user_id: ``str``

        :param  key: The RSA Key (for service accounts) or file path
                     containing key or Client Secret (for installed apps)
                     to be used for authentication.
        :type   key: ``str``

        :keyword  datacenter: The name of the datacenter (zone) used for
                              operations.
        :type     datacenter: ``str``

        :keyword  project: Your GCE project name. (required)
        :type     project: ``str``

        :keyword  auth_type: Accepted values are "SA" or "IA"
                             ("Service Account" or "Installed Application").
                             If not supplied, auth_type will be guessed
                             based on value of user_id.
        :type     auth_type: ``str``

        :keyword  scopes: List of authorization URLs. Default is empty and
                          grants read/write to Compute, Storage, DNS.
        :type     scopes: ``list``
        """
        self.auth_type = auth_type
        self.project = project
        self.scopes = scopes
        if not self.project:
            raise ValueError('Project name must be specified using '
                             '"project" keyword.')
        super(GCENodeDriver, self).__init__(user_id, key, **kwargs)

        # Cache Zone and Region information to reduce API calls and
        # increase speed
        self.base_path = '/compute/%s/projects/%s' % (API_VERSION,
                                                      self.project)
        self.zone_list = self.ex_list_zones()
        self.zone_dict = dict((z.name, z) for z in self.zone_list)
        self.zone = self.ex_get_zone(datacenter) if datacenter else None

        self.region_list = self.ex_list_regions()
        self.region_dict = dict((r.name, r) for r in self.region_list)

        self.region = (self._get_region_from_zone(self.zone)
                       if self.zone else None)
+ :rtype: ``list`` of :class:`GCEAddress` + """ + list_addresses = [] + region = self._set_region(region) + if region is None: + request = '/aggregated/addresses' + else: + request = '/regions/%s/addresses' % (region.name) + response = self.connection.request(request, method='GET').object + + if 'items' in response: + # The aggregated result returns dictionaries for each region + if region is None: + for v in response['items'].values(): + region_addresses = [self._to_address(a) for a in + v.get('addresses', [])] + list_addresses.extend(region_addresses) + else: + list_addresses = [self._to_address(a) for a in + response['items']] + return list_addresses + + def ex_list_healthchecks(self): + """ + Return the list of health checks. + + :return: A list of health check objects. + :rtype: ``list`` of :class:`GCEHealthCheck` + """ + list_healthchecks = [] + request = '/global/httpHealthChecks' + response = self.connection.request(request, method='GET').object + list_healthchecks = [self._to_healthcheck(h) for h in + response.get('items', [])] + return list_healthchecks + + def ex_list_firewalls(self): + """ + Return the list of firewalls. + + :return: A list of firewall objects. + :rtype: ``list`` of :class:`GCEFirewall` + """ + list_firewalls = [] + request = '/global/firewalls' + response = self.connection.request(request, method='GET').object + list_firewalls = [self._to_firewall(f) for f in + response.get('items', [])] + return list_firewalls + + def ex_list_forwarding_rules(self, region=None): + """ + Return the list of forwarding rules for a region or all. + + :keyword region: The region to return forwarding rules from. For + example: 'us-central1'. If None, will return + forwarding rules from the region of self.region + (which is based on self.zone). If 'all', will + return all forwarding rules. + :type region: ``str`` or :class:`GCERegion` or ``None`` + + :return: A list of forwarding rule objects. 
+ :rtype: ``list`` of :class:`GCEForwardingRule` + """ + list_forwarding_rules = [] + region = self._set_region(region) + if region is None: + request = '/aggregated/forwardingRules' + else: + request = '/regions/%s/forwardingRules' % (region.name) + response = self.connection.request(request, method='GET').object + + if 'items' in response: + # The aggregated result returns dictionaries for each region + if region is None: + for v in response['items'].values(): + region_forwarding_rules = [self._to_forwarding_rule(f) for + f in v.get('forwardingRules', + [])] + list_forwarding_rules.extend(region_forwarding_rules) + else: + list_forwarding_rules = [self._to_forwarding_rule(f) for f in + response['items']] + return list_forwarding_rules + + def list_images(self, ex_project=None): + """ + Return a list of image objects for a project. + + :keyword ex_project: Optional alternate project name. + :type ex_project: ``str`` or ``None`` + + :return: List of GCENodeImage objects + :rtype: ``list`` of :class:`GCENodeImage` + """ + request = '/global/images' + if ex_project is None: + response = self.connection.request(request, method='GET').object + else: + # Save the connection request_path + save_request_path = self.connection.request_path + # Override the connection request path + new_request_path = save_request_path.replace(self.project, + ex_project) + self.connection.request_path = new_request_path + response = self.connection.request(request, method='GET').object + # Restore the connection request_path + self.connection.request_path = save_request_path + list_images = [self._to_node_image(i) for i in + response.get('items', [])] + return list_images + + def list_locations(self): + """ + Return a list of locations (zones). + + The :class:`ex_list_zones` method returns more comprehensive results, + but this is here for compatibility. 
+ + :return: List of NodeLocation objects + :rtype: ``list`` of :class:`NodeLocation` + """ + list_locations = [] + request = '/zones' + response = self.connection.request(request, method='GET').object + list_locations = [self._to_node_location(l) for l in response['items']] + return list_locations + + def ex_list_networks(self): + """ + Return the list of networks. + + :return: A list of network objects. + :rtype: ``list`` of :class:`GCENetwork` + """ + list_networks = [] + request = '/global/networks' + response = self.connection.request(request, method='GET').object + list_networks = [self._to_network(n) for n in + response.get('items', [])] + return list_networks + + def list_nodes(self, ex_zone=None): + """ + Return a list of nodes in the current zone or all zones. + + :keyword ex_zone: Optional zone name or 'all' + :type ex_zone: ``str`` or :class:`GCEZone` or + :class:`NodeLocation` or ``None`` + + :return: List of Node objects + :rtype: ``list`` of :class:`Node` + """ + list_nodes = [] + zone = self._set_zone(ex_zone) + if zone is None: + request = '/aggregated/instances' + else: + request = '/zones/%s/instances' % (zone.name) + + response = self.connection.request(request, method='GET').object + + if 'items' in response: + # The aggregated response returns a dict for each zone + if zone is None: + for v in response['items'].values(): + zone_nodes = [self._to_node(i) for i in + v.get('instances', [])] + list_nodes.extend(zone_nodes) + else: + list_nodes = [self._to_node(i) for i in response['items']] + return list_nodes + + def ex_list_regions(self): + """ + Return the list of regions. + + :return: A list of region objects. 
+ :rtype: ``list`` of :class:`GCERegion` + """ + list_regions = [] + request = '/regions' + response = self.connection.request(request, method='GET').object + list_regions = [self._to_region(r) for r in response['items']] + return list_regions + + def list_sizes(self, location=None): + """ + Return a list of sizes (machineTypes) in a zone. + + :keyword location: Location or Zone for sizes + :type location: ``str`` or :class:`GCEZone` or + :class:`NodeLocation` or ``None`` + + :return: List of GCENodeSize objects + :rtype: ``list`` of :class:`GCENodeSize` + """ + list_sizes = [] + zone = self._set_zone(location) + if zone is None: + request = '/aggregated/machineTypes' + else: + request = '/zones/%s/machineTypes' % (zone.name) + + response = self.connection.request(request, method='GET').object + + if 'items' in response: + # The aggregated response returns a dict for each zone + if zone is None: + for v in response['items'].values(): + zone_sizes = [self._to_node_size(s) for s in + v.get('machineTypes', [])] + list_sizes.extend(zone_sizes) + else: + list_sizes = [self._to_node_size(s) for s in response['items']] + return list_sizes + + def ex_list_snapshots(self): + """ + Return the list of disk snapshots in the project. + + :return: A list of snapshot objects + :rtype: ``list`` of :class:`GCESnapshot` + """ + list_snapshots = [] + request = '/global/snapshots' + response = self.connection.request(request, method='GET').object + list_snapshots = [self._to_snapshot(s) for s in + response.get('items', [])] + return list_snapshots + + def ex_list_targetpools(self, region=None): + """ + Return the list of target pools. 
+ + :return: A list of target pool objects + :rtype: ``list`` of :class:`GCETargetPool` + """ + list_targetpools = [] + region = self._set_region(region) + if region is None: + request = '/aggregated/targetPools' + else: + request = '/regions/%s/targetPools' % (region.name) + response = self.connection.request(request, method='GET').object + + if 'items' in response: + # The aggregated result returns dictionaries for each region + if region is None: + for v in response['items'].values(): + region_targetpools = [self._to_targetpool(t) for t in + v.get('targetPools', [])] + list_targetpools.extend(region_targetpools) + else: + list_targetpools = [self._to_targetpool(t) for t in + response['items']] + return list_targetpools + + def list_volumes(self, ex_zone=None): + """ + Return a list of volumes for a zone or all. + + Will return list from provided zone, or from the default zone unless + given the value of 'all'. + + :keyword ex_zone: The zone to return volumes from. + :type ex_zone: ``str`` or :class:`GCEZone` or + :class:`NodeLocation` or ``None`` + + :return: A list of volume objects. + :rtype: ``list`` of :class:`StorageVolume` + """ + list_volumes = [] + zone = self._set_zone(ex_zone) + if zone is None: + request = '/aggregated/disks' + else: + request = '/zones/%s/disks' % (zone.name) + + response = self.connection.request(request, method='GET').object + if 'items' in response: + # The aggregated response returns a dict for each zone + if zone is None: + for v in response['items'].values(): + zone_volumes = [self._to_storage_volume(d) for d in + v.get('disks', [])] + list_volumes.extend(zone_volumes) + else: + list_volumes = [self._to_storage_volume(d) for d in + response['items']] + return list_volumes + + def ex_list_zones(self): + """ + Return the list of zones. + + :return: A list of zone objects. 
+ :rtype: ``list`` of :class:`GCEZone` + """ + list_zones = [] + request = '/zones' + response = self.connection.request(request, method='GET').object + list_zones = [self._to_zone(z) for z in response['items']] + return list_zones + + def ex_create_address(self, name, region=None): + """ + Create a static address in a region. + + :param name: Name of static address + :type name: ``str`` + + :keyword region: Name of region for the address (e.g. 'us-central1') + :type region: ``str`` or :class:`GCERegion` + + :return: Static Address object + :rtype: :class:`GCEAddress` + """ + region = region or self.region + if not hasattr(region, 'name'): + region = self.ex_get_region(region) + elif region is None: + raise ValueError('REGION_NOT_SPECIFIED', + 'Region must be provided for an address') + address_data = {'name': name} + request = '/regions/%s/addresses' % (region.name) + self.connection.async_request(request, method='POST', + data=address_data) + return self.ex_get_address(name, region=region) + + def ex_create_healthcheck(self, name, host=None, path=None, port=None, + interval=None, timeout=None, + unhealthy_threshold=None, + healthy_threshold=None): + """ + Create an Http Health Check. + + :param name: Name of health check + :type name: ``str`` + + :keyword host: Hostname of health check request. Defaults to empty + and public IP is used instead. + :type host: ``str`` + + :keyword path: The request path for the check. Defaults to /. + :type path: ``str`` + + :keyword port: The TCP port number for the check. Defaults to 80. + :type port: ``int`` + + :keyword interval: How often (in seconds) to check. Defaults to 5. + :type interval: ``int`` + + :keyword timeout: How long to wait before failing. Defaults to 5. + :type timeout: ``int`` + + :keyword unhealthy_threshold: How many failures before marking + unhealthy. Defaults to 2. + :type unhealthy_threshold: ``int`` + + :keyword healthy_threshold: How many successes before marking as + healthy. Defaults to 2. 
+ :type healthy_threshold: ``int`` + + :return: Health Check object + :rtype: :class:`GCEHealthCheck` + """ + hc_data = {} + hc_data['name'] = name + if host: + hc_data['host'] = host + # As of right now, the 'default' values aren't getting set when called + # through the API, so set them explicitly + hc_data['requestPath'] = path or '/' + hc_data['port'] = port or 80 + hc_data['checkIntervalSec'] = interval or 5 + hc_data['timeoutSec'] = timeout or 5 + hc_data['unhealthyThreshold'] = unhealthy_threshold or 2 + hc_data['healthyThreshold'] = healthy_threshold or 2 + + request = '/global/httpHealthChecks' + + self.connection.async_request(request, method='POST', data=hc_data) + return self.ex_get_healthcheck(name) + + def ex_create_firewall(self, name, allowed, network='default', + source_ranges=None, source_tags=None, + target_tags=None): + """ + Create a firewall on a network. + + Firewall rules should be supplied in the "allowed" field. This is a + list of dictionaries formated like so ("ports" is optional):: + + [{"IPProtocol": "", + "ports": ""}] + + For example, to allow tcp on port 8080 and udp on all ports, 'allowed' + would be:: + + [{"IPProtocol": "tcp", + "ports": ["8080"]}, + {"IPProtocol": "udp"}] + + See `Firewall Reference `_ for more information. + + :param name: Name of the firewall to be created + :type name: ``str`` + + :param allowed: List of dictionaries with rules + :type allowed: ``list`` of ``dict`` + + :keyword network: The network that the firewall applies to. + :type network: ``str`` or :class:`GCENetwork` + + :keyword source_ranges: A list of IP ranges in CIDR format that the + firewall should apply to. Defaults to + ['0.0.0.0/0'] + :type source_ranges: ``list`` of ``str`` + + :keyword source_tags: A list of source instance tags the rules apply + to. + :type source_tags: ``list`` of ``str`` + + :keyword target_tags: A list of target instance tags the rules apply + to. 
+ :type target_tags: ``list`` of ``str`` + + :return: Firewall object + :rtype: :class:`GCEFirewall` + """ + firewall_data = {} + if not hasattr(network, 'name'): + nw = self.ex_get_network(network) + else: + nw = network + + firewall_data['name'] = name + firewall_data['allowed'] = allowed + firewall_data['network'] = nw.extra['selfLink'] + firewall_data['sourceRanges'] = source_ranges or ['0.0.0.0/0'] + if source_tags is not None: + firewall_data['sourceTags'] = source_tags + if target_tags is not None: + firewall_data['targetTags'] = target_tags + + request = '/global/firewalls' + + self.connection.async_request(request, method='POST', + data=firewall_data) + return self.ex_get_firewall(name) + + def ex_create_forwarding_rule(self, name, targetpool, region=None, + protocol='tcp', port_range=None, + address=None): + """ + Create a forwarding rule. + + :param name: Name of forwarding rule to be created + :type name: ``str`` + + :param targetpool: Target pool to apply the rule to + :param targetpool: ``str`` or :class:`GCETargetPool` + + :keyword region: Region to create the forwarding rule in. Defaults to + self.region + :type region: ``str`` or :class:`GCERegion` + + :keyword protocol: Should be 'tcp' or 'udp' + :type protocol: ``str`` + + :keyword port_range: Optional single port number or range separated + by a dash. Examples: '80', '5000-5999'. + :type port_range: ``str`` + + :keyword address: Optional static address for forwarding rule. Must be + in same region. 
+ :type address: ``str`` or :class:`GCEAddress` + + :return: Forwarding Rule object + :rtype: :class:`GCEForwardingRule` + """ + forwarding_rule_data = {} + region = region or self.region + if not hasattr(region, 'name'): + region = self.ex_get_region(region) + if not hasattr(targetpool, 'name'): + targetpool = self.ex_get_targetpool(targetpool, region) + + forwarding_rule_data['name'] = name + forwarding_rule_data['region'] = region.extra['selfLink'] + forwarding_rule_data['target'] = targetpool.extra['selfLink'] + forwarding_rule_data['protocol'] = protocol.upper() + if address: + if not hasattr(address, 'name'): + address = self.ex_get_address(address, region) + forwarding_rule_data['IPAddress'] = address.extra['selfLink'] + if port_range: + forwarding_rule_data['portRange'] = port_range + + request = '/regions/%s/forwardingRules' % (region.name) + + self.connection.async_request(request, method='POST', + data=forwarding_rule_data) + + return self.ex_get_forwarding_rule(name) + + def ex_create_network(self, name, cidr): + """ + Create a network. + + :param name: Name of network to be created + :type name: ``str`` + + :param cidr: Address range of network in CIDR format. + :type cidr: ``str`` + + :return: Network object + :rtype: :class:`GCENetwork` + """ + network_data = {} + network_data['name'] = name + network_data['IPv4Range'] = cidr + + request = '/global/networks' + + self.connection.async_request(request, method='POST', + data=network_data) + + return self.ex_get_network(name) + + def create_node(self, name, size, image, location=None, + ex_network='default', ex_tags=None, ex_metadata=None, + ex_boot_disk=None, use_existing_disk=True, + external_ip='ephemeral'): + """ + Create a new node and return a node object for the node. + + :param name: The name of the node to create. + :type name: ``str`` + + :param size: The machine type to use. 
+ :type size: ``str`` or :class:`GCENodeSize` + + :param image: The image to use to create the node (or, if attaching + a persistent disk, the image used to create the disk) + :type image: ``str`` or :class:`GCENodeImage` + + :keyword location: The location (zone) to create the node in. + :type location: ``str`` or :class:`NodeLocation` or + :class:`GCEZone` or ``None`` + + :keyword ex_network: The network to associate with the node. + :type ex_network: ``str`` or :class:`GCENetwork` + + :keyword ex_tags: A list of tags to associate with the node. + :type ex_tags: ``list`` of ``str`` or ``None`` + + :keyword ex_metadata: Metadata dictionary for instance. + :type ex_metadata: ``dict`` or ``None`` + + :keyword ex_boot_disk: The boot disk to attach to the instance. + :type ex_boot_disk: :class:`StorageVolume` or ``str`` + + :keyword use_existing_disk: If True and if an existing disk with the + same name/location is found, use that + disk instead of creating a new one. + :type use_existing_disk: ``bool`` + + :keyword external_ip: The external IP address to use. If 'ephemeral' + (default), a new non-static address will be + used. If 'None', then no external address will + be used. To use an existing static IP address, + a GCEAddress object should be passed in. + :type external_ip: :class:`GCEAddress` or ``str`` or None + + :return: A Node object for the new node. 
+ :rtype: :class:`Node` + """ + location = location or self.zone + if not hasattr(location, 'name'): + location = self.ex_get_zone(location) + if not hasattr(size, 'name'): + size = self.ex_get_size(size, location) + if not hasattr(ex_network, 'name'): + ex_network = self.ex_get_network(ex_network) + if not hasattr(image, 'name'): + image = self.ex_get_image(image) + + if not ex_boot_disk: + ex_boot_disk = self.create_volume(None, name, location=location, + image=image, + use_existing=use_existing_disk) + + if ex_metadata is not None: + ex_metadata = {"items": [{"key": k, "value": v} + for k, v in ex_metadata.items()]} + + request, node_data = self._create_node_req(name, size, image, + location, ex_network, + ex_tags, ex_metadata, + ex_boot_disk, external_ip) + self.connection.async_request(request, method='POST', data=node_data) + + return self.ex_get_node(name, location.name) + + def ex_create_multiple_nodes(self, base_name, size, image, number, + location=None, ex_network='default', + ex_tags=None, ex_metadata=None, + ignore_errors=True, use_existing_disk=True, + poll_interval=2, external_ip='ephemeral', + timeout=DEFAULT_TASK_COMPLETION_TIMEOUT): + """ + Create multiple nodes and return a list of Node objects. + + Nodes will be named with the base name and a number. For example, if + the base name is 'libcloud' and you create 3 nodes, they will be + named:: + + libcloud-000 + libcloud-001 + libcloud-002 + + :param base_name: The base name of the nodes to create. + :type base_name: ``str`` + + :param size: The machine type to use. + :type size: ``str`` or :class:`GCENodeSize` + + :param image: The image to use to create the nodes. + :type image: ``str`` or :class:`GCENodeImage` + + :param number: The number of nodes to create. + :type number: ``int`` + + :keyword location: The location (zone) to create the nodes in. 
+ :type location: ``str`` or :class:`NodeLocation` or + :class:`GCEZone` or ``None`` + + :keyword ex_network: The network to associate with the nodes. + :type ex_network: ``str`` or :class:`GCENetwork` + + :keyword ex_tags: A list of tags to assiciate with the nodes. + :type ex_tags: ``list`` of ``str`` or ``None`` + + :keyword ex_metadata: Metadata dictionary for instances. + :type ex_metadata: ``dict`` or ``None`` + + :keyword ignore_errors: If True, don't raise Exceptions if one or + more nodes fails. + :type ignore_errors: ``bool`` + + :keyword use_existing_disk: If True and if an existing disk with the + same name/location is found, use that + disk instead of creating a new one. + :type use_existing_disk: ``bool`` + + :keyword poll_interval: Number of seconds between status checks. + :type poll_interval: ``int`` + + :keyword external_ip: The external IP address to use. If 'ephemeral' + (default), a new non-static address will be + used. If 'None', then no external address will + be used. (Static addresses are not supported for + multiple node creation.) + :type external_ip: ``str`` or None + + :keyword timeout: The number of seconds to wait for all nodes to be + created before timing out. + :type timeout: ``int`` + + :return: A list of Node objects for the new nodes. + :rtype: ``list`` of :class:`Node` + """ + location = location or self.zone + if not hasattr(location, 'name'): + location = self.ex_get_zone(location) + if not hasattr(size, 'name'): + size = self.ex_get_size(size, location) + if not hasattr(ex_network, 'name'): + ex_network = self.ex_get_network(ex_network) + if not hasattr(image, 'name'): + image = self.ex_get_image(image) + + node_attrs = {'size': size, + 'image': image, + 'location': location, + 'network': ex_network, + 'tags': ex_tags, + 'metadata': ex_metadata, + 'ignore_errors': ignore_errors, + 'use_existing_disk': use_existing_disk, + 'external_ip': external_ip} + + # List for holding the status information for disk/node creation. 
+ status_list = [] + + for i in range(number): + name = '%s-%03d' % (base_name, i) + + status = {'name': name, + 'node_response': None, + 'node': None, + 'disk_response': None, + 'disk': None} + + status_list.append(status) + + # Create disks for nodes + for status in status_list: + self._multi_create_disk(status, node_attrs) + + start_time = time.time() + complete = False + while not complete: + if (time.time() - start_time >= timeout): + raise Exception("Timeout (%s sec) while waiting for multiple " + "instances") + complete = True + time.sleep(poll_interval) + for status in status_list: + # If disk does not yet exist, check on its status + if not status['disk']: + self._multi_check_disk(status, node_attrs) + + # If disk exists, but node does not, create the node or check + # on its status if already in progress. + if status['disk'] and not status['node']: + if not status['node_response']: + self._multi_create_node(status, node_attrs) + else: + self._multi_check_node(status, node_attrs) + # If any of the nodes have not been created (or failed) we are + # not done yet. + if not status['node']: + complete = False + + # Return list of nodes + node_list = [] + for status in status_list: + node_list.append(status['node']) + return node_list + + def ex_create_targetpool(self, name, region=None, healthchecks=None, + nodes=None): + """ + Create a target pool. + + :param name: Name of target pool + :type name: ``str`` + + :keyword region: Region to create the target pool in. 
Defaults to + self.region + :type region: ``str`` or :class:`GCERegion` or ``None`` + + :keyword healthchecks: Optional list of health checks to attach + :type healthchecks: ``list`` of ``str`` or :class:`GCEHealthCheck` + + :keyword nodes: Optional list of nodes to attach to the pool + :type nodes: ``list`` of ``str`` or :class:`Node` + + :return: Target Pool object + :rtype: :class:`GCETargetPool` + """ + region = region or self.region + targetpool_data = {} + targetpool_data['name'] = name + if not hasattr(region, 'name'): + region = self.ex_get_region(region) + targetpool_data['region'] = region.extra['selfLink'] + + if healthchecks: + if not hasattr(healthchecks[0], 'name'): + hc_list = [self.ex_get_healthcheck(h).extra['selfLink'] for h + in healthchecks] + else: + hc_list = [h.extra['selfLink'] for h in healthchecks] + targetpool_data['healthChecks'] = hc_list + if nodes: + if not hasattr(nodes[0], 'name'): + node_list = [self.ex_get_node(n, 'all').extra['selfLink'] for n + in nodes] + else: + node_list = [n.extra['selfLink'] for n in nodes] + targetpool_data['instances'] = node_list + + request = '/regions/%s/targetPools' % (region.name) + + self.connection.async_request(request, method='POST', + data=targetpool_data) + + return self.ex_get_targetpool(name, region) + + def create_volume(self, size, name, location=None, snapshot=None, + image=None, use_existing=True): + """ + Create a volume (disk). + + :param size: Size of volume to create (in GB). Can be None if image + or snapshot is supplied. + :type size: ``int`` or ``str`` or ``None`` + + :param name: Name of volume to create + :type name: ``str`` + + :keyword location: Location (zone) to create the volume in + :type location: ``str`` or :class:`GCEZone` or + :class:`NodeLocation` or ``None`` + + :keyword snapshot: Snapshot to create image from + :type snapshot: :class:`GCESnapshot` or ``str`` or ``None`` + + :keyword image: Image to create disk from. 
+ :type image: :class:`GCENodeImage` or ``str`` or ``None`` + + :keyword use_existing: If True and a disk with the given name already + exists, return an object for that disk instead + of attempting to create a new disk. + :type use_existing: ``bool`` + + :return: Storage Volume object + :rtype: :class:`StorageVolume` + """ + request, volume_data, params = self._create_vol_req( + size, name, location, snapshot, image) + try: + self.connection.async_request(request, method='POST', + data=volume_data, params=params) + except ResourceExistsError: + e = sys.exc_info()[1] + if not use_existing: + raise e + + return self.ex_get_volume(name, location) + + def create_volume_snapshot(self, volume, name): + """ + Create a snapshot of the provided Volume. + + :param volume: A StorageVolume object + :type volume: :class:`StorageVolume` + + :return: A GCE Snapshot object + :rtype: :class:`GCESnapshot` + """ + snapshot_data = {} + snapshot_data['name'] = name + request = '/zones/%s/disks/%s/createSnapshot' % ( + volume.extra['zone'].name, volume.name) + self.connection.async_request(request, method='POST', + data=snapshot_data) + + return self.ex_get_snapshot(name) + + def list_volume_snapshots(self, volume): + """ + List snapshots created from the provided volume. + + For GCE, snapshots are global, but while the volume they were + created from still exists, the source disk for the snapshot is + tracked. + + :param volume: A StorageVolume object + :type volume: :class:`StorageVolume` + + :return: A list of Snapshot objects + :rtype: ``list`` of :class:`GCESnapshot` + """ + volume_snapshots = [] + volume_link = volume.extra['selfLink'] + all_snapshots = self.ex_list_snapshots() + for snapshot in all_snapshots: + if snapshot.extra['sourceDisk'] == volume_link: + volume_snapshots.append(snapshot) + return volume_snapshots + + def ex_update_healthcheck(self, healthcheck): + """ + Update a health check with new values. 
+ + To update, change the attributes of the health check object and pass + the updated object to the method. + + :param healthcheck: A healthcheck object with updated values. + :type healthcheck: :class:`GCEHealthCheck` + + :return: An object representing the new state of the health check. + :rtype: :class:`GCEHealthCheck` + """ + hc_data = {} + hc_data['name'] = healthcheck.name + hc_data['requestPath'] = healthcheck.path + hc_data['port'] = healthcheck.port + hc_data['checkIntervalSec'] = healthcheck.interval + hc_data['timeoutSec'] = healthcheck.timeout + hc_data['unhealthyThreshold'] = healthcheck.unhealthy_threshold + hc_data['healthyThreshold'] = healthcheck.healthy_threshold + if healthcheck.extra['host']: + hc_data['host'] = healthcheck.extra['host'] + if healthcheck.extra['description']: + hc_data['description'] = healthcheck.extra['description'] + + request = '/global/httpHealthChecks/%s' % (healthcheck.name) + + self.connection.async_request(request, method='PUT', + data=hc_data) + + return self.ex_get_healthcheck(healthcheck.name) + + def ex_update_firewall(self, firewall): + """ + Update a firewall with new values. + + To update, change the attributes of the firewall object and pass the + updated object to the method. + + :param firewall: A firewall object with updated values. + :type firewall: :class:`GCEFirewall` + + :return: An object representing the new state of the firewall. 
+ :rtype: :class:`GCEFirewall` + """ + firewall_data = {} + firewall_data['name'] = firewall.name + firewall_data['allowed'] = firewall.allowed + firewall_data['network'] = firewall.network.extra['selfLink'] + if firewall.source_ranges: + firewall_data['sourceRanges'] = firewall.source_ranges + if firewall.source_tags: + firewall_data['sourceTags'] = firewall.source_tags + if firewall.target_tags: + firewall_data['targetTags'] = firewall.target_tags + if firewall.extra['description']: + firewall_data['description'] = firewall.extra['description'] + + request = '/global/firewalls/%s' % (firewall.name) + + self.connection.async_request(request, method='PUT', + data=firewall_data) + + return self.ex_get_firewall(firewall.name) + + def ex_targetpool_add_node(self, targetpool, node): + """ + Add a node to a target pool. + + :param targetpool: The targetpool to add node to + :type targetpool: ``str`` or :class:`GCETargetPool` + + :param node: The node to add + :type node: ``str`` or :class:`Node` + + :returns: True if successful + :rtype: ``bool`` + """ + if not hasattr(targetpool, 'name'): + targetpool = self.ex_get_targetpool(targetpool) + if not hasattr(node, 'name'): + node = self.ex_get_node(node, 'all') + + targetpool_data = {'instances': [{'instance': node.extra['selfLink']}]} + + request = '/regions/%s/targetPools/%s/addInstance' % ( + targetpool.region.name, targetpool.name) + self.connection.async_request(request, method='POST', + data=targetpool_data) + targetpool.nodes.append(node) + return True + + def ex_targetpool_add_healthcheck(self, targetpool, healthcheck): + """ + Add a health check to a target pool. 
+ + :param targetpool: The targetpool to add health check to + :type targetpool: ``str`` or :class:`GCETargetPool` + + :param healthcheck: The healthcheck to add + :type healthcheck: ``str`` or :class:`GCEHealthCheck` + + :returns: True if successful + :rtype: ``bool`` + """ + if not hasattr(targetpool, 'name'): + targetpool = self.ex_get_targetpool(targetpool) + if not hasattr(healthcheck, 'name'): + healthcheck = self.ex_get_healthcheck(healthcheck) + + targetpool_data = {'healthCheck': healthcheck.extra['selfLink']} + + request = '/regions/%s/targetPools/%s/addHealthCheck' % ( + targetpool.region.name, targetpool.name) + self.connection.async_request(request, method='POST', + data=targetpool_data) + targetpool.healthchecks.append(healthcheck) + return True + + def ex_targetpool_remove_node(self, targetpool, node): + """ + Remove a node from a target pool. + + :param targetpool: The targetpool to remove node from + :type targetpool: ``str`` or :class:`GCETargetPool` + + :param node: The node to remove + :type node: ``str`` or :class:`Node` + + :returns: True if successful + :rtype: ``bool`` + """ + if not hasattr(targetpool, 'name'): + targetpool = self.ex_get_targetpool(targetpool) + if not hasattr(node, 'name'): + node = self.ex_get_node(node, 'all') + + targetpool_data = {'instances': [{'instance': node.extra['selfLink']}]} + + request = '/regions/%s/targetPools/%s/removeInstance' % ( + targetpool.region.name, targetpool.name) + self.connection.async_request(request, method='POST', + data=targetpool_data) + # Remove node object from node list + index = None + for i, nd in enumerate(targetpool.nodes): + if nd.name == node.name: + index = i + break + if index is not None: + targetpool.nodes.pop(index) + return True + + def ex_targetpool_remove_healthcheck(self, targetpool, healthcheck): + """ + Remove a health check from a target pool. 
+ + :param targetpool: The targetpool to remove health check from + :type targetpool: ``str`` or :class:`GCETargetPool` + + :param healthcheck: The healthcheck to remove + :type healthcheck: ``str`` or :class:`GCEHealthCheck` + + :returns: True if successful + :rtype: ``bool`` + """ + if not hasattr(targetpool, 'name'): + targetpool = self.ex_get_targetpool(targetpool) + if not hasattr(healthcheck, 'name'): + healthcheck = self.ex_get_healthcheck(healthcheck) + + targetpool_data = {'healthCheck': healthcheck.extra['selfLink']} + + request = '/regions/%s/targetPools/%s/removeHealthCheck' % ( + targetpool.region.name, targetpool.name) + self.connection.async_request(request, method='POST', + data=targetpool_data) + # Remove healthcheck object from healthchecks list + index = None + for i, hc in enumerate(targetpool.healthchecks): + if hc.name == healthcheck.name: + index = i + if index is not None: + targetpool.healthchecks.pop(index) + return True + + def reboot_node(self, node): + """ + Reboot a node. + + :param node: Node to be rebooted + :type node: :class:`Node` + + :return: True if successful, False if not + :rtype: ``bool`` + """ + request = '/zones/%s/instances/%s/reset' % (node.extra['zone'].name, + node.name) + self.connection.async_request(request, method='POST', + data='ignored') + return True + + def ex_set_node_tags(self, node, tags): + """ + Set the tags on a Node instance. + + Note that this updates the node object directly. 
+ + :param node: Node object + :type node: :class:`Node` + + :param tags: List of tags to apply to the object + :type tags: ``list`` of ``str`` + + :return: True if successful + :rtype: ``bool`` + """ + request = '/zones/%s/instances/%s/setTags' % (node.extra['zone'].name, + node.name) + + tags_data = {} + tags_data['items'] = tags + tags_data['fingerprint'] = node.extra['tags_fingerprint'] + + self.connection.async_request(request, method='POST', + data=tags_data) + new_node = self.ex_get_node(node.name, node.extra['zone']) + node.extra['tags'] = new_node.extra['tags'] + node.extra['tags_fingerprint'] = new_node.extra['tags_fingerprint'] + return True + + def ex_set_node_scheduling(self, node, on_host_maintenance=None, + automatic_restart=None): + """Set the maintenance behavior for the node. + + See `Scheduling `_ documentation for more info. + + :param node: Node object + :type node: :class:`Node` + + :keyword on_host_maintenance: Defines whether node should be + terminated or migrated when host machine + goes down. Acceptable values are: + 'MIGRATE' or 'TERMINATE' (If not + supplied, value will be reset to GCE + default value for the instance type.) + :type on_host_maintenance: ``str`` + + :keyword automatic_restart: Defines whether the instance should be + automatically restarted when it is + terminated by Compute Engine. (If not + supplied, value will be set to the GCE + default value for the instance type.) + :type automatic_restart: ``bool`` + + :return: True if successful. 
+ :rtype: ``bool`` + """ + if not hasattr(node, 'name'): + node = self.ex_get_node(node, 'all') + if on_host_maintenance is not None: + on_host_maintenance = on_host_maintenance.upper() + ohm_values = ['MIGRATE', 'TERMINATE'] + if on_host_maintenance not in ohm_values: + raise ValueError('on_host_maintenance must be one of %s' % + ','.join(ohm_values)) + + request = '/zones/%s/instances/%s/setScheduling' % ( + node.extra['zone'].name, node.name) + + scheduling_data = {} + if on_host_maintenance is not None: + scheduling_data['onHostMaintenance'] = on_host_maintenance + if automatic_restart is not None: + scheduling_data['automaticRestart'] = automatic_restart + + self.connection.async_request(request, method='POST', + data=scheduling_data) + + new_node = self.ex_get_node(node.name, node.extra['zone']) + node.extra['scheduling'] = new_node.extra['scheduling'] + + ohm = node.extra['scheduling'].get('onHostMaintenance') + ar = node.extra['scheduling'].get('automaticRestart') + + success = True + if on_host_maintenance not in [None, ohm]: + success = False + if automatic_restart not in [None, ar]: + success = False + + return success + + def deploy_node(self, name, size, image, script, location=None, + ex_network='default', ex_tags=None): + """ + Create a new node and run a script on start-up. + + :param name: The name of the node to create. + :type name: ``str`` + + :param size: The machine type to use. + :type size: ``str`` or :class:`GCENodeSize` + + :param image: The image to use to create the node. + :type image: ``str`` or :class:`GCENodeImage` + + :param script: File path to start-up script + :type script: ``str`` + + :keyword location: The location (zone) to create the node in. + :type location: ``str`` or :class:`NodeLocation` or + :class:`GCEZone` or ``None`` + + :keyword ex_network: The network to associate with the node. + :type ex_network: ``str`` or :class:`GCENetwork` + + :keyword ex_tags: A list of tags to associate with the node. 
+ :type ex_tags: ``list`` of ``str`` or ``None`` + + :return: A Node object for the new node. + :rtype: :class:`Node` + """ + with open(script, 'r') as f: + script_data = f.read() + metadata = {'items': [{'key': 'startup-script', + 'value': script_data}]} + + return self.create_node(name, size, image, location=location, + ex_network=ex_network, ex_tags=ex_tags, + ex_metadata=metadata) + + def attach_volume(self, node, volume, device=None, ex_mode=None, + ex_boot=False): + """ + Attach a volume to a node. + + If volume is None, a scratch disk will be created and attached. + + :param node: The node to attach the volume to + :type node: :class:`Node` + + :param volume: The volume to attach. If none, a scratch disk will be + attached. + :type volume: :class:`StorageVolume` or ``None`` + + :keyword device: The device name to attach the volume as. Defaults to + volume name. + :type device: ``str`` + + :keyword ex_mode: Either 'READ_WRITE' or 'READ_ONLY' + :type ex_mode: ``str`` + + :keyword ex_boot: If true, disk will be attached as a boot disk + :type ex_boot: ``bool`` + + :return: True if successful + :rtype: ``bool`` + """ + volume_data = {} + if volume is None: + volume_data['type'] = 'SCRATCH' + else: + volume_data['type'] = 'PERSISTENT' + volume_data['source'] = volume.extra['selfLink'] + volume_data['kind'] = 'compute#attachedDisk' + volume_data['mode'] = ex_mode or 'READ_WRITE' + + if device: + volume_data['deviceName'] = device + else: + volume_data['deviceName'] = volume.name + + volume_data['boot'] = ex_boot + + request = '/zones/%s/instances/%s/attachDisk' % ( + node.extra['zone'].name, node.name) + self.connection.async_request(request, method='POST', + data=volume_data) + return True + + def detach_volume(self, volume, ex_node=None): + """ + Detach a volume from a node. 
+
+ :param volume: Volume object to detach
+ :type volume: :class:`StorageVolume`
+
+ :keyword ex_node: Node object to detach volume from (required)
+ :type ex_node: :class:`Node`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ if not ex_node:
+ return False
+ request = '/zones/%s/instances/%s/detachDisk?deviceName=%s' % (
+ ex_node.extra['zone'].name, ex_node.name, volume.name)
+
+ self.connection.async_request(request, method='POST',
+ data='ignored')
+ return True
+
+ def ex_set_volume_auto_delete(self, volume, node, auto_delete=True):
+ """
+ Sets the auto-delete flag for a volume attached to a node.
+
+ :param volume: Volume object to auto-delete
+ :type volume: :class:`StorageVolume`
+
+ :param node: Node object to auto-delete volume from
+ :type node: :class:`Node`
+
+ :keyword auto_delete: Flag to set for the auto-delete value
+ :type auto_delete: ``bool`` (default True)
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ request = '/zones/%s/instances/%s/setDiskAutoDelete' % (
+ node.extra['zone'].name, node.name
+ )
+ delete_params = {
+ 'deviceName': volume,
+ 'autoDelete': auto_delete,
+ }
+ self.connection.async_request(request, method='POST',
+ params=delete_params)
+ return True
+
+ def ex_destroy_address(self, address):
+ """
+ Destroy a static address.
+
+ :param address: Address object to destroy
+ :type address: :class:`GCEAddress`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ request = '/regions/%s/addresses/%s' % (address.region.name,
+ address.name)
+
+ self.connection.async_request(request, method='DELETE')
+ return True
+
+ def ex_delete_image(self, image):
+ """
+ Delete a specific image resource. 
+
+ :param image: Image object to delete
+ :type image: ``str`` or :class:`GCENodeImage`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ if not hasattr(image, 'name'):
+ image = self.ex_get_image(image)
+
+ request = '/global/images/%s' % (image.name)
+ self.connection.async_request(request, method='DELETE')
+ return True
+
+ def ex_deprecate_image(self, image, replacement, state=None):
+ """
+ Deprecate a specific image resource.
+
+ :param image: Image object to deprecate
+ :type image: ``str`` or :class:`GCENodeImage`
+
+ :param replacement: Image object to use as a replacement
+ :type replacement: ``str`` or :class:`GCENodeImage`
+
+ :param state: State of the image
+ :type state: ``str``
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ if not hasattr(image, 'name'):
+ image = self.ex_get_image(image)
+
+ if not hasattr(replacement, 'name'):
+ replacement = self.ex_get_image(replacement)
+
+ if state is None:
+ state = 'DEPRECATED'
+
+ possible_states = ['DELETED', 'DEPRECATED', 'OBSOLETE']
+
+ if state not in possible_states:
+ raise ValueError('state must be one of %s'
+ % ','.join(possible_states))
+
+ image_data = {
+ 'state': state,
+ 'replacement': replacement.extra['selfLink'],
+ }
+
+ request = '/global/images/%s/deprecate' % (image.name)
+
+ self.connection.request(
+ request, method='POST', data=image_data).object
+
+ return True
+
+ def ex_destroy_healthcheck(self, healthcheck):
+ """
+ Destroy a healthcheck.
+
+ :param healthcheck: Health check object to destroy
+ :type healthcheck: :class:`GCEHealthCheck`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ request = '/global/httpHealthChecks/%s' % (healthcheck.name)
+ self.connection.async_request(request, method='DELETE')
+ return True
+
+ def ex_destroy_firewall(self, firewall):
+ """
+ Destroy a firewall. 
+ + :param firewall: Firewall object to destroy + :type firewall: :class:`GCEFirewall` + + :return: True if successful + :rtype: ``bool`` + """ + request = '/global/firewalls/%s' % (firewall.name) + self.connection.async_request(request, method='DELETE') + return True + + def ex_destroy_forwarding_rule(self, forwarding_rule): + """ + Destroy a forwarding rule. + + :param forwarding_rule: Forwarding Rule object to destroy + :type forwarding_rule: :class:`GCEForwardingRule` + + :return: True if successful + :rtype: ``bool`` + """ + request = '/regions/%s/forwardingRules/%s' % ( + forwarding_rule.region.name, forwarding_rule.name) + self.connection.async_request(request, method='DELETE') + return True + + def ex_destroy_network(self, network): + """ + Destroy a network. + + :param network: Network object to destroy + :type network: :class:`GCENetwork` + + :return: True if successful + :rtype: ``bool`` + """ + request = '/global/networks/%s' % (network.name) + self.connection.async_request(request, method='DELETE') + return True + + def destroy_node(self, node, destroy_boot_disk=False): + """ + Destroy a node. + + :param node: Node object to destroy + :type node: :class:`Node` + + :keyword destroy_boot_disk: If true, also destroy the node's + boot disk. (Note that this keyword is not + accessible from the node's .destroy() + method.) + :type destroy_boot_disk: ``bool`` + + :return: True if successful + :rtype: ``bool`` + """ + request = '/zones/%s/instances/%s' % (node.extra['zone'].name, + node.name) + self.connection.async_request(request, method='DELETE') + if destroy_boot_disk and node.extra['boot_disk']: + node.extra['boot_disk'].destroy() + return True + + def ex_destroy_multiple_nodes(self, node_list, ignore_errors=True, + destroy_boot_disk=False, poll_interval=2, + timeout=DEFAULT_TASK_COMPLETION_TIMEOUT): + """ + Destroy multiple nodes at once. 
+ + :param node_list: List of nodes to destroy + :type node_list: ``list`` of :class:`Node` + + :keyword ignore_errors: If true, don't raise an exception if one or + more nodes fails to be destroyed. + :type ignore_errors: ``bool`` + + :keyword destroy_boot_disk: If true, also destroy the nodes' boot + disks. + :type destroy_boot_disk: ``bool`` + + :keyword poll_interval: Number of seconds between status checks. + :type poll_interval: ``int`` + + :keyword timeout: Number of seconds to wait for all nodes to be + destroyed. + :type timeout: ``int`` + + :return: A list of boolean values. One for each node. True means + that the node was successfully destroyed. + :rtype: ``list`` of ``bool`` + """ + status_list = [] + complete = False + start_time = time.time() + for node in node_list: + request = '/zones/%s/instances/%s' % (node.extra['zone'].name, + node.name) + try: + response = self.connection.request(request, + method='DELETE').object + except GoogleBaseError: + self._catch_error(ignore_errors=ignore_errors) + response = None + + status = {'node': node, + 'node_success': False, + 'node_response': response, + 'disk_success': not destroy_boot_disk, + 'disk_response': None} + + status_list.append(status) + + while not complete: + if (time.time() - start_time >= timeout): + raise Exception("Timeout (%s sec) while waiting to delete " + "multiple instances") + complete = True + for status in status_list: + # If one of the operations is running, check the status + operation = status['node_response'] or status['disk_response'] + delete_disk = False + if operation: + no_errors = True + try: + response = self.connection.request( + operation['selfLink']).object + except GoogleBaseError: + self._catch_error(ignore_errors=ignore_errors) + no_errors = False + response = {'status': 'DONE'} + if response['status'] == 'DONE': + # If a node was deleted, update status and indicate + # that the disk is ready to be deleted. 
+ if status['node_response']: + status['node_response'] = None + status['node_success'] = no_errors + delete_disk = True + else: + status['disk_response'] = None + status['disk_success'] = no_errors + # If we are destroying disks, and the node has been deleted, + # destroy the disk. + if delete_disk and destroy_boot_disk: + boot_disk = status['node'].extra['boot_disk'] + if boot_disk: + request = '/zones/%s/disks/%s' % ( + boot_disk.extra['zone'].name, boot_disk.name) + try: + response = self.connection.request( + request, method='DELETE').object + except GoogleBaseError: + self._catch_error(ignore_errors=ignore_errors) + no_errors = False + response = None + status['disk_response'] = response + else: # If there is no boot disk, ignore + status['disk_success'] = True + operation = status['node_response'] or status['disk_response'] + if operation: + time.sleep(poll_interval) + complete = False + + success = [] + for status in status_list: + s = status['node_success'] and status['disk_success'] + success.append(s) + return success + + def ex_destroy_targetpool(self, targetpool): + """ + Destroy a target pool. + + :param targetpool: TargetPool object to destroy + :type targetpool: :class:`GCETargetPool` + + :return: True if successful + :rtype: ``bool`` + """ + request = '/regions/%s/targetPools/%s' % (targetpool.region.name, + targetpool.name) + + self.connection.async_request(request, method='DELETE') + return True + + def destroy_volume(self, volume): + """ + Destroy a volume. + + :param volume: Volume object to destroy + :type volume: :class:`StorageVolume` + + :return: True if successful + :rtype: ``bool`` + """ + request = '/zones/%s/disks/%s' % (volume.extra['zone'].name, + volume.name) + self.connection.async_request(request, method='DELETE') + return True + + def destroy_volume_snapshot(self, snapshot): + """ + Destroy a snapshot. 
+ + :param snapshot: Snapshot object to destroy + :type snapshot: :class:`GCESnapshot` + + :return: True if successful + :rtype: ``bool`` + """ + request = '/global/snapshots/%s' % (snapshot.name) + self.connection.async_request(request, method='DELETE') + return True + + def ex_get_address(self, name, region=None): + """ + Return an Address object based on an address name and optional region. + + :param name: The name of the address + :type name: ``str`` + + :keyword region: The region to search for the address in (set to + 'all' to search all regions) + :type region: ``str`` :class:`GCERegion` or ``None`` + + :return: An Address object for the address + :rtype: :class:`GCEAddress` + """ + region = self._set_region(region) or self._find_zone_or_region( + name, 'addresses', region=True, res_name='Address') + request = '/regions/%s/addresses/%s' % (region.name, name) + response = self.connection.request(request, method='GET').object + return self._to_address(response) + + def ex_get_healthcheck(self, name): + """ + Return a HealthCheck object based on the healthcheck name. + + :param name: The name of the healthcheck + :type name: ``str`` + + :return: A GCEHealthCheck object + :rtype: :class:`GCEHealthCheck` + """ + request = '/global/httpHealthChecks/%s' % (name) + response = self.connection.request(request, method='GET').object + return self._to_healthcheck(response) + + def ex_get_firewall(self, name): + """ + Return a Firewall object based on the firewall name. + + :param name: The name of the firewall + :type name: ``str`` + + :return: A GCEFirewall object + :rtype: :class:`GCEFirewall` + """ + request = '/global/firewalls/%s' % (name) + response = self.connection.request(request, method='GET').object + return self._to_firewall(response) + + def ex_get_forwarding_rule(self, name, region=None): + """ + Return a Forwarding Rule object based on the forwarding rule name. 
+ + :param name: The name of the forwarding rule + :type name: ``str`` + + :keyword region: The region to search for the rule in (set to 'all' + to search all regions). + :type region: ``str`` or ``None`` + + :return: A GCEForwardingRule object + :rtype: :class:`GCEForwardingRule` + """ + region = self._set_region(region) or self._find_zone_or_region( + name, 'forwardingRules', region=True, res_name='ForwardingRule') + request = '/regions/%s/forwardingRules/%s' % (region.name, name) + response = self.connection.request(request, method='GET').object + return self._to_forwarding_rule(response) + + def ex_get_image(self, partial_name): + """ + Return an GCENodeImage object based on the name or link provided. + + :param partial_name: The name, partial name, or full path of a GCE + image. + :type partial_name: ``str`` + + :return: GCENodeImage object based on provided information or None if + an image with that name is not found. + :rtype: :class:`GCENodeImage` or ``None`` + """ + if partial_name.startswith('https://'): + response = self.connection.request(partial_name, method='GET') + return self._to_node_image(response.object) + image = self._match_images(None, partial_name) + if not image: + if partial_name.startswith('debian'): + image = self._match_images('debian-cloud', partial_name) + elif partial_name.startswith('centos'): + image = self._match_images('centos-cloud', partial_name) + elif partial_name.startswith('container-vm'): + image = self._match_images('google-containers', partial_name) + + return image + + def ex_get_network(self, name): + """ + Return a Network object based on a network name. 
+ + :param name: The name of the network + :type name: ``str`` + + :return: A Network object for the network + :rtype: :class:`GCENetwork` + """ + request = '/global/networks/%s' % (name) + response = self.connection.request(request, method='GET').object + return self._to_network(response) + + def ex_get_node(self, name, zone=None): + """ + Return a Node object based on a node name and optional zone. + + :param name: The name of the node + :type name: ``str`` + + :keyword zone: The zone to search for the node in. If set to 'all', + search all zones for the instance. + :type zone: ``str`` or :class:`GCEZone` or + :class:`NodeLocation` or ``None`` + + :return: A Node object for the node + :rtype: :class:`Node` + """ + zone = self._set_zone(zone) or self._find_zone_or_region( + name, 'instances', res_name='Node') + request = '/zones/%s/instances/%s' % (zone.name, name) + response = self.connection.request(request, method='GET').object + return self._to_node(response) + + def ex_get_project(self): + """ + Return a Project object with project-wide information. + + :return: A GCEProject object + :rtype: :class:`GCEProject` + """ + response = self.connection.request('', method='GET').object + return self._to_project(response) + + def ex_get_size(self, name, zone=None): + """ + Return a size object based on a machine type name and zone. + + :param name: The name of the node + :type name: ``str`` + + :keyword zone: The zone to search for the machine type in + :type zone: ``str`` or :class:`GCEZone` or + :class:`NodeLocation` or ``None`` + + :return: A GCENodeSize object for the machine type + :rtype: :class:`GCENodeSize` + """ + zone = zone or self.zone + if not hasattr(zone, 'name'): + zone = self.ex_get_zone(zone) + request = '/zones/%s/machineTypes/%s' % (zone.name, name) + response = self.connection.request(request, method='GET').object + return self._to_node_size(response) + + def ex_get_snapshot(self, name): + """ + Return a Snapshot object based on snapshot name. 
+ + :param name: The name of the snapshot + :type name: ``str`` + + :return: A GCESnapshot object for the snapshot + :rtype: :class:`GCESnapshot` + """ + request = '/global/snapshots/%s' % (name) + response = self.connection.request(request, method='GET').object + return self._to_snapshot(response) + + def ex_get_volume(self, name, zone=None): + """ + Return a Volume object based on a volume name and optional zone. + + :param name: The name of the volume + :type name: ``str`` + + :keyword zone: The zone to search for the volume in (set to 'all' to + search all zones) + :type zone: ``str`` or :class:`GCEZone` or :class:`NodeLocation` + or ``None`` + + :return: A StorageVolume object for the volume + :rtype: :class:`StorageVolume` + """ + zone = self._set_zone(zone) or self._find_zone_or_region( + name, 'disks', res_name='Volume') + request = '/zones/%s/disks/%s' % (zone.name, name) + response = self.connection.request(request, method='GET').object + return self._to_storage_volume(response) + + def ex_get_region(self, name): + """ + Return a Region object based on the region name. + + :param name: The name of the region. + :type name: ``str`` + + :return: A GCERegion object for the region + :rtype: :class:`GCERegion` + """ + if name.startswith('https://'): + short_name = self._get_components_from_path(name)['name'] + request = name + else: + short_name = name + request = '/regions/%s' % (name) + # Check region cache first + if short_name in self.region_dict: + return self.region_dict[short_name] + # Otherwise, look up region information + response = self.connection.request(request, method='GET').object + return self._to_region(response) + + def ex_get_targetpool(self, name, region=None): + """ + Return a TargetPool object based on a name and optional region. + + :param name: The name of the target pool + :type name: ``str`` + + :keyword region: The region to search for the target pool in (set to + 'all' to search all regions). 
+ :type region: ``str`` or :class:`GCERegion` or ``None`` + + :return: A TargetPool object for the pool + :rtype: :class:`GCETargetPool` + """ + region = self._set_region(region) or self._find_zone_or_region( + name, 'targetPools', region=True, res_name='TargetPool') + request = '/regions/%s/targetPools/%s' % (region.name, name) + response = self.connection.request(request, method='GET').object + return self._to_targetpool(response) + + def ex_get_zone(self, name): + """ + Return a Zone object based on the zone name. + + :param name: The name of the zone. + :type name: ``str`` + + :return: A GCEZone object for the zone or None if not found + :rtype: :class:`GCEZone` or ``None`` + """ + if name.startswith('https://'): + short_name = self._get_components_from_path(name)['name'] + request = name + else: + short_name = name + request = '/zones/%s' % (name) + # Check zone cache first + if short_name in self.zone_dict: + return self.zone_dict[short_name] + # Otherwise, look up zone information + try: + response = self.connection.request(request, method='GET').object + except ResourceNotFoundError: + return None + return self._to_zone(response) + + def ex_copy_image(self, name, url, description=None): + """ + Copy an image to your image collection. + + :param name: The name of the image + :type name: ``str`` + + :param url: The URL to the image. The URL can start with `gs://` + :param url: ``str`` + + :param description: The description of the image + :type description: ``str`` + + :return: NodeImage object based on provided information or None if an + image with that name is not found. 
+ :rtype: :class:`NodeImage` or ``None`` + """ + + # the URL for an image can start with gs:// + if url.startswith('gs://'): + url = url.replace('gs://', 'https://storage.googleapis.com/', 1) + + image_data = { + 'name': name, + 'description': description, + 'sourceType': 'RAW', + 'rawDisk': { + 'source': url, + }, + } + + request = '/global/images' + self.connection.async_request(request, method='POST', + data=image_data) + return self.ex_get_image(name) + + def _ex_connection_class_kwargs(self): + return {'auth_type': self.auth_type, + 'project': self.project, + 'scopes': self.scopes} + + def _catch_error(self, ignore_errors=False): + """ + Catch an exception and raise it unless asked to ignore it. + + :keyword ignore_errors: If true, just return the error. Otherwise, + raise the error. + :type ignore_errors: ``bool`` + + :return: The exception that was raised. + :rtype: :class:`Exception` + """ + e = sys.exc_info()[1] + if ignore_errors: + return e + else: + raise e + + def _get_components_from_path(self, path): + """ + Return a dictionary containing name & zone/region from a request path. + + :param path: HTTP request path (e.g. + '/project/pjt-name/zones/us-central1-a/instances/mynode') + :type path: ``str`` + + :return: Dictionary containing name and zone/region of resource + :rtype ``dict`` + """ + region = None + zone = None + glob = False + components = path.split('/') + name = components[-1] + if components[-4] == 'regions': + region = components[-3] + elif components[-4] == 'zones': + zone = components[-3] + elif components[-3] == 'global': + glob = True + + return {'name': name, 'region': region, 'zone': zone, 'global': glob} + + def _get_region_from_zone(self, zone): + """ + Return the Region object that contains the given Zone object. 
+ + :param zone: Zone object + :type zone: :class:`GCEZone` + + :return: Region object that contains the zone + :rtype: :class:`GCERegion` + """ + for region in self.region_list: + zones = [z.name for z in region.zones] + if zone.name in zones: + return region + + def _find_zone_or_region(self, name, res_type, region=False, + res_name=None): + """ + Find the zone or region for a named resource. + + :param name: Name of resource to find + :type name: ``str`` + + :param res_type: Type of resource to find. + Examples include: 'disks', 'instances' or 'addresses' + :type res_type: ``str`` + + :keyword region: If True, search regions instead of zones + :type region: ``bool`` + + :keyword res_name: The name of the resource type for error messages. + Examples: 'Volume', 'Node', 'Address' + :keyword res_name: ``str`` + + :return: Zone/Region object for the zone/region for the resource. + :rtype: :class:`GCEZone` or :class:`GCERegion` + """ + if region: + rz = 'region' + else: + rz = 'zone' + rz_name = None + res_name = res_name or res_type + request = '/aggregated/%s' % (res_type) + res_list = self.connection.request(request).object + for k, v in res_list['items'].items(): + for res in v.get(res_type, []): + if res['name'] == name: + rz_name = k.replace('%ss/' % (rz), '') + break + if not rz_name: + raise ResourceNotFoundError( + '%s \'%s\' not found in any %s.' % (res_name, name, rz), + None, None) + else: + getrz = getattr(self, 'ex_get_%s' % (rz)) + return getrz(rz_name) + + def _match_images(self, project, partial_name): + """ + Find the latest image, given a partial name. + + For example, providing 'debian-7' will return the image object for the + most recent image with a name that starts with 'debian-7' in the + supplied project. If no project is given, it will search your own + project. + + :param project: The name of the project to search for images. + Examples include: 'debian-cloud' and 'centos-cloud'. 
+ :type project: ``str`` or ``None`` + + :param partial_name: The full name or beginning of a name for an + image. + :type partial_name: ``str`` + + :return: The latest image object that matches the partial name or None + if no matching image is found. + :rtype: :class:`GCENodeImage` or ``None`` + """ + project_images = self.list_images(project) + partial_match = [] + for image in project_images: + if image.name == partial_name: + return image + if image.name.startswith(partial_name): + ts = timestamp_to_datetime(image.extra['creationTimestamp']) + if not partial_match or partial_match[0] < ts: + partial_match = [ts, image] + + if partial_match: + return partial_match[1] + + def _set_region(self, region): + """ + Return the region to use for listing resources. + + :param region: A name, region object, None, or 'all' + :type region: ``str`` or :class:`GCERegion` or ``None`` + + :return: A region object or None if all regions should be considered + :rtype: :class:`GCERegion` or ``None`` + """ + region = region or self.region + + if region == 'all' or region is None: + return None + + if not hasattr(region, 'name'): + region = self.ex_get_region(region) + return region + + def _set_zone(self, zone): + """ + Return the zone to use for listing resources. + + :param zone: A name, zone object, None, or 'all' + :type region: ``str`` or :class:`GCEZone` or ``None`` + + :return: A zone object or None if all zones should be considered + :rtype: :class:`GCEZone` or ``None`` + """ + zone = zone or self.zone + + if zone == 'all' or zone is None: + return None + + if not hasattr(zone, 'name'): + zone = self.ex_get_zone(zone) + return zone + + def _create_node_req(self, name, size, image, location, network, + tags=None, metadata=None, boot_disk=None, + external_ip='ephemeral'): + """ + Returns a request and body to create a new node. This is a helper + method to support both :class:`create_node` and + :class:`ex_create_multiple_nodes`. 
+ + :param name: The name of the node to create. + :type name: ``str`` + + :param size: The machine type to use. + :type size: :class:`GCENodeSize` + + :param image: The image to use to create the node (or, if using a + persistent disk, the image the disk was created from). + :type image: :class:`GCENodeImage` + + :param location: The location (zone) to create the node in. + :type location: :class:`NodeLocation` or :class:`GCEZone` + + :param network: The network to associate with the node. + :type network: :class:`GCENetwork` + + :keyword tags: A list of tags to associate with the node. + :type tags: ``list`` of ``str`` + + :keyword metadata: Metadata dictionary for instance. + :type metadata: ``dict`` + + :keyword boot_disk: Persistent boot disk to attach. + :type :class:`StorageVolume` + + :keyword external_ip: The external IP address to use. If 'ephemeral' + (default), a new non-static address will be + used. If 'None', then no external address will + be used. To use an existing static IP address, + a GCEAddress object should be passed in. + :type external_ip: :class:`GCEAddress` or ``str`` or None + + :return: A tuple containing a request string and a node_data dict. 
+ :rtype: ``tuple`` of ``str`` and ``dict`` + """ + node_data = {} + node_data['machineType'] = size.extra['selfLink'] + node_data['name'] = name + if tags: + node_data['tags'] = {'items': tags} + if metadata: + node_data['metadata'] = metadata + + if boot_disk: + disks = [{'kind': 'compute#attachedDisk', + 'boot': True, + 'type': 'PERSISTENT', + 'mode': 'READ_WRITE', + 'deviceName': boot_disk.name, + 'zone': boot_disk.extra['zone'].extra['selfLink'], + 'source': boot_disk.extra['selfLink']}] + node_data['disks'] = disks + else: + node_data['image'] = image.extra['selfLink'] + + ni = [{'kind': 'compute#instanceNetworkInterface', + 'network': network.extra['selfLink']}] + if external_ip: + access_configs = [{'name': 'External NAT', + 'type': 'ONE_TO_ONE_NAT'}] + if hasattr(external_ip, 'address'): + access_configs[0]['natIP'] = external_ip.address + ni[0]['accessConfigs'] = access_configs + node_data['networkInterfaces'] = ni + + request = '/zones/%s/instances' % (location.name) + + return request, node_data + + def _multi_create_disk(self, status, node_attrs): + """Create disk for ex_create_multiple_nodes. + + :param status: Dictionary for holding node/disk creation status. + (This dictionary is modified by this method) + :type status: ``dict`` + + :param node_attrs: Dictionary for holding node attribute information. + (size, image, location, etc.) + :type node_attrs: ``dict`` + """ + disk = None + # Check for existing disk + if node_attrs['use_existing_disk']: + try: + disk = self.ex_get_volume(status['name'], + node_attrs['location']) + except ResourceNotFoundError: + pass + + if disk: + status['disk'] = disk + else: + # Create disk and return response object back in the status dict. + # Or, if there is an error, mark as failed. 
+ disk_req, disk_data, disk_params = self._create_vol_req( + None, status['name'], location=node_attrs['location'], + image=node_attrs['image']) + try: + disk_res = self.connection.request( + disk_req, method='POST', data=disk_data, + params=disk_params).object + except GoogleBaseError: + e = self._catch_error( + ignore_errors=node_attrs['ignore_errors']) + error = e.value + code = e.code + disk_res = None + status['disk'] = GCEFailedDisk(status['name'], + error, code) + status['disk_response'] = disk_res + + def _multi_check_disk(self, status, node_attrs): + """Check disk status for ex_create_multiple_nodes. + + :param status: Dictionary for holding node/disk creation status. + (This dictionary is modified by this method) + :type status: ``dict`` + + :param node_attrs: Dictionary for holding node attribute information. + (size, image, location, etc.) + :type node_attrs: ``dict`` + """ + error = None + try: + response = self.connection.request( + status['disk_response']['selfLink']).object + except GoogleBaseError: + e = self._catch_error(ignore_errors=node_attrs['ignore_errors']) + error = e.value + code = e.code + response = {'status': 'DONE'} + if response['status'] == 'DONE': + status['disk_response'] = None + if error: + status['disk'] = GCEFailedDisk(status['name'], error, code) + else: + status['disk'] = self.ex_get_volume(status['name'], + node_attrs['location']) + + def _multi_create_node(self, status, node_attrs): + """Create node for ex_create_multiple_nodes. + + :param status: Dictionary for holding node/disk creation status. + (This dictionary is modified by this method) + :type status: ``dict`` + + :param node_attrs: Dictionary for holding node attribute information. + (size, image, location, etc.) + :type node_attrs: ``dict`` + """ + # If disk has an error, set the node as failed and return + if hasattr(status['disk'], 'error'): + status['node'] = status['disk'] + return + + # Create node and return response object in status dictionary. 
+ # Or, if there is an error, mark as failed. + request, node_data = self._create_node_req( + status['name'], node_attrs['size'], node_attrs['image'], + node_attrs['location'], node_attrs['network'], node_attrs['tags'], + node_attrs['metadata'], boot_disk=status['disk'], + external_ip=node_attrs['external_ip']) + try: + node_res = self.connection.request( + request, method='POST', data=node_data).object + except GoogleBaseError: + e = self._catch_error(ignore_errors=node_attrs['ignore_errors']) + error = e.value + code = e.code + node_res = None + status['node'] = GCEFailedNode(status['name'], + error, code) + status['node_response'] = node_res + + def _multi_check_node(self, status, node_attrs): + """Check node status for ex_create_multiple_nodes. + + :param status: Dictionary for holding node/disk creation status. + (This dictionary is modified by this method) + :type status: ``dict`` + + :param node_attrs: Dictionary for holding node attribute information. + (size, image, location, etc.) + :type node_attrs: ``dict`` + """ + error = None + try: + response = self.connection.request( + status['node_response']['selfLink']).object + except GoogleBaseError: + e = self._catch_error(ignore_errors=node_attrs['ignore_errors']) + error = e.value + code = e.code + response = {'status': 'DONE'} + if response['status'] == 'DONE': + status['node_response'] = None + if error: + status['node'] = GCEFailedNode(status['name'], + error, code) + else: + status['node'] = self.ex_get_node(status['name'], + node_attrs['location']) + + def _create_vol_req(self, size, name, location=None, snapshot=None, + image=None): + """ + Assemble the request/data for creating a volume. + + Used by create_volume and ex_create_multiple_nodes + + :param size: Size of volume to create (in GB). Can be None if image + or snapshot is supplied. 
+ :type size: ``int`` or ``str`` or ``None`` + + :param name: Name of volume to create + :type name: ``str`` + + :keyword location: Location (zone) to create the volume in + :type location: ``str`` or :class:`GCEZone` or + :class:`NodeLocation` or ``None`` + + :keyword snapshot: Snapshot to create image from + :type snapshot: :class:`GCESnapshot` or ``str`` or ``None`` + + :keyword image: Image to create disk from. + :type image: :class:`GCENodeImage` or ``str`` or ``None`` + + :return: Tuple containing the request string, the data dictionary and + the URL parameters + :rtype: ``tuple`` + """ + volume_data = {} + params = None + volume_data['name'] = name + if size: + volume_data['sizeGb'] = str(size) + if image: + if not hasattr(image, 'name'): + image = self.ex_get_image(image) + params = {'sourceImage': image.extra['selfLink']} + volume_data['description'] = 'Image: %s' % ( + image.extra['selfLink']) + if snapshot: + if not hasattr(snapshot, 'name'): + # Check for full URI to not break backward-compatibility + if snapshot.startswith('https'): + snapshot = self._get_components_from_path(snapshot)['name'] + snapshot = self.ex_get_snapshot(snapshot) + snapshot_link = snapshot.extra['selfLink'] + volume_data['sourceSnapshot'] = snapshot_link + volume_data['description'] = 'Snapshot: %s' % (snapshot_link) + location = location or self.zone + if not hasattr(location, 'name'): + location = self.ex_get_zone(location) + request = '/zones/%s/disks' % (location.name) + + return request, volume_data, params + + def _to_address(self, address): + """ + Return an Address object from the json-response dictionary. + + :param address: The dictionary describing the address. 
+ :type address: ``dict`` + + :return: Address object + :rtype: :class:`GCEAddress` + """ + extra = {} + + region = self.ex_get_region(address['region']) + + extra['selfLink'] = address.get('selfLink') + extra['status'] = address.get('status') + extra['creationTimestamp'] = address.get('creationTimestamp') + + return GCEAddress(id=address['id'], name=address['name'], + address=address['address'], + region=region, driver=self, extra=extra) + + def _to_healthcheck(self, healthcheck): + """ + Return a HealthCheck object from the json-response dictionary. + + :param healthcheck: The dictionary describing the healthcheck. + :type healthcheck: ``dict`` + + :return: HealthCheck object + :rtype: :class:`GCEHealthCheck` + """ + extra = {} + extra['selfLink'] = healthcheck.get('selfLink') + extra['creationTimestamp'] = healthcheck.get('creationTimestamp') + extra['description'] = healthcheck.get('description') + extra['host'] = healthcheck.get('host') + + return GCEHealthCheck( + id=healthcheck['id'], name=healthcheck['name'], + path=healthcheck.get('requestPath'), port=healthcheck.get('port'), + interval=healthcheck.get('checkIntervalSec'), + timeout=healthcheck.get('timeoutSec'), + unhealthy_threshold=healthcheck.get('unhealthyThreshold'), + healthy_threshold=healthcheck.get('healthyThreshold'), + driver=self, extra=extra) + + def _to_firewall(self, firewall): + """ + Return a Firewall object from the json-response dictionary. + + :param firewall: The dictionary describing the firewall. 
+ :type firewall: ``dict`` + + :return: Firewall object + :rtype: :class:`GCEFirewall` + """ + extra = {} + extra['selfLink'] = firewall.get('selfLink') + extra['creationTimestamp'] = firewall.get('creationTimestamp') + extra['description'] = firewall.get('description') + extra['network_name'] = self._get_components_from_path( + firewall['network'])['name'] + + network = self.ex_get_network(extra['network_name']) + source_ranges = firewall.get('sourceRanges') + source_tags = firewall.get('sourceTags') + target_tags = firewall.get('targetTags') + + return GCEFirewall(id=firewall['id'], name=firewall['name'], + allowed=firewall.get('allowed'), network=network, + source_ranges=source_ranges, + source_tags=source_tags, + target_tags=target_tags, + driver=self, extra=extra) + + def _to_forwarding_rule(self, forwarding_rule): + """ + Return a Forwarding Rule object from the json-response dictionary. + + :param forwarding_rule: The dictionary describing the rule. + :type forwarding_rule: ``dict`` + + :return: ForwardingRule object + :rtype: :class:`GCEForwardingRule` + """ + extra = {} + extra['selfLink'] = forwarding_rule.get('selfLink') + extra['portRange'] = forwarding_rule.get('portRange') + extra['creationTimestamp'] = forwarding_rule.get('creationTimestamp') + extra['description'] = forwarding_rule.get('description') + + region = self.ex_get_region(forwarding_rule['region']) + targetpool = self.ex_get_targetpool( + self._get_components_from_path(forwarding_rule['target'])['name']) + + return GCEForwardingRule(id=forwarding_rule['id'], + name=forwarding_rule['name'], region=region, + address=forwarding_rule.get('IPAddress'), + protocol=forwarding_rule.get('IPProtocol'), + targetpool=targetpool, + driver=self, extra=extra) + + def _to_network(self, network): + """ + Return a Network object from the json-response dictionary. + + :param network: The dictionary describing the network. 
+ :type network: ``dict`` + + :return: Network object + :rtype: :class:`GCENetwork` + """ + extra = {} + + extra['selfLink'] = network.get('selfLink') + extra['gatewayIPv4'] = network.get('gatewayIPv4') + extra['description'] = network.get('description') + extra['creationTimestamp'] = network.get('creationTimestamp') + + return GCENetwork(id=network['id'], name=network['name'], + cidr=network.get('IPv4Range'), + driver=self, extra=extra) + + def _to_node_image(self, image): + """ + Return an Image object from the json-response dictionary. + + :param image: The dictionary describing the image. + :type image: ``dict`` + + :return: Image object + :rtype: :class:`GCENodeImage` + """ + extra = {} + extra['preferredKernel'] = image.get('preferredKernel', None) + extra['description'] = image.get('description', None) + extra['creationTimestamp'] = image.get('creationTimestamp') + extra['selfLink'] = image.get('selfLink') + extra['deprecated'] = image.get('deprecated', None) + + return GCENodeImage(id=image['id'], name=image['name'], driver=self, + extra=extra) + + def _to_node_location(self, location): + """ + Return a Location object from the json-response dictionary. + + :param location: The dictionary describing the location. + :type location: ``dict`` + + :return: Location object + :rtype: :class:`NodeLocation` + """ + return NodeLocation(id=location['id'], name=location['name'], + country=location['name'].split('-')[0], + driver=self) + + def _to_node(self, node): + """ + Return a Node object from the json-response dictionary. + + :param node: The dictionary describing the node. 
+ :type node: ``dict`` + + :return: Node object + :rtype: :class:`Node` + """ + public_ips = [] + private_ips = [] + extra = {} + + extra['status'] = node.get('status') + extra['description'] = node.get('description') + extra['zone'] = self.ex_get_zone(node['zone']) + extra['image'] = node.get('image') + extra['machineType'] = node.get('machineType') + extra['disks'] = node.get('disks', []) + extra['networkInterfaces'] = node.get('networkInterfaces') + extra['id'] = node['id'] + extra['selfLink'] = node.get('selfLink') + extra['name'] = node['name'] + extra['metadata'] = node.get('metadata', {}) + extra['tags_fingerprint'] = node['tags']['fingerprint'] + extra['scheduling'] = node.get('scheduling', {}) + extra['deprecated'] = True if node.get('deprecated', None) else False + + for disk in extra['disks']: + if disk.get('boot') and disk.get('type') == 'PERSISTENT': + bd = self._get_components_from_path(disk['source']) + extra['boot_disk'] = self.ex_get_volume(bd['name'], bd['zone']) + + if 'items' in node['tags']: + tags = node['tags']['items'] + else: + tags = [] + extra['tags'] = tags + + for network_interface in node.get('networkInterfaces', []): + private_ips.append(network_interface.get('networkIP')) + for access_config in network_interface.get('accessConfigs', []): + public_ips.append(access_config.get('natIP')) + + # For the node attributes, use just machine and image names, not full + # paths. Full paths are available in the "extra" dict. + if extra['image']: + image = self._get_components_from_path(extra['image'])['name'] + else: + image = None + size = self._get_components_from_path(node['machineType'])['name'] + + return Node(id=node['id'], name=node['name'], + state=self.NODE_STATE_MAP[node['status']], + public_ips=public_ips, private_ips=private_ips, + driver=self, size=size, image=image, extra=extra) + + def _to_node_size(self, machine_type): + """ + Return a Size object from the json-response dictionary. 
+ + :param machine_type: The dictionary describing the machine. + :type machine_type: ``dict`` + + :return: Size object + :rtype: :class:`GCENodeSize` + """ + extra = {} + extra['selfLink'] = machine_type.get('selfLink') + extra['zone'] = self.ex_get_zone(machine_type['zone']) + extra['description'] = machine_type.get('description') + extra['guestCpus'] = machine_type.get('guestCpus') + extra['creationTimestamp'] = machine_type.get('creationTimestamp') + try: + price = self._get_size_price(size_id=machine_type['name']) + except KeyError: + price = None + + return GCENodeSize(id=machine_type['id'], name=machine_type['name'], + ram=machine_type.get('memoryMb'), + disk=machine_type.get('imageSpaceGb'), + bandwidth=0, price=price, driver=self, extra=extra) + + def _to_project(self, project): + """ + Return a Project object from the json-response dictionary. + + :param project: The dictionary describing the project. + :type project: ``dict`` + + :return: Project object + :rtype: :class:`GCEProject` + """ + extra = {} + extra['selfLink'] = project.get('selfLink') + extra['creationTimestamp'] = project.get('creationTimestamp') + extra['description'] = project.get('description') + metadata = project['commonInstanceMetadata'].get('items') + + return GCEProject(id=project['id'], name=project['name'], + metadata=metadata, quotas=project.get('quotas'), + driver=self, extra=extra) + + def _to_region(self, region): + """ + Return a Region object from the json-response dictionary. + + :param region: The dictionary describing the region. 
+ :type region: ``dict`` + + :return: Region object + :rtype: :class:`GCERegion` + """ + extra = {} + extra['selfLink'] = region.get('selfLink') + extra['creationTimestamp'] = region.get('creationTimestamp') + extra['description'] = region.get('description') + + quotas = region.get('quotas') + zones = [self.ex_get_zone(z) for z in region.get('zones', [])] + # Work around a bug that will occasionally list missing zones in the + # region output + zones = [z for z in zones if z is not None] + deprecated = region.get('deprecated') + + return GCERegion(id=region['id'], name=region['name'], + status=region.get('status'), zones=zones, + quotas=quotas, deprecated=deprecated, + driver=self, extra=extra) + + def _to_snapshot(self, snapshot): + """ + Return a Snapshot object from the json-response dictionary. + + :param snapshot: The dictionary describing the snapshot + :type snapshot: ``dict`` + + :return: Snapshot object + :rtype: :class:`VolumeSnapshot` + """ + extra = {} + extra['selfLink'] = snapshot.get('selfLink') + extra['creationTimestamp'] = snapshot.get('creationTimestamp') + extra['sourceDisk'] = snapshot.get('sourceDisk') + + return GCESnapshot(id=snapshot['id'], name=snapshot['name'], + size=snapshot['diskSizeGb'], + status=snapshot.get('status'), driver=self, + extra=extra) + + def _to_storage_volume(self, volume): + """ + Return a Volume object from the json-response dictionary. + + :param volume: The dictionary describing the volume. 
+ :type volume: ``dict`` + + :return: Volume object + :rtype: :class:`StorageVolume` + """ + extra = {} + extra['selfLink'] = volume.get('selfLink') + extra['zone'] = self.ex_get_zone(volume['zone']) + extra['status'] = volume.get('status') + extra['creationTimestamp'] = volume.get('creationTimestamp') + extra['description'] = volume.get('description') + + return StorageVolume(id=volume['id'], name=volume['name'], + size=volume['sizeGb'], driver=self, extra=extra) + + def _to_targetpool(self, targetpool): + """ + Return a Target Pool object from the json-response dictionary. + + :param targetpool: The dictionary describing the volume. + :type targetpool: ``dict`` + + :return: Target Pool object + :rtype: :class:`GCETargetPool` + """ + extra = {} + extra['selfLink'] = targetpool.get('selfLink') + extra['description'] = targetpool.get('description') + region = self.ex_get_region(targetpool['region']) + healthcheck_list = [self.ex_get_healthcheck(h.split('/')[-1]) for h + in targetpool.get('healthChecks', [])] + node_list = [] + for n in targetpool.get('instances', []): + # Nodes that do not exist can be part of a target pool. If the + # node does not exist, use the URL of the node instead of the node + # object. + comp = self._get_components_from_path(n) + try: + node = self.ex_get_node(comp['name'], comp['zone']) + except ResourceNotFoundError: + node = n + node_list.append(node) + + return GCETargetPool(id=targetpool['id'], name=targetpool['name'], + region=region, healthchecks=healthcheck_list, + nodes=node_list, driver=self, extra=extra) + + def _to_zone(self, zone): + """ + Return a Zone object from the json-response dictionary. + + :param zone: The dictionary describing the zone. 
+ :type zone: ``dict`` + + :return: Zone object + :rtype: :class:`GCEZone` + """ + extra = {} + extra['selfLink'] = zone.get('selfLink') + extra['creationTimestamp'] = zone.get('creationTimestamp') + extra['description'] = zone.get('description') + + deprecated = zone.get('deprecated') + + return GCEZone(id=zone['id'], name=zone['name'], status=zone['status'], + maintenance_windows=zone.get('maintenanceWindows'), + deprecated=deprecated, driver=self, extra=extra) diff -Nru libcloud-0.5.0/libcloud/compute/drivers/gogrid.py libcloud-0.15.1/libcloud/compute/drivers/gogrid.py --- libcloud-0.5.0/libcloud/compute/drivers/gogrid.py 2011-05-21 11:07:38.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/gogrid.py 2014-07-02 18:47:55.000000000 +0000 @@ -19,6 +19,8 @@ import hashlib import copy +from libcloud.utils.py3 import b + from libcloud.common.types import InvalidCredsError, LibcloudError from libcloud.common.gogrid import GoGridConnection, BaseGoGridDriver from libcloud.compute.providers import Provider @@ -37,36 +39,41 @@ } GOGRID_INSTANCE_TYPES = { - '512MB': {'id': '512MB', - 'name': '512MB', - 'ram': 512, - 'disk': 30, - 'bandwidth': None}, - '1GB': {'id': '1GB', - 'name': '1GB', - 'ram': 1024, - 'disk': 60, - 'bandwidth': None}, - '2GB': {'id': '2GB', - 'name': '2GB', - 'ram': 2048, - 'disk': 120, - 'bandwidth': None}, - '4GB': {'id': '4GB', - 'name': '4GB', - 'ram': 4096, - 'disk': 240, - 'bandwidth': None}, - '8GB': {'id': '8GB', - 'name': '8GB', - 'ram': 8192, - 'disk': 480, - 'bandwidth': None}, - '16GB': {'id': '16GB', - 'name': '16GB', - 'ram': 16384, - 'disk': 960, - 'bandwidth': None}, + '512MB': {'id': '512MB', + 'name': '512MB', + 'ram': 512, + 'disk': 30, + 'bandwidth': None}, + '1GB': {'id': '1GB', + 'name': '1GB', + 'ram': 1024, + 'disk': 60, + 'bandwidth': None}, + '2GB': {'id': '2GB', + 'name': '2GB', + 'ram': 2048, + 'disk': 120, + 'bandwidth': None}, + '4GB': {'id': '4GB', + 'name': '4GB', + 'ram': 4096, + 'disk': 240, + 'bandwidth': 
None}, + '8GB': {'id': '8GB', + 'name': '8GB', + 'ram': 8192, + 'disk': 480, + 'bandwidth': None}, + '16GB': {'id': '16GB', + 'name': '16GB', + 'ram': 16384, + 'disk': 960, + 'bandwidth': None}, + '24GB': {'id': '24GB', + 'name': '24GB', + 'ram': 24576, + 'disk': 960, + 'bandwidth': None}, } @@ -78,9 +85,10 @@ # so uuid of node should not change after add is completed def get_uuid(self): return hashlib.sha1( - "%s:%d" % (self.public_ip,self.driver.type) + b("%s:%s" % (self.public_ips, self.driver.type)) ).hexdigest() + class GoGridNodeDriver(BaseGoGridDriver, NodeDriver): """ GoGrid node driver @@ -90,10 +98,17 @@ type = Provider.GOGRID api_name = 'gogrid' name = 'GoGrid' + website = 'http://www.gogrid.com/' features = {"create_node": ["generates_password"]} _instance_types = GOGRID_INSTANCE_TYPES + def __init__(self, *args, **kwargs): + """ + @inherits: :class:`NodeDriver.__init__` + """ + super(GoGridNodeDriver, self).__init__(*args, **kwargs) + def _get_state(self, element): try: return STATE[element['state']['name']] @@ -112,12 +127,13 @@ ip = self._get_ip(element) id = self._get_id(element) n = GoGridNode(id=id, - name=element['name'], - state=state, - public_ip=[ip], - private_ip=[], - extra={'ram': element.get('ram').get('name')}, - driver=self.connection.driver) + name=element['name'], + state=state, + public_ips=[ip], + private_ips=[], + extra={'ram': element.get('ram').get('name'), + 'description': element.get('description', '')}, + driver=self.connection.driver) if password: n.extra['password'] = password @@ -130,14 +146,14 @@ return n def _to_images(self, object): - return [ self._to_image(el) - for el in object['list'] ] + return [self._to_image(el) + for el in object['list']] def _to_location(self, element): location = NodeLocation(id=element['id'], - name=element['name'], - country="US", - driver=self.connection.driver) + name=element['name'], + country="US", + driver=self.connection.driver) return location def _to_locations(self, object): @@ -149,28 
+165,37 @@ if location is not None: params["datacenter"] = location.id images = self._to_images( - self.connection.request('/api/grid/image/list', params).object) + self.connection.request('/api/grid/image/list', params).object) return images def list_nodes(self): + """ + @inherits: :class:`NodeDriver.list_nodes` + :rtype: ``list`` of :class:`GoGridNode` + """ passwords_map = {} res = self._server_list() try: - for password in self._password_list()['list']: - try: - passwords_map[password['server']['id']] = password['password'] - except KeyError: - pass + for password in self._password_list()['list']: + try: + passwords_map[password['server']['id']] = \ + password['password'] + except KeyError: + pass except InvalidCredsError: - # some gogrid API keys don't have permission to access the password list. - pass + # some gogrid API keys don't have permission to access the + # password list. + pass - return [ self._to_node(el, passwords_map.get(el.get('id'))) - for el - in res['list'] ] + return [self._to_node(el, passwords_map.get(el.get('id'))) + for el in res['list']] def reboot_node(self, node): + """ + @inherits: :class:`NodeDriver.reboot_node` + :type node: :class:`GoGridNode` + """ id = node.id power = 'restart' res = self._server_power(id, power) @@ -179,6 +204,10 @@ return True def destroy_node(self, node): + """ + @inherits: :class:`NodeDriver.reboot_node` + :type node: :class:`GoGridNode` + """ id = node.id res = self._server_delete(id) if not res.success(): @@ -195,26 +224,26 @@ # power in ['start', 'stop', 'restart'] params = {'id': id, 'power': power} return self.connection.request("/api/grid/server/power", params, - method='POST') + method='POST') def _server_delete(self, id): params = {'id': id} return self.connection.request("/api/grid/server/delete", params, - method='POST') + method='POST') def _get_first_ip(self, location=None): ips = self.ex_list_ips(public=True, assigned=False, location=location) try: - return ips[0].ip + return ips[0].ip except 
IndexError: raise LibcloudError('No public unassigned IPs left', - GoGridNodeDriver) + GoGridNodeDriver) def list_sizes(self, location=None): sizes = [] - for key, values in self._instance_types.iteritems(): + for key, values in self._instance_types.items(): attributes = copy.deepcopy(values) - attributes.update({ 'price': self._get_size_price(size_id=key) }) + attributes.update({'price': self._get_size_price(size_id=key)}) sizes.append(NodeSize(driver=self.connection.driver, **attributes)) return sizes @@ -222,16 +251,37 @@ def list_locations(self): locations = self._to_locations( self.connection.request('/api/common/lookup/list', - params={'lookup': 'ip.datacenter'}).object) + params={'lookup': 'ip.datacenter'}).object) return locations def ex_create_node_nowait(self, **kwargs): """Don't block until GoGrid allocates id for a node but return right away with id == None. - The existance of this method is explained by the fact + The existence of this method is explained by the fact that GoGrid assigns id to a node only few minutes after - creation.""" + creation. + + + :keyword name: String with a name for this new node (required) + :type name: ``str`` + + :keyword size: The size of resources allocated to this node . + (required) + :type size: :class:`NodeSize` + + :keyword image: OS Image to boot on node. (required) + :type image: :class:`NodeImage` + + :keyword ex_description: Description of a Node + :type ex_description: ``str`` + + :keyword ex_ip: Public IP address to use for a Node. If not + specified, first available IP address will be picked + :type ex_ip: ``str`` + + :rtype: :class:`GoGridNode` + """ name = kwargs['name'] image = kwargs['image'] size = kwargs['size'] @@ -255,13 +305,16 @@ def create_node(self, **kwargs): """Create a new GoGird node - See L{NodeDriver.create_node} for more keyword args. 
+ @inherits: :class:`NodeDriver.create_node` + + :keyword ex_description: Description of a Node + :type ex_description: ``str`` - @keyword ex_description: Description of a Node - @type ex_description: C{string} - @keyword ex_ip: Public IP address to use for a Node. If not + :keyword ex_ip: Public IP address to use for a Node. If not specified, first available IP address will be picked - @type ex_ip: C{string} + :type ex_ip: ``str`` + + :rtype: :class:`GoGridNode` """ node = self.ex_create_node_nowait(**kwargs) @@ -273,14 +326,16 @@ nodes = self.list_nodes() for i in nodes: - if i.public_ip[0] == node.public_ip[0] and i.id is not None: + if i.public_ips[0] == node.public_ips[0] and i.id is not None: return i waittime += interval time.sleep(interval) if id is None: - raise Exception("Wasn't able to wait for id allocation for the node %s" % str(node)) + raise Exception( + "Wasn't able to wait for id allocation for the node %s" + % str(node)) return node @@ -292,10 +347,13 @@ http://wiki.gogrid.com/wiki/index.php/MyGSI - @keyword node: node to use as a base for image - @type node: L{Node} - @keyword name: name for new image - @type name: C{string} + :keyword node: node to use as a base for image + :type node: :class:`GoGridNode` + + :keyword name: name for new image + :type name: ``str`` + + :rtype: :class:`NodeImage` """ params = {'server': node.id, 'friendlyName': name} @@ -307,46 +365,54 @@ def ex_edit_node(self, **kwargs): """Change attributes of a node. 
- @keyword node: node to be edited - @type node: L{Node} - @keyword size: new size of a node - @type size: L{NodeSize} - @keyword ex_description: new description of a node - @type ex_description: C{string} + :keyword node: node to be edited (required) + :type node: :class:`GoGridNode` + + :keyword size: new size of a node (required) + :type size: :class:`NodeSize` + + :keyword ex_description: new description of a node + :type ex_description: ``str`` + + :rtype: :class:`Node` """ node = kwargs['node'] size = kwargs['size'] params = {'id': node.id, - 'server.ram': size.id} + 'server.ram': size.id} if 'ex_description' in kwargs: params['description'] = kwargs['ex_description'] object = self.connection.request('/api/grid/server/edit', - params=params).object + params=params).object return self._to_node(object['list'][0]) def ex_edit_image(self, **kwargs): """Edit metadata of a server image. - @keyword image: image to be edited - @type image: L{NodeImage} - @keyword public: should be the image public? 
- @type public: C{bool} - @keyword ex_description: description of the image (optional) - @type ex_description: C{string} - @keyword name: name of the image - @type name C{string} + :keyword image: image to be edited (required) + :type image: :class:`NodeImage` + + :keyword public: should be the image public (required) + :type public: ``bool`` + + :keyword ex_description: description of the image (optional) + :type ex_description: ``str`` + :keyword name: name of the image + :type name: ``str`` + + :rtype: :class:`NodeImage` """ image = kwargs['image'] public = kwargs['public'] params = {'id': image.id, - 'isPublic': str(public).lower()} + 'isPublic': str(public).lower()} if 'ex_description' in kwargs: params['description'] = kwargs['ex_description'] @@ -355,7 +421,7 @@ params['friendlyName'] = kwargs['name'] object = self.connection.request('/api/grid/image/edit', - params=params).object + params=params).object return self._to_image(object['list'][0]) @@ -363,33 +429,36 @@ """Return list of IP addresses assigned to the account. - @keyword public: set to True to list only + :keyword public: set to True to list only public IPs or False to list only private IPs. 
Set to None or not specify at all not to filter by type - @type public: C{bool} - @keyword assigned: set to True to list only addresses + :type public: ``bool`` + + :keyword assigned: set to True to list only addresses assigned to servers, False to list unassigned addresses and set to None or don't set at all not no filter by state - @type assigned: C{bool} - @keyword location: filter IP addresses by location - @type location: L{NodeLocation} - @return: C{list} of L{GoGridIpAddress}es + :type assigned: ``bool`` + + :keyword location: filter IP addresses by location + :type location: :class:`NodeLocation` + + :rtype: ``list`` of :class:`GoGridIpAddress` """ params = {} if "public" in kwargs and kwargs["public"] is not None: params["ip.type"] = {True: "Public", - False: "Private"}[kwargs["public"]] + False: "Private"}[kwargs["public"]] if "assigned" in kwargs and kwargs["assigned"] is not None: params["ip.state"] = {True: "Assigned", - False: "Unassigned"}[kwargs["assigned"]] + False: "Unassigned"}[kwargs["assigned"]] if "location" in kwargs and kwargs['location'] is not None: params['datacenter'] = kwargs['location'].id ips = self._to_ips( - self.connection.request('/api/grid/ip/list', - params=params).object) + self.connection.request('/api/grid/ip/list', + params=params).object) return ips diff -Nru libcloud-0.5.0/libcloud/compute/drivers/gridspot.py libcloud-0.15.1/libcloud/compute/drivers/gridspot.py --- libcloud-0.5.0/libcloud/compute/drivers/gridspot.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/gridspot.py 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,127 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.compute.base import NodeDriver, Node +from libcloud.compute.base import NodeState +from libcloud.common.base import ConnectionKey, JsonResponse +from libcloud.compute.types import Provider +from libcloud.common.types import InvalidCredsError + + +class GridspotAPIException(Exception): + def __str__(self): + return self.args[0] + + def __repr__(self): + return "" % (self.args[0]) + + +class GridspotResponse(JsonResponse): + """ + Response class for Gridspot + """ + def parse_body(self): + body = super(GridspotResponse, self).parse_body() + + if 'exception_name' in body and body['exception_name']: + raise GridspotAPIException(body['exception_name']) + + return body + + def parse_error(self): + # Gridspot 404s on invalid api key or instance_id + raise InvalidCredsError("Invalid api key/instance_id") + + +class GridspotConnection(ConnectionKey): + """ + Connection class to connect to Gridspot's API servers + """ + + host = 'gridspot.com' + responseCls = GridspotResponse + + def add_default_params(self, params): + params['api_key'] = self.key + return params + + +class GridspotNodeDriver(NodeDriver): + """ + Gridspot (http://www.gridspot.com/) node driver. 
+ """ + + type = Provider.GRIDSPOT + name = 'Gridspot' + website = 'http://www.gridspot.com/' + connectionCls = GridspotConnection + NODE_STATE_MAP = { + 'Running': NodeState.RUNNING, + 'Starting': NodeState.PENDING + } + + def list_nodes(self): + data = self.connection.request( + '/compute_api/v1/list_instances').object + return [self._to_node(n) for n in data['instances']] + + def destroy_node(self, node): + data = {'instance_id': node.id} + self.connection.request('/compute_api/v1/stop_instance', data).object + return True + + def _get_node_state(self, state): + result = self.NODE_STATE_MAP.get(state, NodeState.UNKNOWN) + return result + + def _add_int_param(self, params, data, field): + if data[field]: + try: + params[field] = int(data[field]) + except: + pass + + def _to_node(self, data): + port = None + ip = None + + state = self._get_node_state(data['current_state']) + + if data['vm_ssh_wan_ip_endpoint'] != 'null': + parts = data['vm_ssh_wan_ip_endpoint'].split(':') + ip = parts[0] + port = int(parts[1]) + + extra_params = { + 'winning_bid_id': data['winning_bid_id'], + 'port': port + } + + # Spec is vague and doesn't indicate if these will always be present + self._add_int_param(extra_params, data, 'vm_num_logical_cores') + self._add_int_param(extra_params, data, 'vm_num_physical_cores') + self._add_int_param(extra_params, data, 'vm_ram') + self._add_int_param(extra_params, data, 'start_state_time') + self._add_int_param(extra_params, data, 'ended_state_time') + self._add_int_param(extra_params, data, 'running_state_time') + + return Node( + id=data['instance_id'], + name=data['instance_id'], + state=state, + public_ips=[ip], + private_ips=[], + driver=self.connection.driver, + extra=extra_params) diff -Nru libcloud-0.5.0/libcloud/compute/drivers/hostvirtual.py libcloud-0.15.1/libcloud/compute/drivers/hostvirtual.py --- libcloud-0.5.0/libcloud/compute/drivers/hostvirtual.py 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/compute/drivers/hostvirtual.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,341 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +libcloud driver for the Host Virtual Inc. (VR) API +Home page http://www.vr.org/ +""" + +import time + +try: + import simplejson as json +except ImportError: + import json + +from libcloud.common.hostvirtual import HostVirtualResponse +from libcloud.common.hostvirtual import HostVirtualConnection +from libcloud.common.hostvirtual import HostVirtualException +from libcloud.compute.providers import Provider +from libcloud.compute.types import NodeState +from libcloud.compute.base import Node, NodeDriver +from libcloud.compute.base import NodeImage, NodeSize, NodeLocation +from libcloud.compute.base import NodeAuthSSHKey, NodeAuthPassword + +API_ROOT = '' + +NODE_STATE_MAP = { + 'BUILDING': NodeState.PENDING, + 'PENDING': NodeState.PENDING, + 'RUNNING': NodeState.RUNNING, # server is powered up + 'STOPPING': NodeState.REBOOTING, + 'REBOOTING': NodeState.REBOOTING, + 'STARTING': NodeState.REBOOTING, + 'TERMINATED': NodeState.TERMINATED, # server is powered down + 'STOPPED': NodeState.STOPPED +} + +DEFAULT_NODE_LOCATION_ID = 4 + + +class 
HostVirtualComputeResponse(HostVirtualResponse): + pass + + +class HostVirtualComputeConnection(HostVirtualConnection): + responseCls = HostVirtualComputeResponse + + +class HostVirtualNodeDriver(NodeDriver): + type = Provider.HOSTVIRTUAL + name = 'HostVirtual' + website = 'http://www.vr.org' + connectionCls = HostVirtualComputeConnection + features = {'create_node': ['ssh_key', 'password']} + + def __init__(self, key, secure=True, host=None, port=None): + self.location = None + super(HostVirtualNodeDriver, self).__init__(key=key, secure=secure, + host=host, port=port) + + def _to_node(self, data): + state = NODE_STATE_MAP[data['status']] + public_ips = [] + private_ips = [] + extra = {} + + if 'plan_id' in data: + extra['size'] = data['plan_id'] + if 'os_id' in data: + extra['image'] = data['os_id'] + if 'location_id' in data: + extra['location'] = data['location_id'] + if 'ip' in data: + public_ips.append(data['ip']) + + node = Node(id=data['mbpkgid'], name=data['fqdn'], state=state, + public_ips=public_ips, private_ips=private_ips, + driver=self.connection.driver, extra=extra) + return node + + def list_locations(self): + result = self.connection.request(API_ROOT + '/cloud/locations/').object + locations = [] + for dc in result: + locations.append(NodeLocation( + dc["id"], + dc["name"], + dc["name"].split(',')[1].replace(" ", ""), # country + self)) + return locations + + def list_sizes(self, location=None): + params = {} + if location: + params = {'location': location.id} + result = self.connection.request( + API_ROOT + '/cloud/sizes/', + data=json.dumps(params)).object + sizes = [] + for size in result: + n = NodeSize(id=size['plan_id'], + name=size['plan'], + ram=size['ram'], + disk=size['disk'], + bandwidth=size['transfer'], + price=size['price'], + driver=self.connection.driver) + sizes.append(n) + return sizes + + def list_images(self): + result = self.connection.request(API_ROOT + '/cloud/images/').object + images = [] + for image in result: + i = 
NodeImage(id=image["id"], + name=image["os"], + driver=self.connection.driver, + extra=image) + del i.extra['id'] + del i.extra['os'] + images.append(i) + return images + + def list_nodes(self): + result = self.connection.request(API_ROOT + '/cloud/servers/').object + nodes = [] + for value in result: + node = self._to_node(value) + nodes.append(node) + return nodes + + def _wait_for_node(self, node_id, timeout=30, interval=5.0): + """ + :param node_id: ID of the node to wait for. + :type node_id: ``int`` + + :param timeout: Timeout (in seconds). + :type timeout: ``int`` + + :param interval: How long to wait (in seconds) between each attempt. + :type interval: ``float`` + """ + # poll until we get a node + for i in range(0, timeout, int(interval)): + try: + node = self.ex_get_node(node_id) + return node + except HostVirtualException: + time.sleep(interval) + + raise HostVirtualException(412, 'Timedout on getting node details') + + def create_node(self, **kwargs): + dc = None + + size = kwargs['size'] + image = kwargs['image'] + + auth = self._get_and_check_auth(kwargs.get('auth')) + + params = {'plan': size.name} + + dc = DEFAULT_NODE_LOCATION_ID + if 'location' in kwargs: + dc = kwargs['location'].id + + # simply order a package first + result = self.connection.request(API_ROOT + '/cloud/buy/', + data=json.dumps(params), + method='POST').object + + # create a stub node + stub_node = self._to_node({ + 'mbpkgid': result['id'], + 'status': 'PENDING', + 'fqdn': kwargs['name'], + 'plan_id': size.id, + 'os_id': image.id, + 'location_id': dc + }) + + # provisioning a server using the stub node + self.ex_provision_node(node=stub_node, auth=auth) + node = self._wait_for_node(stub_node.id) + + if getattr(auth, 'generated', False): + node.extra['password'] = auth.password + + return node + + def reboot_node(self, node): + params = {'force': 0, 'mbpkgid': node.id} + result = self.connection.request( + API_ROOT + '/cloud/server/reboot', + data=json.dumps(params), + 
method='POST').object + + return bool(result) + + def destroy_node(self, node): + params = { + 'mbpkgid': node.id, + # 'reason': 'Submitted through Libcloud API' + } + + result = self.connection.request( + API_ROOT + '/cloud/cancel', data=json.dumps(params), + method='POST').object + + return bool(result) + + def ex_get_node(self, node_id): + """ + Get a single node. + + :param node_id: id of the node that we need the node object for + :type node_id: ``str`` + + :rtype: :class:`Node` + """ + + params = {'mbpkgid': node_id} + result = self.connection.request( + API_ROOT + '/cloud/server', params=params).object + node = self._to_node(result) + return node + + def ex_stop_node(self, node): + """ + Stop a node. + + :param node: Node which should be used + :type node: :class:`Node` + + :rtype: ``bool`` + """ + params = {'force': 0, 'mbpkgid': node.id} + result = self.connection.request( + API_ROOT + '/cloud/server/shutdown', + data=json.dumps(params), + method='POST').object + + return bool(result) + + def ex_start_node(self, node): + """ + Start a node. 
+ + :param node: Node which should be used + :type node: :class:`Node` + + :rtype: ``bool`` + """ + params = {'mbpkgid': node.id} + result = self.connection.request( + API_ROOT + '/cloud/server/start', + data=json.dumps(params), + method='POST').object + + return bool(result) + + def ex_provision_node(self, **kwargs): + """ + Provision a server on a VR package and get it booted + + :keyword node: node which should be used + :type node: :class:`Node` + + :keyword image: The distribution to deploy on your server (mandatory) + :type image: :class:`NodeImage` + + :keyword auth: an SSH key or root password (mandatory) + :type auth: :class:`NodeAuthSSHKey` or :class:`NodeAuthPassword` + + :keyword location: which datacenter to create the server in + :type location: :class:`NodeLocation` + + :return: Node representing the newly built server + :rtype: :class:`Node` + """ + + node = kwargs['node'] + + if 'image' in kwargs: + image = kwargs['image'] + else: + image = node.extra['image'] + + params = { + 'mbpkgid': node.id, + 'image': image, + 'fqdn': node.name, + 'location': node.extra['location'], + } + + auth = kwargs['auth'] + + ssh_key = None + password = None + if isinstance(auth, NodeAuthSSHKey): + ssh_key = auth.pubkey + params['ssh_key'] = ssh_key + elif isinstance(auth, NodeAuthPassword): + password = auth.password + params['password'] = password + + if not ssh_key and not password: + raise HostVirtualException(500, "Need SSH key or Root password") + + result = self.connection.request(API_ROOT + '/cloud/server/build', + data=json.dumps(params), + method='POST').object + return bool(result) + + def ex_delete_node(self, node): + """ + Delete a node. 
+ + :param node: Node which should be used + :type node: :class:`Node` + + :rtype: ``bool`` + """ + + params = {'mbpkgid': node.id} + result = self.connection.request( + API_ROOT + '/cloud/server/delete', data=json.dumps(params), + method='POST').object + + return bool(result) diff -Nru libcloud-0.5.0/libcloud/compute/drivers/hpcloud.py libcloud-0.15.1/libcloud/compute/drivers/hpcloud.py --- libcloud-0.5.0/libcloud/compute/drivers/hpcloud.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/hpcloud.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,99 @@ +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +HP Public cloud driver which is esentially just a small wrapper around +OpenStack driver. 
+""" + +from libcloud.compute.types import Provider, LibcloudError +from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection +from libcloud.compute.drivers.openstack import OpenStack_1_1_NodeDriver + + +__all__ = [ + 'HPCloudNodeDriver' +] + +ENDPOINT_ARGS_MAP = { + 'region-a.geo-1': { + 'service_type': 'compute', + 'name': 'Compute', + 'region': 'region-a.geo-1' + }, + 'region-b.geo-1': { + 'service_type': 'compute', + 'name': 'Compute', + 'region': 'region-b.geo-1' + }, +} + +AUTH_URL_TEMPLATE = 'https://%s.identity.hpcloudsvc.com:35357/v2.0/tokens' + + +class HPCloudConnection(OpenStack_1_1_Connection): + _auth_version = '2.0_password' + + def __init__(self, *args, **kwargs): + self.region = kwargs.pop('region', None) + self.get_endpoint_args = kwargs.pop('get_endpoint_args', None) + super(HPCloudConnection, self).__init__(*args, **kwargs) + + def get_endpoint(self): + if not self.get_endpoint_args: + raise LibcloudError( + 'HPCloudConnection must have get_endpoint_args set') + + if '2.0_password' in self._auth_version: + ep = self.service_catalog.get_endpoint(**self.get_endpoint_args) + else: + raise LibcloudError( + 'Auth version "%s" not supported' % (self._auth_version)) + + public_url = ep.get('publicURL', None) + + if not public_url: + raise LibcloudError('Could not find specified endpoint') + + return public_url + + +class HPCloudNodeDriver(OpenStack_1_1_NodeDriver): + name = 'HP Public Cloud (Helion)' + website = 'http://www.hpcloud.com/' + connectionCls = HPCloudConnection + type = Provider.HPCLOUD + + def __init__(self, key, secret, tenant_name, secure=True, + host=None, port=None, region='region-b.geo-1', **kwargs): + """ + Note: tenant_name argument is required for HP cloud. 
+ """ + self.tenant_name = tenant_name + super(HPCloudNodeDriver, self).__init__(key=key, secret=secret, + secure=secure, host=host, + port=port, + region=region, + **kwargs) + + def _ex_connection_class_kwargs(self): + endpoint_args = ENDPOINT_ARGS_MAP[self.region] + + kwargs = self.openstack_connection_kwargs() + kwargs['region'] = self.region + kwargs['get_endpoint_args'] = endpoint_args + kwargs['ex_force_auth_url'] = AUTH_URL_TEMPLATE % (self.region) + kwargs['ex_tenant_name'] = self.tenant_name + + return kwargs diff -Nru libcloud-0.5.0/libcloud/compute/drivers/ibm_sbc.py libcloud-0.15.1/libcloud/compute/drivers/ibm_sbc.py --- libcloud-0.5.0/libcloud/compute/drivers/ibm_sbc.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/ibm_sbc.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,191 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Driver for the IBM Developer Cloud. 
-""" -import base64, urllib - -from libcloud.common.base import Response, ConnectionUserAndKey -from libcloud.common.types import InvalidCredsError -from libcloud.compute.types import NodeState, Provider -from libcloud.compute.base import NodeDriver, Node, NodeImage, NodeSize, NodeLocation, NodeAuthSSHKey - -from xml.etree import ElementTree as ET - -HOST = 'www-147.ibm.com' -REST_BASE = '/computecloud/enterprise/api/rest/20100331' - -class IBMResponse(Response): - def success(self): - return int(self.status) == 200 - - def parse_body(self): - if not self.body: - return None - return ET.XML(self.body) - - def parse_error(self): - if int(self.status) == 401: - if not self.body: - raise InvalidCredsError(str(self.status) + ': ' + self.error) - else: - raise InvalidCredsError(self.body) - return self.body - -class IBMConnection(ConnectionUserAndKey): - """ - Connection class for the IBM Developer Cloud driver - """ - - host = HOST - responseCls = IBMResponse - - def add_default_headers(self, headers): - headers['Accept'] = 'text/xml' - headers['Authorization'] = ('Basic %s' % (base64.b64encode('%s:%s' % (self.user_id, self.key)))) - if not 'Content-Type' in headers: - headers['Content-Type'] = 'text/xml' - return headers - - def encode_data(self, data): - return urllib.urlencode(data) - -class IBMNodeDriver(NodeDriver): - """ - IBM Developer Cloud node driver. 
- """ - connectionCls = IBMConnection - type = Provider.IBM - name = "IBM Developer Cloud" - - NODE_STATE_MAP = { 0: NodeState.PENDING, # New - 1: NodeState.PENDING, # Provisioning - 2: NodeState.TERMINATED, # Failed - 3: NodeState.TERMINATED, # Removed - 4: NodeState.TERMINATED, # Rejected - 5: NodeState.RUNNING, # Active - 6: NodeState.UNKNOWN, # Unknown - 7: NodeState.PENDING, # Deprovisioning - 8: NodeState.REBOOTING, # Restarting - 9: NodeState.PENDING, # Starting - 10: NodeState.PENDING, # Stopping - 11: NodeState.TERMINATED,# Stopped - 12: NodeState.PENDING, # Deprovision Pending - 13: NodeState.PENDING, # Restart Pending - 14: NodeState.PENDING, # Attaching - 15: NodeState.PENDING } # Detaching - - def create_node(self, **kwargs): - """ - Creates a node in the IBM Developer Cloud. - - See L{NodeDriver.create_node} for more keyword args. - - @keyword ex_configurationData: Image-specific configuration parameters. - Configuration parameters are defined in - the parameters.xml file. The URL to - this file is defined in the NodeImage - at extra[parametersURL]. - @type ex_configurationData: C{dict} - """ - - # Compose headers for message body - data = {} - data.update({'name': kwargs['name']}) - data.update({'imageID': kwargs['image'].id}) - data.update({'instanceType': kwargs['size'].id}) - if 'location' in kwargs: - data.update({'location': kwargs['location'].id}) - else: - data.update({'location': '1'}) - if 'auth' in kwargs and isinstance(kwargs['auth'], NodeAuthSSHKey): - data.update({'publicKey': kwargs['auth'].pubkey}) - if 'ex_configurationData' in kwargs: - configurationData = kwargs['ex_configurationData'] - for key in configurationData.keys(): - data.update({key: configurationData.get(key)}) - - # Send request! 
- resp = self.connection.request(action = REST_BASE + '/instances', - headers = {'Content-Type': 'application/x-www-form-urlencoded'}, - method = 'POST', - data = data).object - return self._to_nodes(resp)[0] - - def destroy_node(self, node): - url = REST_BASE + '/instances/%s' % (node.id) - status = int(self.connection.request(action = url, method='DELETE').status) - return status == 200 - - def reboot_node(self, node): - url = REST_BASE + '/instances/%s' % (node.id) - headers = {'Content-Type': 'application/x-www-form-urlencoded'} - data = {'state': 'restart'} - - resp = self.connection.request(action = url, - method = 'PUT', - headers = headers, - data = data) - return int(resp.status) == 200 - - def list_nodes(self): - return self._to_nodes(self.connection.request(REST_BASE + '/instances').object) - - def list_images(self, location = None): - return self._to_images(self.connection.request(REST_BASE + '/offerings/image').object) - - def list_sizes(self, location = None): - return [ NodeSize('BRZ32.1/2048/60*175', 'Bronze 32 bit', None, None, None, None, self.connection.driver), - NodeSize('BRZ64.2/4096/60*500*350', 'Bronze 64 bit', None, None, None, None, self.connection.driver), - NodeSize('COP32.1/2048/60', 'Copper 32 bit', None, None, None, None, self.connection.driver), - NodeSize('COP64.2/4096/60', 'Copper 64 bit', None, None, None, None, self.connection.driver), - NodeSize('SLV32.2/4096/60*350', 'Silver 32 bit', None, None, None, None, self.connection.driver), - NodeSize('SLV64.4/8192/60*500*500', 'Silver 64 bit', None, None, None, None, self.connection.driver), - NodeSize('GLD32.4/4096/60*350', 'Gold 32 bit', None, None, None, None, self.connection.driver), - NodeSize('GLD64.8/16384/60*500*500', 'Gold 64 bit', None, None, None, None, self.connection.driver), - NodeSize('PLT64.16/16384/60*500*500*500*500', 'Platinum 64 bit', None, None, None, None, self.connection.driver) ] - - def list_locations(self): - return 
self._to_locations(self.connection.request(REST_BASE + '/locations').object) - - def _to_nodes(self, object): - return [ self._to_node(instance) for instance in object.findall('Instance') ] - - def _to_node(self, instance): - return Node(id = instance.findtext('ID'), - name = instance.findtext('Name'), - state = self.NODE_STATE_MAP[int(instance.findtext('Status'))], - public_ip = instance.findtext('IP'), - private_ip = None, - driver = self.connection.driver) - - def _to_images(self, object): - return [ self._to_image(image) for image in object.findall('Image') ] - - def _to_image(self, image): - return NodeImage(id = image.findtext('ID'), - name = image.findtext('Name'), - driver = self.connection.driver, - extra = {'parametersURL': image.findtext('Manifest')}) - - def _to_locations(self, object): - return [ self._to_location(location) for location in object.findall('Location') ] - - def _to_location(self, location): - # NOTE: country currently hardcoded - return NodeLocation(id = location.findtext('ID'), - name = location.findtext('Name'), - country = 'US', - driver = self.connection.driver) diff -Nru libcloud-0.5.0/libcloud/compute/drivers/ibm_sce.py libcloud-0.15.1/libcloud/compute/drivers/ibm_sce.py --- libcloud-0.5.0/libcloud/compute/drivers/ibm_sce.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/ibm_sce.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,753 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Driver for IBM SmartCloud Enterprise + +Formerly known as: +- IBM Developer Cloud +- IBM Smart Business Development and Test on the IBM Cloud +- IBM SmartBusiness Cloud +""" + +import base64 +import time + +from libcloud.utils.py3 import urlencode +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import b + +from libcloud.common.base import XmlResponse, ConnectionUserAndKey +from libcloud.common.types import InvalidCredsError, LibcloudError +from libcloud.compute.types import NodeState, Provider +from libcloud.compute.base import NodeDriver, Node, NodeImage, \ + NodeSize, NodeLocation, NodeAuthSSHKey, StorageVolume + +HOST = 'www-147.ibm.com' +REST_BASE = '/computecloud/enterprise/api/rest/20100331' + + +class IBMResponse(XmlResponse): + def success(self): + return int(self.status) == 200 + + def parse_error(self): + if int(self.status) == 401: + if not self.body: + raise InvalidCredsError(str(self.status) + ': ' + self.error) + else: + raise InvalidCredsError(self.body) + return self.body + + +class IBMConnection(ConnectionUserAndKey): + """ + Connection class for the IBM SmartCloud Enterprise driver + """ + + host = HOST + responseCls = IBMResponse + + def add_default_headers(self, headers): + headers['Accept'] = 'text/xml' + headers['Authorization'] = ('Basic %s' % (base64.b64encode( + b('%s:%s' % (self.user_id, self.key))).decode('utf-8'))) + if 'Content-Type' not in headers: + headers['Content-Type'] = 'text/xml' + return headers + + def encode_data(self, data): + return urlencode(data) + + +class IBMNodeLocation(NodeLocation): + 
""" + Extends the base LibCloud NodeLocation to contain additional attributes + """ + def __init__(self, id, name, country, driver, extra=None): + self.id = str(id) + self.name = name + self.country = country + self.driver = driver + self.extra = extra or {} + + def __repr__(self): + return ('' % + (self.id, self.name, self.country, self.driver.name, + self.extra)) + + +class VolumeState(object): + """ + The SCE specific states for a storage volume + """ + NEW = '0' + CREATING = '1' + DELETING = '2' + DELETED = '3' + DETACHED = '4' + ATTACHED = '5' + FAILED = '6' + DELETE_PENDING = '7' + BEING_CLONED = '8' + CLONING = '9' + ATTACHING = '10' + DETACHING = '11' + ATTACHIED = '12' + IMPORTING = '13' + TRANSFER_RETRYING = '14' + + +class VolumeOffering(object): + """ + An SCE specific storage volume offering class. + The volume offering ID is needed to create a volume. + Volume offering IDs are different for each data center. + """ + def __init__(self, id, name, location, extra=None): + self.id = id + self.location = location + self.name = name + self.extra = extra or {} + + def __repr__(self): + return ('' % + (self.id, self.location, self.name, self.extra)) + + +class Address(object): + """ + A reserved IP address that can be attached to an instance. 
+ Properties: id, ip, state, options(location, type, created_time, state, + hostname, instance_ids, vlan, owner, + mode, offering_id) + """ + def __init__(self, id, ip, state, options): + self.id = id + self.ip = ip + self.state = state + self.options = options + + def __repr__(self): + return ('' % + (self.id, self.ip, self.state, self.options)) + + +class IBMNodeDriver(NodeDriver): + """ + Node driver for IBM SmartCloud Enterprise + """ + connectionCls = IBMConnection + type = Provider.IBM + name = "IBM SmartCloud Enterprise" + website = 'http://ibm.com/services/us/en/cloud-enterprise/' + + NODE_STATE_MAP = { + 0: NodeState.PENDING, # New + 1: NodeState.PENDING, # Provisioning + 2: NodeState.TERMINATED, # Failed + 3: NodeState.TERMINATED, # Removed + 4: NodeState.TERMINATED, # Rejected + 5: NodeState.RUNNING, # Active + 6: NodeState.UNKNOWN, # Unknown + 7: NodeState.PENDING, # Deprovisioning + 8: NodeState.REBOOTING, # Restarting + 9: NodeState.PENDING, # Starting + 10: NodeState.PENDING, # Stopping + 11: NodeState.TERMINATED, # Stopped + 12: NodeState.PENDING, # Deprovision Pending + 13: NodeState.PENDING, # Restart Pending + 14: NodeState.PENDING, # Attaching + 15: NodeState.PENDING, # Detaching + } + + def create_node(self, **kwargs): + """ + Creates a node in the IBM SmartCloud Enterprise. + + See :class:`NodeDriver.create_node` for more keyword args. + + @inherits: :class:`NodeDriver.create_node` + + :keyword auth: Name of the pubkey to use. When constructing + :class:`NodeAuthSSHKey` instance, 'pubkey' argument must be the + name of the public key to use. You chose this name when creating + a new public key on the IBM server. + :type auth: :class:`NodeAuthSSHKey` + + :keyword ex_configurationData: Image-specific configuration + parameters. Configuration parameters are defined in the parameters + .xml file. The URL to this file is defined in the NodeImage at + extra[parametersURL]. + Note: This argument must be specified when launching a Windows + instance. 
It must contain 'UserName' and 'Password' keys. + :type ex_configurationData: ``dict`` + """ + + # Compose headers for message body + data = {} + data.update({'name': kwargs['name']}) + data.update({'imageID': kwargs['image'].id}) + data.update({'instanceType': kwargs['size'].id}) + if 'location' in kwargs: + data.update({'location': kwargs['location'].id}) + else: + data.update({'location': '1'}) + if 'auth' in kwargs and isinstance(kwargs['auth'], NodeAuthSSHKey): + data.update({'publicKey': kwargs['auth'].pubkey}) + if 'ex_configurationData' in kwargs: + configurationData = kwargs['ex_configurationData'] + if configurationData: + for key in configurationData.keys(): + data.update({key: configurationData.get(key)}) + + # Send request! + resp = self.connection.request( + action=REST_BASE + '/instances', + headers={'Content-Type': 'application/x-www-form-urlencoded'}, + method='POST', + data=data).object + return self._to_nodes(resp)[0] + + def create_volume(self, size, name, location, **kwargs): + """ + Create a new block storage volume (virtual disk) + + :param size: Size of volume in gigabytes (required). + Find out the possible sizes from the + offerings/storage REST interface + :type size: ``int`` + + :keyword name: Name of the volume to be created (required) + :type name: ``str`` + + :keyword location: Which data center to create a volume in. If + empty, it will fail for IBM SmartCloud Enterprise + (required) + :type location: :class:`NodeLocation` + + :keyword snapshot: Not supported for IBM SmartCloud Enterprise + :type snapshot: ``str`` + + :keyword kwargs.format: Either RAW or EXT3 for IBM SmartCloud + Enterprise (optional) + :type kwargs.format: ``str`` + + :keyword kwargs.offering_id: The storage offering ID for IBM + SmartCloud Enterprise + Find this from the REST interface + storage/offerings. 
(optional) + :type kwargs.offering_id: ``str`` + + :keyword kwargs.source_disk_id: If cloning a volume, the storage + disk to make a copy from (optional) + :type kwargs.source_disk_id: ``str`` + + :keyword kwargs.storage_area_id: The id of the storage availability + area to create the volume in + (optional) + :type kwargs.storage_area_id: ``str`` + + :keyword kwargs.target_location_id: If cloning a volume, the + storage disk to make a copy + from (optional) + :type kwargs.target_location_id: ``str`` + + :return: The newly created :class:`StorageVolume`. + :rtype: :class:`StorageVolume` + """ + data = {} + data.update({'name': name}) + data.update({'size': size}) + data.update({'location': location}) + if (('format' in kwargs) and (kwargs['format'] is not None)): + data.update({'format': kwargs['format']}) + if (('offering_id' in kwargs) and (kwargs['offering_id'] is not None)): + data.update({'offeringID': kwargs['offering_id']}) + if (('storage_area_id' in kwargs) and + (kwargs['storage_area_id'] is not None)): + data.update({'storageAreaID': kwargs['storage_area_id']}) + if 'source_disk_id' in kwargs: + data.update({'sourceDiskID': kwargs['source_disk_id']}) + data.update({'type': 'clone'}) + if 'target_location_id' in kwargs: + data.update({'targetLocationID': kwargs['target_location_id']}) + resp = self.connection.request( + action=REST_BASE + '/storage', + headers={'Content-Type': 'application/x-www-form-urlencoded'}, + method='POST', + data=data).object + return self._to_volumes(resp)[0] + + def create_image(self, name, description=None, **kwargs): + """ + Create a new node image from an existing volume or image. 
+ + :param name: Name of the image to be created (required) + :type name: ``str`` + + :param description: Description of the image to be created + :type description: ``str`` + + :keyword image_id: The ID of the source image if cloning the image + :type image_id: ``str`` + + :keyword volume_id: The ID of the storage volume if + importing the image + :type volume_id: ``str`` + + :return: The newly created :class:`NodeImage`. + :rtype: :class:`NodeImage` + """ + data = {} + data.update({'name': name}) + if description is not None: + data.update({'description': description}) + if (('image_id' in kwargs) and (kwargs['image_id'] is not None)): + data.update({'imageId': kwargs['image_id']}) + if (('volume_id' in kwargs) and (kwargs['volume_id'] is not None)): + data.update({'volumeId': kwargs['volume_id']}) + resp = self.connection.request( + action=REST_BASE + '/offerings/image', + headers={'Content-Type': 'application/x-www-form-urlencoded'}, + method='POST', + data=data).object + return self._to_images(resp)[0] + + def destroy_node(self, node): + url = REST_BASE + '/instances/%s' % (node.id) + status = int(self.connection.request(action=url, + method='DELETE').status) + return status == httplib.OK + + def destroy_volume(self, volume): + """ + Destroys a storage volume. + + :param volume: Volume to be destroyed + :type volume: :class:`StorageVolume` + + :rtype: ``bool`` + """ + url = REST_BASE + '/storage/%s' % (volume.id) + status = int(self.connection.request(action=url, + method='DELETE').status) + return status == httplib.OK + + def ex_destroy_image(self, image): + """ + Destroys an image. + + :param image: Image to be destroyed + :type image: :class:`NodeImage` + + :return: ``bool`` + """ + + url = REST_BASE + '/offerings/image/%s' % (image.id) + status = int(self.connection.request(action=url, + method='DELETE').status) + return status == 200 + + def attach_volume(self, node, volume): + """ + Attaches volume to node. 
+ + :param node: Node to attach volume to + :type node: :class:`Node` + + :param volume: Volume to attach + :type volume: :class:`StorageVolume` + + :rtype: ``bool`` + """ + url = REST_BASE + '/instances/%s' % (node.id) + headers = {'Content-Type': 'application/x-www-form-urlencoded'} + data = {'storageID': volume.id, 'type': 'attach'} + resp = self.connection.request(action=url, + method='PUT', + headers=headers, + data=data) + return int(resp.status) == 200 + + def detach_volume(self, node, volume): + """ + Detaches a volume from a node. + + :param node: Node which should be used + :type node: :class:`Node` + + :param volume: Volume to be detached + :type volume: :class:`StorageVolume` + + :rtype: ``bool`` + """ + url = REST_BASE + '/instances/%s' % (node.id) + headers = {'Content-Type': 'application/x-www-form-urlencoded'} + data = {'storageID': volume.id, 'type': 'detach'} + resp = self.connection.request(action=url, + method='PUT', + headers=headers, + data=data) + return int(resp.status) == 200 + + def reboot_node(self, node): + url = REST_BASE + '/instances/%s' % (node.id) + headers = {'Content-Type': 'application/x-www-form-urlencoded'} + data = {'state': 'restart'} + + resp = self.connection.request(action=url, + method='PUT', + headers=headers, + data=data) + return int(resp.status) == 200 + + def list_nodes(self): + return self._to_nodes( + self.connection.request(REST_BASE + '/instances').object) + + def list_images(self, location=None): + return self._to_images( + self.connection.request(REST_BASE + '/offerings/image').object) + + def list_volumes(self): + """ + List storage volumes. + + :rtype: ``list`` of :class:`StorageVolume` + """ + return self._to_volumes( + self.connection.request(REST_BASE + '/storage').object) + + def list_sizes(self, location=None): + """ + Returns a generic list of sizes. See list_images() for a list of + supported sizes for specific images. 
In particular, you need to have + a size that matches the architecture (32-bit vs 64-bit) of the virtual + machine image operating system. + + @inherits: :class:`NodeDriver.list_sizes` + """ + return [ + NodeSize('BRZ32.1/2048/60*175', 'Bronze 32 bit', None, None, None, + None, self.connection.driver), + NodeSize('BRZ64.2/4096/60*500*350', 'Bronze 64 bit', None, None, + None, None, self.connection.driver), + NodeSize('COP32.1/2048/60', 'Copper 32 bit', None, None, None, + None, self.connection.driver), + NodeSize('COP64.2/4096/60', 'Copper 64 bit', None, None, None, + None, self.connection.driver), + NodeSize('SLV32.2/4096/60*350', 'Silver 32 bit', None, None, None, + None, self.connection.driver), + NodeSize('SLV64.4/8192/60*500*500', 'Silver 64 bit', None, None, + None, None, self.connection.driver), + NodeSize('GLD32.4/4096/60*350', 'Gold 32 bit', None, None, None, + None, self.connection.driver), + NodeSize('GLD64.8/16384/60*500*500', 'Gold 64 bit', None, None, + None, None, self.connection.driver), + NodeSize('PLT64.16/16384/60*500*500*500*500', 'Platinum 64 bit', + None, None, None, None, self.connection.driver)] + + def list_locations(self): + return self._to_locations( + self.connection.request(REST_BASE + '/locations').object) + + def ex_list_storage_offerings(self): + """ + List the storage center offerings + + :rtype: ``list`` of :class:`VolumeOffering` + """ + return self._to_volume_offerings( + self.connection.request(REST_BASE + '/offerings/storage').object) + + def ex_allocate_address(self, location_id, offering_id, vlan_id=None): + """ + Allocate a new reserved IP address + + :param location_id: Target data center + :type location_id: ``str`` + + :param offering_id: Offering ID for address to create + :type offering_id: ``str`` + + :param vlan_id: ID of target VLAN + :type vlan_id: ``str`` + + :return: :class:`Address` object + :rtype: :class:`Address` + """ + url = REST_BASE + '/addresses' + headers = {'Content-Type': 
'application/x-www-form-urlencoded'} + data = {'location': location_id, 'offeringID': offering_id} + if vlan_id is not None: + data.update({'vlanID': vlan_id}) + resp = self.connection.request(action=url, + method='POST', + headers=headers, + data=data).object + return self._to_addresses(resp)[0] + + def ex_list_addresses(self, resource_id=None): + """ + List the reserved IP addresses + + :param resource_id: If this is supplied only a single address will + be returned (optional) + :type resource_id: ``str`` + + :rtype: ``list`` of :class:`Address` + """ + url = REST_BASE + '/addresses' + if resource_id: + url += '/' + resource_id + return self._to_addresses(self.connection.request(url).object) + + def ex_copy_to(self, image, volume): + """ + Copies a node image to a storage volume + + :param image: source image to copy + :type image: :class:`NodeImage` + + :param volume: Target storage volume to copy to + :type volume: :class:`StorageVolume` + + :return: ``bool`` The success of the operation + :rtype: ``bool`` + """ + url = REST_BASE + '/storage/%s' % (volume.id) + headers = {'Content-Type': 'application/x-www-form-urlencoded'} + data = {'imageId': image.id} + resp = self.connection.request(action=url, + method='PUT', + headers=headers, + data=data) + return int(resp.status) == 200 + + def ex_delete_address(self, resource_id): + """ + Delete a reserved IP address + + :param resource_id: The address to delete (required) + :type resource_id: ``str`` + + :rtype: ``bool`` + """ + url = REST_BASE + '/addresses/' + resource_id + status = int(self.connection.request(action=url, + method='DELETE').status) + return status == 200 + + def ex_wait_storage_state(self, volume, state=VolumeState.DETACHED, + wait_period=60, timeout=1200): + """ + Block until storage volume state changes to the given value + + :param volume: Storage volume. 
+ :type volume: :class:`StorageVolume` + + :param state: The target state to wait for + :type state: ``int`` + + :param wait_period: How many seconds to between each loop + iteration (default is 3) + :type wait_period: ``int`` + + :param timeout: How many seconds to wait before timing out + (default is 1200) + :type timeout: ``int`` + + :rtype: :class:`StorageVolume` + """ + start = time.time() + end = start + timeout + + while time.time() < end: + volumes = self.list_volumes() + volumes = list([v for v in volumes if v.uuid == volume.uuid]) + + if (len(volumes) == 1 and volumes[0].extra['state'] == state): + return volumes[0] + else: + time.sleep(wait_period) + continue + + raise LibcloudError(value='Timed out after %d seconds' % (timeout), + driver=self) + + def _to_nodes(self, object): + return [self._to_node(instance) for instance in + object.findall('Instance')] + + def _to_node(self, instance): + public_ips = [] + + ip = instance.findtext('IP') + if ip: + public_ips.append(ip) + + return Node( + id=instance.findtext('ID'), + name=instance.findtext('Name'), + state=self.NODE_STATE_MAP[int(instance.findtext('Status'))], + public_ips=public_ips, + private_ips=[], + driver=self.connection.driver + ) + + def _to_images(self, object): + # Converts data retrieved from SCE /offerings/image REST call to + # a NodeImage + return [self._to_image(image) for image in object.findall('Image')] + + def _to_image(self, image): + # Converts an SCE Image object to a NodeImage + imageID = image.findtext('ID') + imageName = image.findtext('Name') + parametersURL = image.findtext('Manifest') + location = image.findtext('Location') + state = image.findtext('State') + owner = image.findtext('Owner') + visibility = image.findtext('Visibility') + platform = image.findtext('Platform') + description = image.findtext('Description') + documentation = image.findtext('Documentation') + instanceTypes = image.findall('SupportedInstanceTypes') + nodeSizes = 
self._to_node_sizes(image.find('SupportedInstanceTypes')) + return NodeImage(id=imageID, + name=imageName, + driver=self.connection.driver, + extra={ + 'parametersURL': parametersURL, + 'location': location, + 'state': state, + 'owner': owner, + 'visibility': visibility, + 'platform': platform, + 'description': description, + 'documentation': documentation, + 'instanceTypes': instanceTypes, + 'node_sizes': nodeSizes + } + ) + + def _to_locations(self, object): + return [self._to_location(location) for location in + object.findall('Location')] + + def _to_location(self, location): + # Converts an SCE Location object to a Libcloud NodeLocation object + name_text = location.findtext('Name') + description = location.findtext('Description') + state = location.findtext('State') + (nameVal, separator, countryVal) = name_text.partition(',') + capabiltyElements = location.findall('Capabilities/Capability') + capabilities = {} + for elem in capabiltyElements: + capabilityID = elem.attrib['id'] + entryElements = elem.findall('Entry') + entries = [] + for entryElem in entryElements: + key = entryElem.attrib['key'] + valueElements = elem.findall('Value') + values = [] + for valueElem in valueElements: + values.append(valueElem.text) + entry = {'key': key, 'values': values} + entries.append(entry) + capabilities[capabilityID] = entries + extra = {'description': description, 'state': state, + 'capabilities': capabilities} + return IBMNodeLocation(id=location.findtext('ID'), + name=nameVal, + country=countryVal.strip(), + driver=self.connection.driver, + extra=extra) + + def _to_node_sizes(self, object): + # Converts SCE SupportedInstanceTypes object to + # a list of Libcloud NodeSize objects + return [self._to_node_size(iType) for iType in + object.findall('InstanceType')] + + def _to_node_size(self, object): + # Converts to an SCE InstanceType to a Libcloud NodeSize + return NodeSize(object.findtext('ID'), + object.findtext('Label'), + None, + None, + None, + 
object.findtext('Price/Rate'), + self.connection.driver) + + def _to_volumes(self, object): + return [self._to_volume(iType) for iType in + object.findall('Volume')] + + def _to_volume(self, object): + # Converts an SCE Volume to a Libcloud StorageVolume + extra = {'state': object.findtext('State'), + 'location': object.findtext('Location'), + 'instanceID': object.findtext('instanceID'), + 'owner': object.findtext('Owner'), + 'format': object.findtext('Format'), + 'createdTime': object.findtext('CreatedTime'), + 'storageAreaID': object.findtext('StorageArea/ID')} + return StorageVolume(object.findtext('ID'), + object.findtext('Name'), + object.findtext('Size'), + self.connection.driver, + extra) + + def _to_volume_offerings(self, object): + return [self._to_volume_offering(iType) for iType in + object.findall('Offerings')] + + def _to_volume_offering(self, object): + # Converts an SCE DescribeVolumeOfferingsResponse/Offerings XML object + # to an SCE VolumeOffering + extra = {'label': object.findtext('Label'), + 'supported_sizes': object.findtext('SupportedSizes'), + 'formats': object.findall('SupportedFormats/Format/ID'), + 'price': object.findall('Price')} + return VolumeOffering(object.findtext('ID'), + object.findtext('Name'), + object.findtext('Location'), + extra) + + def _to_addresses(self, object): + # Converts an SCE DescribeAddressesResponse XML object to a list of + # Address objects + return [self._to_address(iType) for iType in + object.findall('Address')] + + def _to_address(self, object): + # Converts an SCE DescribeAddressesResponse/Address XML object to + # an Address object + extra = {'location': object.findtext('Location'), + 'type': object.findtext('Label'), + 'created_time': object.findtext('SupportedSizes'), + 'hostname': object.findtext('Hostname'), + 'instance_ids': object.findtext('InstanceID'), + 'vlan': object.findtext('VLAN'), + 'owner': object.findtext('owner'), + 'mode': object.findtext('Mode'), + 'offering_id': 
object.findtext('OfferingID')} + return Address(object.findtext('ID'), + object.findtext('IP'), + object.findtext('State'), + extra) diff -Nru libcloud-0.5.0/libcloud/compute/drivers/ikoula.py libcloud-0.15.1/libcloud/compute/drivers/ikoula.py --- libcloud-0.5.0/libcloud/compute/drivers/ikoula.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/ikoula.py 2013-12-12 09:01:49.000000000 +0000 @@ -0,0 +1,31 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from libcloud.compute.providers import Provider +from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver + +__all__ = [ + 'IkoulaNodeDriver' +] + + +class IkoulaNodeDriver(CloudStackNodeDriver): + type = Provider.IKOULA + name = 'Ikoula' + website = 'http://express.ikoula.co.uk/cloudstack' + + # API endpoint info + host = 'cloudstack.ikoula.com' + path = '/client/api' diff -Nru libcloud-0.5.0/libcloud/compute/drivers/__init__.py libcloud-0.15.1/libcloud/compute/drivers/__init__.py --- libcloud-0.5.0/libcloud/compute/drivers/__init__.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/__init__.py 2013-11-29 12:35:04.000000000 +0000 @@ -18,20 +18,23 @@ """ __all__ = [ + 'abiquo', 'brightbox', 'bluebox', 'dummy', 'ec2', 'ecp', + 'elasticstack', 'elastichosts', 'cloudsigma', + 'gce', 'gogrid', - 'ibm_sbc', + 'hostvirtual', + 'ibm_sce', 'linode', 'opennebula', 'rackspace', 'rimuhosting', - 'slicehost', 'softlayer', 'vcloud', 'voxel', diff -Nru libcloud-0.5.0/libcloud/compute/drivers/joyent.py libcloud-0.15.1/libcloud/compute/drivers/joyent.py --- libcloud-0.5.0/libcloud/compute/drivers/joyent.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/joyent.py 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,222 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Joyent Cloud (http://www.joyentcloud.com) driver. +""" + +import base64 + +try: + import simplejson as json +except: + import json + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import b + +from libcloud.common.types import LibcloudError +from libcloud.compute.providers import Provider +from libcloud.common.base import JsonResponse, ConnectionUserAndKey +from libcloud.compute.types import NodeState, InvalidCredsError +from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeSize +from libcloud.utils.networking import is_private_subnet + +API_HOST_SUFFIX = '.api.joyentcloud.com' +API_VERSION = '~6.5' + + +NODE_STATE_MAP = { + 'provisioning': NodeState.PENDING, + 'running': NodeState.RUNNING, + 'stopping': NodeState.TERMINATED, + 'stopped': NodeState.TERMINATED, + 'deleted': NodeState.TERMINATED +} + +VALID_REGIONS = ['us-east-1', 'us-west-1', 'us-sw-1', 'eu-ams-1'] +DEFAULT_REGION = 'us-east-1' + + +class JoyentResponse(JsonResponse): + """ + Joyent response class. + """ + + valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED, + httplib.NO_CONTENT] + + def parse_error(self): + if self.status == httplib.UNAUTHORIZED: + data = self.parse_body() + raise InvalidCredsError(data['code'] + ': ' + data['message']) + return self.body + + def success(self): + return self.status in self.valid_response_codes + + +class JoyentConnection(ConnectionUserAndKey): + """ + Joyent connection class. 
+ """ + + responseCls = JoyentResponse + + allow_insecure = False + + def add_default_headers(self, headers): + headers['Accept'] = 'application/json' + headers['Content-Type'] = 'application/json; charset=UTF-8' + headers['X-Api-Version'] = API_VERSION + + user_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key))) + headers['Authorization'] = 'Basic %s' % (user_b64.decode('utf-8')) + return headers + + +class JoyentNodeDriver(NodeDriver): + """ + Joyent node driver class. + """ + + type = Provider.JOYENT + name = 'Joyent' + website = 'http://www.joyentcloud.com' + connectionCls = JoyentConnection + features = {'create_node': ['generates_password']} + + def __init__(self, key, secret=None, secure=True, host=None, port=None, + region=DEFAULT_REGION, **kwargs): + # Location is here for backward compatibility reasons + if 'location' in kwargs: + region = kwargs['location'] + + if region not in VALID_REGIONS: + msg = 'Invalid region: "%s". Valid region: %s' + raise LibcloudError(msg % (region, + ', '.join(VALID_REGIONS)), driver=self) + + super(JoyentNodeDriver, self).__init__(key=key, secret=secret, + secure=secure, host=host, + port=port, region=region, + **kwargs) + self.connection.host = region + API_HOST_SUFFIX + + def list_images(self): + result = self.connection.request('/my/datasets').object + + images = [] + for value in result: + extra = {'type': value['type'], 'urn': value['urn'], + 'os': value['os'], 'default': value['default']} + image = NodeImage(id=value['id'], name=value['name'], + driver=self.connection.driver, extra=extra) + images.append(image) + + return images + + def list_sizes(self): + result = self.connection.request('/my/packages').object + + sizes = [] + for value in result: + size = NodeSize(id=value['name'], name=value['name'], + ram=value['memory'], disk=value['disk'], + bandwidth=None, price=0.0, + driver=self.connection.driver) + sizes.append(size) + + return sizes + + def list_nodes(self): + result = 
self.connection.request('/my/machines').object + + nodes = [] + for value in result: + node = self._to_node(value) + nodes.append(node) + + return nodes + + def reboot_node(self, node): + data = json.dumps({'action': 'reboot'}) + result = self.connection.request('/my/machines/%s' % (node.id), + data=data, method='POST') + return result.status == httplib.ACCEPTED + + def destroy_node(self, node): + result = self.connection.request('/my/machines/%s' % (node.id), + method='DELETE') + return result.status == httplib.NO_CONTENT + + def create_node(self, **kwargs): + name = kwargs['name'] + size = kwargs['size'] + image = kwargs['image'] + + data = json.dumps({'name': name, 'package': size.id, + 'dataset': image.id}) + result = self.connection.request('/my/machines', data=data, + method='POST') + return self._to_node(result.object) + + def ex_stop_node(self, node): + """ + Stop node + + :param node: The node to be stopped + :type node: :class:`Node` + + :rtype: ``bool`` + """ + data = json.dumps({'action': 'stop'}) + result = self.connection.request('/my/machines/%s' % (node.id), + data=data, method='POST') + return result.status == httplib.ACCEPTED + + def ex_start_node(self, node): + """ + Start node + + :param node: The node to be stopped + :type node: :class:`Node` + + :rtype: ``bool`` + """ + data = json.dumps({'action': 'start'}) + result = self.connection.request('/my/machines/%s' % (node.id), + data=data, method='POST') + return result.status == httplib.ACCEPTED + + def _to_node(self, data): + state = NODE_STATE_MAP[data['state']] + public_ips = [] + private_ips = [] + extra = {} + + for ip in data['ips']: + if is_private_subnet(ip): + private_ips.append(ip) + else: + public_ips.append(ip) + + if 'credentials' in data['metadata']: + extra['password'] = data['metadata']['credentials']['root'] + + node = Node(id=data['id'], name=data['name'], state=state, + public_ips=public_ips, private_ips=private_ips, + driver=self.connection.driver, extra=extra) + return node 
diff -Nru libcloud-0.5.0/libcloud/compute/drivers/kili.py libcloud-0.15.1/libcloud/compute/drivers/kili.py --- libcloud-0.5.0/libcloud/compute/drivers/kili.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/kili.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,87 @@ +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +HP Public cloud driver which is esentially just a small wrapper around +OpenStack driver. 
+""" + +from libcloud.compute.types import Provider, LibcloudError +from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection +from libcloud.compute.drivers.openstack import OpenStack_1_1_NodeDriver + +__all__ = [ + 'KiliCloudNodeDriver' +] + +ENDPOINT_ARGS = { + 'service_type': 'compute', + 'name': 'nova', + 'region': 'RegionOne' +} + +AUTH_URL = 'https://api.kili.io/keystone/v2.0/tokens' + + +class KiliCloudConnection(OpenStack_1_1_Connection): + _auth_version = '2.0_password' + + def __init__(self, *args, **kwargs): + self.region = kwargs.pop('region', None) + self.get_endpoint_args = kwargs.pop('get_endpoint_args', None) + super(KiliCloudConnection, self).__init__(*args, **kwargs) + + def get_endpoint(self): + if not self.get_endpoint_args: + raise LibcloudError( + 'KiliCloudConnection must have get_endpoint_args set') + + if '2.0_password' in self._auth_version: + ep = self.service_catalog.get_endpoint(**self.get_endpoint_args) + else: + raise LibcloudError( + 'Auth version "%s" not supported' % (self._auth_version)) + + public_url = ep.get('publicURL', None) + + if not public_url: + raise LibcloudError('Could not find specified endpoint') + + return public_url + + +class KiliCloudNodeDriver(OpenStack_1_1_NodeDriver): + name = 'Kili Public Cloud' + website = 'http://kili.io/' + connectionCls = KiliCloudConnection + type = Provider.HPCLOUD + + def __init__(self, key, secret, tenant_name, secure=True, + host=None, port=None, **kwargs): + """ + Note: tenant_name argument is required for Kili cloud. 
+ """ + self.tenant_name = tenant_name + super(KiliCloudNodeDriver, self).__init__(key=key, secret=secret, + secure=secure, host=host, + port=port, + **kwargs) + + def _ex_connection_class_kwargs(self): + kwargs = self.openstack_connection_kwargs() + kwargs['get_endpoint_args'] = ENDPOINT_ARGS + kwargs['ex_force_auth_url'] = AUTH_URL + kwargs['ex_tenant_name'] = self.tenant_name + + return kwargs diff -Nru libcloud-0.5.0/libcloud/compute/drivers/ktucloud.py libcloud-0.15.1/libcloud/compute/drivers/ktucloud.py --- libcloud-0.5.0/libcloud/compute/drivers/ktucloud.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/ktucloud.py 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,103 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.compute.providers import Provider +from libcloud.compute.base import Node, NodeImage, NodeSize +from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver + + +class KTUCloudNodeDriver(CloudStackNodeDriver): + "Driver for KTUCloud Compute platform." 
+ + EMPTY_DISKOFFERINGID = '0' + type = Provider.KTUCLOUD + name = 'KTUCloud' + website = 'https://ucloudbiz.olleh.com/' + + def list_images(self, location=None): + args = { + 'templatefilter': 'executable' + } + if location is not None: + args['zoneid'] = location.id + + imgs = self._sync_request(command='listAvailableProductTypes', + method='GET') + images = [] + + for img in imgs['producttypes']: + images.append( + NodeImage( + img['serviceofferingid'], + img['serviceofferingdesc'], + self, + {'hypervisor': '', + 'format': '', + 'os': img['templatedesc'], + 'templateid': img['templateid'], + 'zoneid': img['zoneid']} + ) + ) + + return images + + def list_sizes(self, location=None): + szs = self._sync_request('listAvailableProductTypes') + sizes = [] + for sz in szs['producttypes']: + diskofferingid = sz.get('diskofferingid', + self.EMPTY_DISKOFFERINGID) + sizes.append(NodeSize( + diskofferingid, + sz['diskofferingdesc'], + 0, 0, 0, 0, self) + ) + return sizes + + def create_node(self, name, size, image, location=None, **kwargs): + params = {'displayname': name, + 'serviceofferingid': image.id, + 'templateid': str(image.extra['templateid']), + 'zoneid': str(image.extra['zoneid'])} + + usageplantype = kwargs.pop('usageplantype', None) + if usageplantype is None: + params['usageplantype'] = 'hourly' + else: + params['usageplantype'] = usageplantype + + if size.id != self.EMPTY_DISKOFFERINGID: + params['diskofferingid'] = size.id + + result = self._async_request( + command='deployVirtualMachine', + params=params, + method='GET') + + node = result['virtualmachine'] + + return Node( + id=node['id'], + name=node['displayname'], + state=self.NODE_STATE_MAP[node['state']], + public_ips=[], + private_ips=[], + driver=self, + extra={ + 'zoneid': image.extra['zoneid'], + 'ip_addresses': [], + 'forwarding_rules': [], + } + ) diff -Nru libcloud-0.5.0/libcloud/compute/drivers/libvirt_driver.py libcloud-0.15.1/libcloud/compute/drivers/libvirt_driver.py --- 
libcloud-0.5.0/libcloud/compute/drivers/libvirt_driver.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/libvirt_driver.py 2014-07-02 18:47:55.000000000 +0000 @@ -0,0 +1,335 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import with_statement + +import re +import os +import time +import platform +import subprocess +import mimetypes + +from os.path import join as pjoin +from collections import defaultdict + +try: + from lxml import etree as ET +except ImportError: + from xml.etree import ElementTree as ET + +from libcloud.compute.base import NodeDriver, Node +from libcloud.compute.base import NodeState +from libcloud.compute.types import Provider +from libcloud.utils.networking import is_public_subnet + +try: + import libvirt + have_libvirt = True +except ImportError: + have_libvirt = False + + +class LibvirtNodeDriver(NodeDriver): + """ + Libvirt (http://libvirt.org/) node driver. + + To enable debug mode, set LIBVIR_DEBUG environment variable. 
+ """ + + type = Provider.LIBVIRT + name = 'Libvirt' + website = 'http://libvirt.org/' + + NODE_STATE_MAP = { + 0: NodeState.TERMINATED, # no state + 1: NodeState.RUNNING, # domain is running + 2: NodeState.PENDING, # domain is blocked on resource + 3: NodeState.TERMINATED, # domain is paused by user + 4: NodeState.TERMINATED, # domain is being shut down + 5: NodeState.TERMINATED, # domain is shut off + 6: NodeState.UNKNOWN, # domain is crashed + 7: NodeState.UNKNOWN, # domain is suspended by guest power management + } + + def __init__(self, uri): + """ + :param uri: Hypervisor URI (e.g. vbox:///session, qemu:///system, + etc.). + :type uri: ``str`` + """ + if not have_libvirt: + raise RuntimeError('Libvirt driver requires \'libvirt\' Python ' + + 'package') + + self._uri = uri + self.connection = libvirt.open(uri) + + def list_nodes(self): + domains = self.connection.listAllDomains() + nodes = self._to_nodes(domains=domains) + return nodes + + def reboot_node(self, node): + domain = self._get_domain_for_node(node=node) + return domain.reboot(flags=0) == 0 + + def destroy_node(self, node): + domain = self._get_domain_for_node(node=node) + return domain.destroy() == 0 + + def ex_start_node(self, node): + """ + Start a stopped node. + + :param node: Node which should be used + :type node: :class:`Node` + + :rtype: ``bool`` + """ + domain = self._get_domain_for_node(node=node) + return domain.create() == 0 + + def ex_shutdown_node(self, node): + """ + Shutdown a running node. + + Note: Usually this will result in sending an ACPI event to the node. + + :param node: Node which should be used + :type node: :class:`Node` + + :rtype: ``bool`` + """ + domain = self._get_domain_for_node(node=node) + return domain.shutdown() == 0 + + def ex_suspend_node(self, node): + """ + Suspend a running node. 
+ + :param node: Node which should be used + :type node: :class:`Node` + + :rtype: ``bool`` + """ + domain = self._get_domain_for_node(node=node) + return domain.suspend() == 0 + + def ex_resume_node(self, node): + """ + Resume a suspended node. + + :param node: Node which should be used + :type node: :class:`Node` + + :rtype: ``bool`` + """ + domain = self._get_domain_for_node(node=node) + return domain.resume() == 0 + + def ex_take_node_screenshot(self, node, directory, screen=0): + """ + Take a screenshot of a monitoring of a running instance. + + :param node: Node to take the screenshot of. + :type node: :class:`libcloud.compute.base.Node` + + :param directory: Path where the screenshot will be saved. + :type directory: ``str`` + + :param screen: ID of the monitor to take the screenshot of. + :type screen: ``int`` + + :return: Full path where the screenshot has been saved. + :rtype: ``str`` + """ + if not os.path.exists(directory) or not os.path.isdir(directory): + raise ValueError('Invalid value for directory argument') + + domain = self._get_domain_for_node(node=node) + stream = self.connection.newStream() + mime_type = domain.screenshot(stream=stream, screen=0) + extensions = mimetypes.guess_all_extensions(type=mime_type) + + if extensions: + extension = extensions[0] + else: + extension = '.png' + + name = 'screenshot-%s%s' % (int(time.time()), extension) + file_path = pjoin(directory, name) + + with open(file_path, 'wb') as fp: + def write(stream, buf, opaque): + fp.write(buf) + + stream.recvAll(write, None) + + try: + stream.finish() + except Exception: + # Finish is not supported by all backends + pass + + return file_path + + def ex_get_hypervisor_hostname(self): + """ + Return a system hostname on which the hypervisor is running. + """ + hostname = self.connection.getHostname() + return hostname + + def ex_get_hypervisor_sysinfo(self): + """ + Retrieve hypervisor system information. 
+ + :rtype: ``dict`` + """ + xml = self.connection.getSysinfo() + etree = ET.XML(xml) + + attributes = ['bios', 'system', 'processor', 'memory_device'] + + sysinfo = {} + for attribute in attributes: + element = etree.find(attribute) + entries = self._get_entries(element=element) + sysinfo[attribute] = entries + + return sysinfo + + def _to_nodes(self, domains): + nodes = [self._to_node(domain=domain) for domain in domains] + return nodes + + def _to_node(self, domain): + state, max_mem, memory, vcpu_count, used_cpu_time = domain.info() + state = self.NODE_STATE_MAP.get(state, NodeState.UNKNOWN) + + public_ips, private_ips = [], [] + + ip_addresses = self._get_ip_addresses_for_domain(domain) + + for ip_address in ip_addresses: + if is_public_subnet(ip_address): + public_ips.append(ip_address) + else: + private_ips.append(ip_address) + + extra = {'uuid': domain.UUIDString(), 'os_type': domain.OSType(), + 'types': self.connection.getType(), + 'used_memory': memory / 1024, 'vcpu_count': vcpu_count, + 'used_cpu_time': used_cpu_time} + + node = Node(id=domain.ID(), name=domain.name(), state=state, + public_ips=public_ips, private_ips=private_ips, + driver=self, extra=extra) + node._uuid = domain.UUIDString() # we want to use a custom UUID + return node + + def _get_ip_addresses_for_domain(self, domain): + """ + Retrieve IP addresses for the provided domain. + + Note: This functionality is currently only supported on Linux and + only works if this code is run on the same machine as the VMs run + on. + + :return: IP addresses for the provided domain. 
+ :rtype: ``list`` + """ + result = [] + + if platform.system() != 'Linux': + # Only Linux is supported atm + return result + + mac_addresses = self._get_mac_addresses_for_domain(domain=domain) + + cmd = ['arp', '-an'] + child = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, _ = child.communicate() + arp_table = self._parse_arp_table(arp_output=stdout) + + for mac_address in mac_addresses: + if mac_address in arp_table: + ip_addresses = arp_table[mac_address] + result.extend(ip_addresses) + + return result + + def _get_mac_addresses_for_domain(self, domain): + """ + Parses network interface MAC addresses from the provided domain. + """ + xml = domain.XMLDesc() + etree = ET.XML(xml) + elems = etree.findall("devices/interface[@type='network']/mac") + + result = [] + for elem in elems: + mac_address = elem.get('address') + result.append(mac_address) + + return result + + def _get_domain_for_node(self, node): + """ + Return libvirt domain object for the provided node. + """ + domain = self.connection.lookupByUUIDString(node.uuid) + return domain + + def _get_entries(self, element): + """ + Parse entries dictionary. + + :rtype: ``dict`` + """ + elements = element.findall('entry') + + result = {} + for element in elements: + name = element.get('name') + value = element.text + result[name] = value + + return result + + def _parse_arp_table(self, arp_output): + """ + Parse arp command output and return a dictionary which maps mac address + to an IP address. + + :return: Dictionary which maps mac address to IP address. 
+ :rtype: ``dict`` + """ + lines = arp_output.split('\n') + + arp_table = defaultdict(list) + for line in lines: + match = re.match('.*?\((.*?)\) at (.*?)\s+', line) + + if not match: + continue + + groups = match.groups() + ip_address = groups[0] + mac_address = groups[1] + arp_table[mac_address].append(ip_address) + + return arp_table diff -Nru libcloud-0.5.0/libcloud/compute/drivers/linode.py libcloud-0.15.1/libcloud/compute/drivers/linode.py --- libcloud-0.5.0/libcloud/compute/drivers/linode.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/linode.py 2014-06-11 14:27:59.000000000 +0000 @@ -15,9 +15,9 @@ """libcloud driver for the Linode(R) API -This driver implements all libcloud functionality for the Linode API. Since the -API is a bit more fine-grained, create_node abstracts a significant amount of -work (and may take a while to run). +This driver implements all libcloud functionality for the Linode API. +Since the API is a bit more fine-grained, create_node abstracts a significant +amount of work (and may take a while to run). Linode home page http://www.linode.com/ Linode API documentation http://www.linode.com/api/ @@ -26,156 +26,28 @@ Linode(R) is a registered trademark of Linode, LLC. 
""" -import itertools -import os -from copy import copy +import os try: - import json -except: import simplejson as json +except ImportError: + import json + +import itertools +import binascii + +from copy import copy -from libcloud.common.base import ConnectionKey, Response -from libcloud.common.types import InvalidCredsError, MalformedResponseError +from libcloud.utils.py3 import PY3 + +from libcloud.common.linode import (API_ROOT, LinodeException, + LinodeConnection, LINODE_PLAN_IDS) from libcloud.compute.types import Provider, NodeState from libcloud.compute.base import NodeDriver, NodeSize, Node, NodeLocation from libcloud.compute.base import NodeAuthPassword, NodeAuthSSHKey from libcloud.compute.base import NodeImage -# Where requests go - in beta situations, this information may change. -LINODE_API = "api.linode.com" -LINODE_ROOT = "/" - -# Map of TOTALRAM to PLANID, allows us to figure out what plan -# a particular node is on (updated with new plan sizes 6/28/10) -LINODE_PLAN_IDS = {512:'1', - 768:'2', - 1024:'3', - 1536:'4', - 2048:'5', - 4096:'6', - 8192:'7', - 12288:'8', - 16384:'9', - 20480:'10'} - - -class LinodeException(Exception): - """Error originating from the Linode API - - This class wraps a Linode API error, a list of which is available in the - API documentation. All Linode API errors are a numeric code and a - human-readable description. - """ - def __str__(self): - return "(%u) %s" % (self.args[0], self.args[1]) - def __repr__(self): - return "" % (self.args[0], self.args[1]) - - -class LinodeResponse(Response): - """Linode API response - - Wraps the HTTP response returned by the Linode API, which should be JSON in - this structure: - - { - "ERRORARRAY": [ ... ], - "DATA": [ ... ], - "ACTION": " ... " - } - - libcloud does not take advantage of batching, so a response will always - reflect the above format. 
A few weird quirks are caught here as well.""" - def __init__(self, response): - """Instantiate a LinodeResponse from the HTTP response - - @keyword response: The raw response returned by urllib - @return: parsed L{LinodeResponse}""" - self.body = response.read() - self.status = response.status - self.headers = dict(response.getheaders()) - self.error = response.reason - self.invalid = LinodeException(0xFF, - "Invalid JSON received from server") - - # Move parse_body() to here; we can't be sure of failure until we've - # parsed the body into JSON. - self.objects, self.errors = self.parse_body() - if not self.success(): - # Raise the first error, as there will usually only be one - raise self.errors[0] - - def parse_body(self): - """Parse the body of the response into JSON objects - - If the response chokes the parser, action and data will be returned as - None and errorarray will indicate an invalid JSON exception. - - @return: C{list} of objects and C{list} of errors""" - try: - js = json.loads(self.body) - except: - raise MalformedResponseError("Failed to parse JSON", body=self.body, - driver=LinodeNodeDriver) - - try: - if isinstance(js, dict): - # solitary response - promote to list - js = [js] - ret = [] - errs = [] - for obj in js: - if ("DATA" not in obj or "ERRORARRAY" not in obj - or "ACTION" not in obj): - ret.append(None) - errs.append(self.invalid) - continue - ret.append(obj["DATA"]) - errs.extend(self._make_excp(e) for e in obj["ERRORARRAY"]) - return (ret, errs) - except: - return (None, [self.invalid]) - - def success(self): - """Check the response for success - - The way we determine success is by the presence of an error in - ERRORARRAY. If one is there, we assume the whole request failed. 
- - @return: C{bool} indicating a successful request""" - return len(self.errors) == 0 - - def _make_excp(self, error): - """Convert an API error to a LinodeException instance - - @keyword error: JSON object containing C{ERRORCODE} and C{ERRORMESSAGE} - @type error: dict""" - if "ERRORCODE" not in error or "ERRORMESSAGE" not in error: - return None - if error["ERRORCODE"] == 4: - return InvalidCredsError(error["ERRORMESSAGE"]) - return LinodeException(error["ERRORCODE"], error["ERRORMESSAGE"]) - - -class LinodeConnection(ConnectionKey): - """A connection to the Linode API - - Wraps SSL connections to the Linode API, automagically injecting the - parameters that the API needs for each request.""" - host = LINODE_API - responseCls = LinodeResponse - - def add_default_params(self, params): - """Add parameters that are necessary for every request - - This method adds C{api_key} and C{api_responseFormat} to the request.""" - params["api_key"] = self.key - # Be explicit about this in case the default changes. - params["api_responseFormat"] = "json" - return params - class LinodeNodeDriver(NodeDriver): """libcloud driver for the Linode API @@ -199,50 +71,63 @@ """ type = Provider.LINODE name = "Linode" + website = 'http://www.linode.com/' connectionCls = LinodeConnection _linode_plan_ids = LINODE_PLAN_IDS + features = {'create_node': ['ssh_key', 'password']} def __init__(self, key): """Instantiate the driver with the given API key - @keyword key: the API key to use - @type key: C{str}""" + :param key: the API key to use (required) + :type key: ``str`` + + :rtype: ``None`` + """ self.datacenter = None NodeDriver.__init__(self, key) # Converts Linode's state from DB to a NodeState constant. 
LINODE_STATES = { - -2: NodeState.UNKNOWN, # Boot Failed - -1: NodeState.PENDING, # Being Created - 0: NodeState.PENDING, # Brand New - 1: NodeState.RUNNING, # Running - 2: NodeState.TERMINATED, # Powered Off - 3: NodeState.REBOOTING, # Shutting Down - 4: NodeState.UNKNOWN # Reserved + (-2): NodeState.UNKNOWN, # Boot Failed + (-1): NodeState.PENDING, # Being Created + 0: NodeState.PENDING, # Brand New + 1: NodeState.RUNNING, # Running + 2: NodeState.TERMINATED, # Powered Off + 3: NodeState.REBOOTING, # Shutting Down + 4: NodeState.UNKNOWN # Reserved } def list_nodes(self): - """List all Linodes that the API key can access + """ + List all Linodes that the API key can access - This call will return all Linodes that the API key in use has access to. + This call will return all Linodes that the API key in use has access + to. If a node is in this list, rebooting will work; however, creation and destruction are a separate grant. - @return: C{list} of L{Node} objects that the API key can access""" - params = { "api_action": "linode.list" } - data = self.connection.request(LINODE_ROOT, params=params).objects[0] + :return: List of node objects that the API key can access + :rtype: ``list`` of :class:`Node` + """ + params = {"api_action": "linode.list"} + data = self.connection.request(API_ROOT, params=params).objects[0] return self._to_nodes(data) def reboot_node(self, node): - """Reboot the given Linode + """ + Reboot the given Linode Will issue a shutdown job followed by a boot job, using the last booted configuration. In most cases, this will be the only configuration. 
- @keyword node: the Linode to reboot - @type node: L{Node}""" - params = { "api_action": "linode.reboot", "LinodeID": node.id } - self.connection.request(LINODE_ROOT, params=params) + :param node: the Linode to reboot + :type node: :class:`Node` + + :rtype: ``bool`` + """ + params = {"api_action": "linode.reboot", "LinodeID": node.id} + self.connection.request(API_ROOT, params=params) return True def destroy_node(self, node): @@ -254,13 +139,16 @@ In most cases, all disk images must be removed from a Linode before the Linode can be removed; however, this call explicitly skips those - safeguards. There is no going back from this method. + safeguards. There is no going back from this method. + + :param node: the Linode to destroy + :type node: :class:`Node` - @keyword node: the Linode to destroy - @type node: L{Node}""" - params = { "api_action": "linode.delete", "LinodeID": node.id, - "skipChecks": True } - self.connection.request(LINODE_ROOT, params=params) + :rtype: ``bool`` + """ + params = {"api_action": "linode.delete", "LinodeID": node.id, + "skipChecks": True} + self.connection.request(API_ROOT, params=params) return True def create_node(self, **kwargs): @@ -273,54 +161,55 @@ Note that there is a safety valve of 5 Linodes per hour, in order to prevent a runaway script from ruining your day. 
- @keyword name: the name to assign the Linode (mandatory) - @type name: C{str} + :keyword name: the name to assign the Linode (mandatory) + :type name: ``str`` - @keyword image: which distribution to deploy on the Linode (mandatory) - @type image: L{NodeImage} + :keyword image: which distribution to deploy on the Linode (mandatory) + :type image: :class:`NodeImage` - @keyword size: the plan size to create (mandatory) - @type size: L{NodeSize} + :keyword size: the plan size to create (mandatory) + :type size: :class:`NodeSize` - @keyword auth: an SSH key or root password (mandatory) - @type auth: L{NodeAuthSSHKey} or L{NodeAuthPassword} + :keyword auth: an SSH key or root password (mandatory) + :type auth: :class:`NodeAuthSSHKey` or :class:`NodeAuthPassword` - @keyword location: which datacenter to create the Linode in - @type location: L{NodeLocation} + :keyword location: which datacenter to create the Linode in + :type location: :class:`NodeLocation` - @keyword ex_swap: size of the swap partition in MB (128) - @type ex_swap: C{int} + :keyword ex_swap: size of the swap partition in MB (128) + :type ex_swap: ``int`` - @keyword ex_rsize: size of the root partition in MB (plan size - swap). - @type ex_rsize: C{int} + :keyword ex_rsize: size of the root partition in MB (plan size - swap). + :type ex_rsize: ``int`` - @keyword ex_kernel: a kernel ID from avail.kernels (Latest 2.6 Stable). - @type ex_kernel: C{str} + :keyword ex_kernel: a kernel ID from avail.kernels (Latest 2.6 Stable). 
+ :type ex_kernel: ``str`` - @keyword ex_payment: one of 1, 12, or 24; subscription length (1) - @type ex_payment: C{int} + :keyword ex_payment: one of 1, 12, or 24; subscription length (1) + :type ex_payment: ``int`` - @keyword ex_comment: a small comment for the configuration (libcloud) - @type ex_comment: C{str} + :keyword ex_comment: a small comment for the configuration (libcloud) + :type ex_comment: ``str`` - @keyword ex_private: whether or not to request a private IP (False) - @type ex_private: C{bool} + :keyword ex_private: whether or not to request a private IP (False) + :type ex_private: ``bool`` - @keyword lconfig: what to call the configuration (generated) - @type lconfig: C{str} + :keyword lconfig: what to call the configuration (generated) + :type lconfig: ``str`` - @keyword lroot: what to call the root image (generated) - @type lroot: C{str} + :keyword lroot: what to call the root image (generated) + :type lroot: ``str`` - @keyword lswap: what to call the swap space (generated) - @type lswap: C{str} + :keyword lswap: what to call the swap space (generated) + :type lswap: ``str`` - @return: a L{Node} representing the newly-created Linode + :return: Node representing the newly-created Linode + :rtype: :class:`Node` """ name = kwargs["name"] image = kwargs["image"] size = kwargs["size"] - auth = kwargs["auth"] + auth = self._get_and_check_auth(kwargs["auth"]) # Pick a location (resolves LIBCLOUD-41 in JIRA) if "location" in kwargs: @@ -340,7 +229,8 @@ raise LinodeException(0xFB, "Invalid plan ID -- avail.plans") # Payment schedule - payment = "1" if "ex_payment" not in kwargs else str(kwargs["ex_payment"]) + payment = "1" if "ex_payment" not in kwargs else \ + str(kwargs["ex_payment"]) if payment not in ["1", "12", "24"]: raise LinodeException(0xFB, "Invalid subscription (1, 12, 24)") @@ -354,15 +244,17 @@ if not ssh and not root: raise LinodeException(0xFB, "Need SSH key or root password") - if not root is None and len(root) < 6: + if root is not None 
and len(root) < 6: raise LinodeException(0xFB, "Root password is too short") # Swap size - try: swap = 128 if "ex_swap" not in kwargs else int(kwargs["ex_swap"]) - except: raise LinodeException(0xFB, "Need an integer swap size") + try: + swap = 128 if "ex_swap" not in kwargs else int(kwargs["ex_swap"]) + except: + raise LinodeException(0xFB, "Need an integer swap size") # Root partition size - imagesize = (size.disk - swap) if "ex_rsize" not in kwargs else \ + imagesize = (size.disk - swap) if "ex_rsize" not in kwargs else\ int(kwargs["ex_rsize"]) if (imagesize + swap) > size.disk: raise LinodeException(0xFB, "Total disk images are too big") @@ -378,37 +270,29 @@ kernel = kwargs["ex_kernel"] else: if image.extra['64bit']: - kernel = 111 if image.extra['pvops'] else 107 + # For a list of available kernel ids, see + # https://www.linode.com/kernels/ + kernel = 138 else: - kernel = 110 if image.extra['pvops'] else 60 - params = { "api_action": "avail.kernels" } - kernels = self.connection.request(LINODE_ROOT, params=params).objects[0] + kernel = 137 + params = {"api_action": "avail.kernels"} + kernels = self.connection.request(API_ROOT, params=params).objects[0] if kernel not in [z["KERNELID"] for z in kernels]: raise LinodeException(0xFB, "Invalid kernel -- avail.kernels") # Comments - comments = "Created by Apache libcloud " if \ + comments = "Created by Apache libcloud " if\ "ex_comment" not in kwargs else kwargs["ex_comment"] - # Labels - label = { - "lconfig": "[%s] Configuration Profile" % name, - "lroot": "[%s] %s Disk Image" % (name, image.name), - "lswap": "[%s] Swap Space" % name - } - for what in ["lconfig", "lroot", "lswap"]: - if what in kwargs: - label[what] = kwargs[what] - # Step 1: linode.create params = { - "api_action": "linode.create", + "api_action": "linode.create", "DatacenterID": chosen, - "PlanID": size.id, - "PaymentTerm": payment + "PlanID": size.id, + "PaymentTerm": payment } - data = self.connection.request(LINODE_ROOT, 
params=params).objects[0] - linode = { "id": data["LinodeID"] } + data = self.connection.request(API_ROOT, params=params).objects[0] + linode = {"id": data["LinodeID"]} # Step 1b. linode.update to rename the Linode params = { @@ -416,97 +300,123 @@ "LinodeID": linode["id"], "Label": name } - self.connection.request(LINODE_ROOT, params=params) + self.connection.request(API_ROOT, params=params) # Step 1c. linode.ip.addprivate if it was requested if "ex_private" in kwargs and kwargs["ex_private"]: params = { - "api_action": "linode.ip.addprivate", - "LinodeID": linode["id"] + "api_action": "linode.ip.addprivate", + "LinodeID": linode["id"] } - self.connection.request(LINODE_ROOT, params=params) + self.connection.request(API_ROOT, params=params) + + # Step 1d. Labels + # use the linode id as the name can be up to 63 chars and the labels + # are limited to 48 chars + label = { + "lconfig": "[%s] Configuration Profile" % linode["id"], + "lroot": "[%s] %s Disk Image" % (linode["id"], image.name), + "lswap": "[%s] Swap Space" % linode["id"] + } + for what in ["lconfig", "lroot", "lswap"]: + if what in kwargs: + label[what] = kwargs[what] # Step 2: linode.disk.createfromdistribution if not root: - root = os.urandom(8).encode('hex') + root = binascii.b2a_base64(os.urandom(8)).decode('ascii').strip() + params = { - "api_action": "linode.disk.createfromdistribution", - "LinodeID": linode["id"], - "DistributionID": image.id, - "Label": label["lroot"], - "Size": imagesize, - "rootPass": root, + "api_action": "linode.disk.createfromdistribution", + "LinodeID": linode["id"], + "DistributionID": image.id, + "Label": label["lroot"], + "Size": imagesize, + "rootPass": root, } - if ssh: params["rootSSHKey"] = ssh - data = self.connection.request(LINODE_ROOT, params=params).objects[0] + if ssh: + params["rootSSHKey"] = ssh + data = self.connection.request(API_ROOT, params=params).objects[0] linode["rootimage"] = data["DiskID"] # Step 3: linode.disk.create for swap params = { - 
"api_action": "linode.disk.create", - "LinodeID": linode["id"], - "Label": label["lswap"], - "Type": "swap", - "Size": swap + "api_action": "linode.disk.create", + "LinodeID": linode["id"], + "Label": label["lswap"], + "Type": "swap", + "Size": swap } - data = self.connection.request(LINODE_ROOT, params=params).objects[0] + data = self.connection.request(API_ROOT, params=params).objects[0] linode["swapimage"] = data["DiskID"] # Step 4: linode.config.create for main profile disks = "%s,%s,,,,,,," % (linode["rootimage"], linode["swapimage"]) params = { - "api_action": "linode.config.create", - "LinodeID": linode["id"], - "KernelID": kernel, - "Label": label["lconfig"], - "Comments": comments, - "DiskList": disks + "api_action": "linode.config.create", + "LinodeID": linode["id"], + "KernelID": kernel, + "Label": label["lconfig"], + "Comments": comments, + "DiskList": disks } - data = self.connection.request(LINODE_ROOT, params=params).objects[0] + data = self.connection.request(API_ROOT, params=params).objects[0] linode["config"] = data["ConfigID"] # Step 5: linode.boot params = { - "api_action": "linode.boot", - "LinodeID": linode["id"], - "ConfigID": linode["config"] + "api_action": "linode.boot", + "LinodeID": linode["id"], + "ConfigID": linode["config"] } - self.connection.request(LINODE_ROOT, params=params) + self.connection.request(API_ROOT, params=params) # Make a node out of it and hand it back - params = { "api_action": "linode.list", "LinodeID": linode["id"] } - data = self.connection.request(LINODE_ROOT, params=params).objects[0] - return self._to_nodes(data) + params = {"api_action": "linode.list", "LinodeID": linode["id"]} + data = self.connection.request(API_ROOT, params=params).objects[0] + nodes = self._to_nodes(data) + + if len(nodes) == 1: + node = nodes[0] + if getattr(auth, "generated", False): + node.extra['password'] = auth.password + return node + + return None def list_sizes(self, location=None): - """List available Linode plans + """ + List 
available Linode plans Gets the sizes that can be used for creating a Linode. Since available Linode plans vary per-location, this method can also be passed a location to filter the availability. - @keyword location: the facility to retrieve plans in - @type location: NodeLocation + :keyword location: the facility to retrieve plans in + :type location: :class:`NodeLocation` - @return: a C{list} of L{NodeSize}s""" - params = { "api_action": "avail.linodeplans" } - data = self.connection.request(LINODE_ROOT, params=params).objects[0] + :rtype: ``list`` of :class:`NodeSize` + """ + params = {"api_action": "avail.linodeplans"} + data = self.connection.request(API_ROOT, params=params).objects[0] sizes = [] for obj in data: n = NodeSize(id=obj["PLANID"], name=obj["LABEL"], ram=obj["RAM"], - disk=(obj["DISK"] * 1024), bandwidth=obj["XFER"], - price=obj["PRICE"], driver=self.connection.driver) + disk=(obj["DISK"] * 1024), bandwidth=obj["XFER"], + price=obj["PRICE"], driver=self.connection.driver) sizes.append(n) return sizes def list_images(self): - """List available Linux distributions + """ + List available Linux distributions Retrieve all Linux distributions that can be deployed to a Linode. - @return: a C{list} of L{NodeImage}s""" - params = { "api_action": "avail.distributions" } - data = self.connection.request(LINODE_ROOT, params=params).objects[0] + :rtype: ``list`` of :class:`NodeImage` + """ + params = {"api_action": "avail.distributions"} + data = self.connection.request(API_ROOT, params=params).objects[0] distros = [] for obj in data: i = NodeImage(id=obj["DISTRIBUTIONID"], @@ -518,19 +428,26 @@ return distros def list_locations(self): - """List available facilities for deployment + """ + List available facilities for deployment Retrieve all facilities that a Linode can be deployed in. 
- @return: a C{list} of L{NodeLocation}s""" - params = { "api_action": "avail.datacenters" } - data = self.connection.request(LINODE_ROOT, params=params).objects[0] + :rtype: ``list`` of :class:`NodeLocation` + """ + params = {"api_action": "avail.datacenters"} + data = self.connection.request(API_ROOT, params=params).objects[0] nl = [] for dc in data: country = None - if "USA" in dc["LOCATION"]: country = "US" - elif "UK" in dc["LOCATION"]: country = "GB" - else: country = "??" + if "USA" in dc["LOCATION"]: + country = "US" + elif "UK" in dc["LOCATION"]: + country = "GB" + elif "JP" in dc["LOCATION"]: + country = "JP" + else: + country = "??" nl.append(NodeLocation(dc["DATACENTERID"], dc["LOCATION"], country, @@ -538,17 +455,22 @@ return nl def linode_set_datacenter(self, dc): - """Set the default datacenter for Linode creation + """ + Set the default datacenter for Linode creation Since Linodes must be created in a facility, this function sets the - default that L{create_node} will use. If a C{location} keyword is not - passed to L{create_node}, this method must have already been used. + default that :class:`create_node` will use. If a location keyword is + not passed to :class:`create_node`, this method must have already been + used. 
+ + :keyword dc: the datacenter to create Linodes in unless specified + :type dc: :class:`NodeLocation` - @keyword dc: the datacenter to create Linodes in unless specified - @type dc: L{NodeLocation}""" + :rtype: ``bool`` + """ did = dc.id - params = { "api_action": "avail.datacenters" } - data = self.connection.request(LINODE_ROOT, params=params).objects[0] + params = {"api_action": "avail.datacenters"} + data = self.connection.request(API_ROOT, params=params).objects[0] for datacenter in data: if did == dc["DATACENTERID"]: self.datacenter = did @@ -561,18 +483,19 @@ def _to_nodes(self, objs): """Convert returned JSON Linodes into Node instances - @keyword objs: C{list} of JSON dictionaries representing the Linodes - @type objs: C{list} - @return: C{list} of L{Node}s""" + :keyword objs: ``list`` of JSON dictionaries representing the Linodes + :type objs: ``list`` + :return: ``list`` of :class:`Node`s""" # Get the IP addresses for the Linodes nodes = {} batch = [] for o in objs: lid = o["LINODEID"] - nodes[lid] = n = Node(id=lid, name=o["LABEL"], public_ip=[], - private_ip=[], state=self.LINODE_STATES[o["STATUS"]], - driver=self.connection.driver) + nodes[lid] = n = Node(id=lid, name=o["LABEL"], public_ips=[], + private_ips=[], + state=self.LINODE_STATES[o["STATUS"]], + driver=self.connection.driver) n.extra = copy(o) n.extra["PLANID"] = self._linode_plan_ids.get(o.get("TOTALRAM")) batch.append({"api_action": "linode.ip.list", "LinodeID": lid}) @@ -580,12 +503,17 @@ # Avoid batch limitation ip_answers = [] args = [iter(batch)] * 25 - izip_longest = getattr(itertools, 'izip_longest', _izip_longest) + + if PY3: + izip_longest = itertools.zip_longest + else: + izip_longest = getattr(itertools, 'izip_longest', _izip_longest) + for twenty_five in izip_longest(*args): twenty_five = [q for q in twenty_five if q] - params = { "api_action": "batch", - "api_requestArray": json.dumps(twenty_five) } - req = self.connection.request(LINODE_ROOT, params=params) + params = 
{"api_action": "batch", + "api_requestArray": json.dumps(twenty_five)} + req = self.connection.request(API_ROOT, params=params) if not req.success() or len(req.objects) == 0: return None ip_answers.extend(req.objects) @@ -594,21 +522,23 @@ for ip_list in ip_answers: for ip in ip_list: lid = ip["LINODEID"] - which = nodes[lid].public_ip if ip["ISPUBLIC"] == 1 else \ - nodes[lid].private_ip + which = nodes[lid].public_ips if ip["ISPUBLIC"] == 1 else\ + nodes[lid].private_ips which.append(ip["IPADDRESS"]) - return nodes.values() + return list(nodes.values()) - features = {"create_node": ["ssh_key", "password"]} def _izip_longest(*args, **kwds): """Taken from Python docs http://docs.python.org/library/itertools.html#itertools.izip """ + fillvalue = kwds.get('fillvalue') - def sentinel(counter = ([fillvalue]*(len(args)-1)).pop): - yield counter() # yields the fillvalue, or raises IndexError + + def sentinel(counter=([fillvalue] * (len(args) - 1)).pop): + yield counter() # yields the fillvalue, or raises IndexError + fillers = itertools.repeat(fillvalue) iters = [itertools.chain(it, sentinel(), fillers) for it in args] try: diff -Nru libcloud-0.5.0/libcloud/compute/drivers/nephoscale.py libcloud-0.15.1/libcloud/compute/drivers/nephoscale.py --- libcloud-0.5.0/libcloud/compute/drivers/nephoscale.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/nephoscale.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,448 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +NephoScale Cloud driver (http://www.nephoscale.com) +API documentation: http://docs.nephoscale.com +Created by Markos Gogoulos (https://mist.io) +""" + +import base64 +import sys +import time +import os +import binascii + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import b +from libcloud.utils.py3 import urlencode + +from libcloud.compute.providers import Provider +from libcloud.common.base import JsonResponse, ConnectionUserAndKey +from libcloud.compute.types import (NodeState, InvalidCredsError, + LibcloudError) +from libcloud.compute.base import (Node, NodeDriver, NodeImage, NodeSize, + NodeLocation) +from libcloud.utils.networking import is_private_subnet + +API_HOST = 'api.nephoscale.com' + +NODE_STATE_MAP = { + 'on': NodeState.RUNNING, + 'off': NodeState.UNKNOWN, + 'unknown': NodeState.UNKNOWN, +} + +VALID_RESPONSE_CODES = [httplib.OK, httplib.ACCEPTED, httplib.CREATED, + httplib.NO_CONTENT] + +# used in create_node and specifies how many times to get the list of nodes and +# check if the newly created node is there. 
This is because when a request is +# sent to create a node, NephoScale replies with the job id, and not the node +# itself thus we don't have the ip addresses, that are required in deploy_node +CONNECT_ATTEMPTS = 10 + + +class NodeKey(object): + def __init__(self, id, name, public_key=None, key_group=None, + password=None): + self.id = id + self.name = name + self.key_group = key_group + self.password = password + self.public_key = public_key + + def __repr__(self): + return (('') % + (self.id, self.name)) + + +class NephoscaleResponse(JsonResponse): + """ + Nephoscale API Response + """ + + def parse_error(self): + if self.status == httplib.UNAUTHORIZED: + raise InvalidCredsError('Authorization Failed') + if self.status == httplib.NOT_FOUND: + raise Exception("The resource you are looking for is not found.") + + return self.body + + def success(self): + return self.status in VALID_RESPONSE_CODES + + +class NephoscaleConnection(ConnectionUserAndKey): + """ + Nephoscale connection class. + Authenticates to the API through Basic Authentication + with username/password + """ + host = API_HOST + responseCls = NephoscaleResponse + + allow_insecure = False + + def add_default_headers(self, headers): + """ + Add parameters that are necessary for every request + """ + user_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key))) + headers['Authorization'] = 'Basic %s' % (user_b64.decode('utf-8')) + return headers + + +class NephoscaleNodeDriver(NodeDriver): + """ + Nephoscale node driver class. 
+ + >>> from libcloud.compute.providers import get_driver + >>> driver = get_driver('nephoscale') + >>> conn = driver('nepho_user','nepho_password') + >>> conn.list_nodes() + """ + + type = Provider.NEPHOSCALE + api_name = 'nephoscale' + name = 'NephoScale' + website = 'http://www.nephoscale.com' + connectionCls = NephoscaleConnection + features = {'create_node': ['ssh_key']} + + def list_locations(self): + """ + List available zones for deployment + + :rtype: ``list`` of :class:`NodeLocation` + """ + result = self.connection.request('/datacenter/zone/').object + locations = [] + for value in result.get('data', []): + location = NodeLocation(id=value.get('id'), + name=value.get('name'), + country='US', + driver=self) + locations.append(location) + return locations + + def list_images(self): + """ + List available images for deployment + + :rtype: ``list`` of :class:`NodeImage` + """ + result = self.connection.request('/image/server/').object + images = [] + for value in result.get('data', []): + extra = {'architecture': value.get('architecture'), + 'disks': value.get('disks'), + 'billable_type': value.get('billable_type'), + 'pcpus': value.get('pcpus'), + 'cores': value.get('cores'), + 'uri': value.get('uri'), + 'storage': value.get('storage'), + } + image = NodeImage(id=value.get('id'), + name=value.get('friendly_name'), + driver=self, + extra=extra) + images.append(image) + return images + + def list_sizes(self): + """ + List available sizes containing prices + + :rtype: ``list`` of :class:`NodeSize` + """ + result = self.connection.request('/server/type/cloud/').object + sizes = [] + for value in result.get('data', []): + value_id = value.get('id') + size = NodeSize(id=value_id, + name=value.get('friendly_name'), + ram=value.get('ram'), + disk=value.get('storage'), + bandwidth=None, + price=self._get_size_price(size_id=str(value_id)), + driver=self) + sizes.append(size) + + return sorted(sizes, key=lambda k: k.price) + + def list_nodes(self): + """ + List 
available nodes + + :rtype: ``list`` of :class:`Node` + """ + result = self.connection.request('/server/cloud/').object + nodes = [self._to_node(value) for value in result.get('data', [])] + return nodes + + def rename_node(self, node, name, hostname=None): + """rename a cloud server, optionally specify hostname too""" + data = {'name': name} + if hostname: + data['hostname'] = hostname + params = urlencode(data) + result = self.connection.request('/server/cloud/%s/' % node.id, + data=params, method='PUT').object + return result.get('response') in VALID_RESPONSE_CODES + + def reboot_node(self, node): + """reboot a running node""" + result = self.connection.request('/server/cloud/%s/initiator/restart/' + % node.id, method='POST').object + return result.get('response') in VALID_RESPONSE_CODES + + def ex_start_node(self, node): + """start a stopped node""" + result = self.connection.request('/server/cloud/%s/initiator/start/' + % node.id, method='POST').object + return result.get('response') in VALID_RESPONSE_CODES + + def ex_stop_node(self, node): + """stop a running node""" + result = self.connection.request('/server/cloud/%s/initiator/stop/' + % node.id, method='POST').object + return result.get('response') in VALID_RESPONSE_CODES + + def destroy_node(self, node): + """destroy a node""" + result = self.connection.request('/server/cloud/%s/' % node.id, + method='DELETE').object + return result.get('response') in VALID_RESPONSE_CODES + + def ex_list_keypairs(self, ssh=False, password=False, key_group=None): + """ + List available console and server keys + There are two types of keys for NephoScale, ssh and password keys. + If run without arguments, lists all keys. Otherwise list only + ssh keys, or only password keys. + Password keys with key_group 4 are console keys. When a server + is created, it has two keys, one password or ssh key, and + one password console key. 
+ + :keyword ssh: if specified, show ssh keys only (optional) + :type ssh: ``bool`` + + :keyword password: if specified, show password keys only (optional) + :type password: ``bool`` + + :keyword key_group: if specified, show keys with this key_group only + eg key_group=4 for console password keys (optional) + :type key_group: ``int`` + + :rtype: ``list`` of :class:`NodeKey` + """ + if (ssh and password): + raise LibcloudError('You can only supply ssh or password. To \ +get all keys call with no arguments') + if ssh: + result = self.connection.request('/key/sshrsa/').object + elif password: + result = self.connection.request('/key/password/').object + else: + result = self.connection.request('/key/').object + keys = [self._to_key(value) for value in result.get('data', [])] + + if key_group: + keys = [key for key in keys if + key.key_group == key_group] + return keys + + def ex_create_keypair(self, name, public_key=None, password=None, + key_group=None): + """Creates a key, ssh or password, for server or console + The group for the key (key_group) is 1 for Server and 4 for Console + Returns the id of the created key + """ + if public_key: + if not key_group: + key_group = 1 + data = { + 'name': name, + 'public_key': public_key, + 'key_group': key_group + + } + params = urlencode(data) + result = self.connection.request('/key/sshrsa/', data=params, + method='POST').object + else: + if not key_group: + key_group = 4 + if not password: + password = self.random_password() + data = { + 'name': name, + 'password': password, + 'key_group': key_group + } + params = urlencode(data) + result = self.connection.request('/key/password/', data=params, + method='POST').object + return result.get('data', {}).get('id', '') + + def ex_delete_keypair(self, key_id, ssh=False): + """Delete an ssh key or password given it's id + """ + if ssh: + result = self.connection.request('/key/sshrsa/%s/' % key_id, + method='DELETE').object + else: + result = 
self.connection.request('/key/password/%s/' % key_id, + method='DELETE').object + return result.get('response') in VALID_RESPONSE_CODES + + def create_node(self, name, size, image, server_key=None, + console_key=None, zone=None, **kwargs): + """Creates the node, and sets the ssh key, console key + NephoScale will respond with a 200-200 response after sending a valid + request. If nowait=True is specified in the args, we then ask a few + times until the server is created and assigned a public IP address, + so that deploy_node can be run + + >>> from libcloud.compute.providers import get_driver + >>> driver = get_driver('nephoscale') + >>> conn = driver('nepho_user','nepho_password') + >>> conn.list_nodes() + >>> name = 'staging-server' + >>> size = conn.list_sizes()[0] + + >>> image = conn.list_images()[9] + + >>> server_keys = conn.ex_list_keypairs(key_group=1)[0] + + >>> server_key = conn.ex_list_keypairs(key_group=1)[0].id + 70867 + >>> console_keys = conn.ex_list_keypairs(key_group=4)[0] + + >>> console_key = conn.ex_list_keypairs(key_group=4)[0].id + 70907 + >>> node = conn.create_node(name=name, size=size, image=image, \ + console_key=console_key, server_key=server_key) + + We can also create an ssh key, plus a console key and + deploy node with them + >>> server_key = conn.ex_create_keypair(name, public_key='123') + 71211 + >>> console_key = conn.ex_create_keypair(name, key_group=4) + 71213 + + We can increase the number of connect attempts to wait until + the node is created, so that deploy_node has ip address to + deploy the script + We can also specify the location + >>> location = conn.list_locations()[0] + >>> node = conn.create_node(name=name, + ... size=size, + ... image=image, + ... console_key=console_key, + ... server_key=server_key, + ... connect_attempts=10, + ... nowait=True, + ... 
zone=location.id) + """ + hostname = kwargs.get('hostname', name) + service_type = size.id + image = image.id + connect_attempts = int(kwargs.get('connect_attempts', + CONNECT_ATTEMPTS)) + + data = {'name': name, + 'hostname': hostname, + 'service_type': service_type, + 'image': image, + 'server_key': server_key, + 'console_key': console_key, + 'zone': zone + } + + params = urlencode(data) + try: + node = self.connection.request('/server/cloud/', data=params, + method='POST') + except Exception: + e = sys.exc_info()[1] + raise Exception("Failed to create node %s" % e) + node = Node(id='', name=name, state=NodeState.UNKNOWN, public_ips=[], + private_ips=[], driver=self) + + nowait = kwargs.get('ex_wait', False) + if not nowait: + return node + else: + # try to get the created node public ips, for use in deploy_node + # At this point we don't have the id of the newly created Node, + # so search name in nodes + created_node = False + while connect_attempts > 0: + nodes = self.list_nodes() + created_node = [c_node for c_node in nodes if + c_node.name == name] + if created_node: + return created_node[0] + else: + time.sleep(60) + connect_attempts = connect_attempts - 1 + return node + + def _to_node(self, data): + """Convert node in Node instances + """ + + state = NODE_STATE_MAP.get(data.get('power_status'), '4') + public_ips = [] + private_ips = [] + ip_addresses = data.get('ipaddresses', '') + # E.g. 
"ipaddresses": "198.120.14.6, 10.132.60.1" + if ip_addresses: + for ip in ip_addresses.split(','): + ip = ip.replace(' ', '') + if is_private_subnet(ip): + private_ips.append(ip) + else: + public_ips.append(ip) + extra = { + 'zone_data': data.get('zone'), + 'zone': data.get('zone', {}).get('name'), + 'image': data.get('image', {}).get('friendly_name'), + 'create_time': data.get('create_time'), + 'network_ports': data.get('network_ports'), + 'is_console_enabled': data.get('is_console_enabled'), + 'service_type': data.get('service_type', {}).get('friendly_name'), + 'hostname': data.get('hostname') + } + + node = Node(id=data.get('id'), name=data.get('name'), state=state, + public_ips=public_ips, private_ips=private_ips, + driver=self, extra=extra) + return node + + def _to_key(self, data): + return NodeKey(id=data.get('id'), + name=data.get('name'), + password=data.get('password'), + key_group=data.get('key_group'), + public_key=data.get('public_key')) + + def random_password(self, size=8): + value = os.urandom(size) + password = binascii.hexlify(value).decode('ascii') + return password[:size] diff -Nru libcloud-0.5.0/libcloud/compute/drivers/ninefold.py libcloud-0.15.1/libcloud/compute/drivers/ninefold.py --- libcloud-0.5.0/libcloud/compute/drivers/ninefold.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/ninefold.py 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,29 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.compute.providers import Provider + +from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver + + +class NinefoldNodeDriver(CloudStackNodeDriver): + "Driver for Ninefold's Compute platform." + + host = 'api.ninefold.com' + path = '/compute/v1.0/' + + type = Provider.NINEFOLD + name = 'Ninefold' + website = 'http://ninefold.com/' diff -Nru libcloud-0.5.0/libcloud/compute/drivers/opennebula.py libcloud-0.15.1/libcloud/compute/drivers/opennebula.py --- libcloud-0.5.0/libcloud/compute/drivers/opennebula.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/opennebula.py 2014-07-02 18:47:55.000000000 +0000 @@ -15,99 +15,359 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + """ -OpenNebula driver +OpenNebula.org driver. 
""" +__docformat__ = 'epytext' + from base64 import b64encode import hashlib -from xml.etree import ElementTree as ET -from libcloud.common.base import ConnectionUserAndKey, Response +try: + from lxml import etree as ET +except ImportError: + from xml.etree import ElementTree as ET + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import next +from libcloud.utils.py3 import b + +from libcloud.compute.base import NodeState, NodeDriver, Node, NodeLocation +from libcloud.common.base import ConnectionUserAndKey, XmlResponse +from libcloud.compute.base import NodeImage, NodeSize, StorageVolume from libcloud.common.types import InvalidCredsError from libcloud.compute.providers import Provider -from libcloud.compute.types import NodeState -from libcloud.compute.base import NodeDriver, Node, NodeLocation -from libcloud.compute.base import NodeImage, NodeSize + +__all__ = [ + 'ACTION', + 'OpenNebulaResponse', + 'OpenNebulaConnection', + 'OpenNebulaNodeSize', + 'OpenNebulaNetwork', + 'OpenNebulaNodeDriver', + 'OpenNebula_1_4_NodeDriver', + 'OpenNebula_2_0_NodeDriver', + 'OpenNebula_3_0_NodeDriver', + 'OpenNebula_3_2_NodeDriver', + 'OpenNebula_3_8_NodeDriver'] API_HOST = '' API_PORT = (4567, 443) API_SECURE = True +API_PLAIN_AUTH = False +DEFAULT_API_VERSION = '3.2' + + +class ACTION(object): + """ + All actions, except RESUME, only apply when the VM is in the "Running" + state. + """ + + STOP = 'STOPPED' + """ + The VM is stopped, and its memory state stored to a checkpoint file. VM + state, and disk image, are transferred back to the front-end. Resuming + the VM requires the VM instance to be re-scheduled. + """ + + SUSPEND = 'SUSPENDED' + """ + The VM is stopped, and its memory state stored to a checkpoint file. The VM + state, and disk image, are left on the host to be resumed later. Resuming + the VM does not require the VM to be re-scheduled. Rather, after + suspending, the VM resources are reserved for later resuming. 
+ """ + + RESUME = 'RESUME' + """ + The VM is resumed using the saved memory state from the checkpoint file, + and the VM's disk image. The VM is either started immediately, or + re-scheduled depending on how it was suspended. + """ + + CANCEL = 'CANCEL' + """ + The VM is forcibly shutdown, its memory state is deleted. If a persistent + disk image was used, that disk image is transferred back to the front-end. + Any non-persistent disk images are deleted. + """ + + SHUTDOWN = 'SHUTDOWN' + """ + The VM is gracefully shutdown by sending the ACPI signal. If the VM does + not shutdown, then it is considered to still be running. If successfully, + shutdown, its memory state is deleted. If a persistent disk image was used, + that disk image is transferred back to the front-end. Any non-persistent + disk images are deleted. + """ + + REBOOT = 'REBOOT' + """ + Introduced in OpenNebula v3.2. + + The VM is gracefully restarted by sending the ACPI signal. + """ + + DONE = 'DONE' + """ + The VM is forcibly shutdown, its memory state is deleted. If a persistent + disk image was used, that disk image is transferred back to the front-end. + Any non-persistent disk images are deleted. + """ -class OpenNebulaResponse(Response): +class OpenNebulaResponse(XmlResponse): + """ + XmlResponse class for the OpenNebula.org driver. + """ def success(self): + """ + Check if response has the appropriate HTTP response code to be a + success. + + :rtype: ``bool`` + :return: True is success, else False. + """ i = int(self.status) return i >= 200 and i <= 299 - def parse_body(self): - if not self.body: - return None - return ET.XML(self.body) - def parse_error(self): - if int(self.status) == 401: + """ + Check if response contains any errors. + + @raise: :class:`InvalidCredsError` + + :rtype: :class:`ElementTree` + :return: Contents of HTTP response body. 
+ """ + if int(self.status) == httplib.UNAUTHORIZED: raise InvalidCredsError(self.body) return self.body class OpenNebulaConnection(ConnectionUserAndKey): """ - Connection class for the OpenNebula driver + Connection class for the OpenNebula.org driver. + with plain_auth support """ host = API_HOST port = API_PORT secure = API_SECURE + plain_auth = API_PLAIN_AUTH responseCls = OpenNebulaResponse + def __init__(self, *args, **kwargs): + if 'plain_auth' in kwargs: + self.plain_auth = kwargs.pop('plain_auth') + super(OpenNebulaConnection, self).__init__(*args, **kwargs) + def add_default_headers(self, headers): - pass_sha1 = hashlib.sha1(self.key).hexdigest() - headers['Authorization'] = ("Basic %s" % b64encode("%s:%s" % (self.user_id, pass_sha1))) + """ + Add headers required by the OpenNebula.org OCCI interface. + + Includes adding Basic HTTP Authorization headers for authenticating + against the OpenNebula.org OCCI interface. + + :type headers: ``dict`` + :param headers: Dictionary containing HTTP headers. + + :rtype: ``dict`` + :return: Dictionary containing updated headers. + """ + if self.plain_auth: + passwd = self.key + else: + passwd = hashlib.sha1(b(self.key)).hexdigest() + headers['Authorization'] =\ + ('Basic %s' % b64encode(b('%s:%s' % (self.user_id, + passwd))).decode('utf-8')) return headers +class OpenNebulaNodeSize(NodeSize): + """ + NodeSize class for the OpenNebula.org driver. + """ + + def __init__(self, id, name, ram, disk, bandwidth, price, driver, + cpu=None, vcpu=None): + super(OpenNebulaNodeSize, self).__init__(id=id, name=name, ram=ram, + disk=disk, + bandwidth=bandwidth, + price=price, driver=driver) + self.cpu = cpu + self.vcpu = vcpu + + def __repr__(self): + return (('') + % (self.id, self.name, self.ram, self.disk, self.bandwidth, + self.price, self.driver.name, self.cpu, self.vcpu)) + + +class OpenNebulaNetwork(object): + """ + Provide a common interface for handling networks of all types. 
+ + Network objects are analogous to physical switches connecting two or + more physical nodes together. The Network object provides the interface in + libcloud through which we can manipulate networks in different cloud + providers in the same way. Network objects don't actually do much directly + themselves, instead the network driver handles the connection to the + network. + + You don't normally create a network object yourself; instead you use + a driver and then have that create the network for you. + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver() + >>> network = driver.create_network() + >>> network = driver.list_networks()[0] + >>> network.name + 'dummy-1' + """ + + def __init__(self, id, name, address, size, driver, extra=None): + self.id = str(id) + self.name = name + self.address = address + self.size = size + self.driver = driver + self.uuid = self.get_uuid() + self.extra = extra or {} + + def get_uuid(self): + """ + Unique hash for this network. + + The hash is a function of an SHA1 hash of the network's ID and + its driver which means that it should be unique between all + networks. In some subclasses (e.g. GoGrid) there is no ID + available so the public IP address is used. This means that, + unlike a properly done system UUID, the same UUID may mean a + different system install at a different time + + >>> from libcloud.network.drivers.dummy import DummyNetworkDriver + >>> driver = DummyNetworkDriver() + >>> network = driver.create_network() + >>> network.get_uuid() + 'd3748461511d8b9b0e0bfa0d4d3383a619a2bb9f' + + Note, for example, that this example will always produce the + same UUID! + + :rtype: ``str`` + :return: Unique identifier for this instance. 
+ """ + return hashlib.sha1(b("%s:%s" % (self.id, + self.driver.type))).hexdigest() + + def __repr__(self): + return (('') + % (self.uuid, self.name, self.address, self.size, + self.driver.name)) + + class OpenNebulaNodeDriver(NodeDriver): """ - OpenNebula node driver + OpenNebula.org node driver. """ connectionCls = OpenNebulaConnection - type = Provider.OPENNEBULA name = 'OpenNebula' + website = 'http://opennebula.org/' + type = Provider.OPENNEBULA NODE_STATE_MAP = { + 'INIT': NodeState.PENDING, 'PENDING': NodeState.PENDING, + 'HOLD': NodeState.PENDING, 'ACTIVE': NodeState.RUNNING, + 'STOPPED': NodeState.TERMINATED, + 'SUSPENDED': NodeState.PENDING, 'DONE': NodeState.TERMINATED, - 'STOPPED': NodeState.TERMINATED - } + 'FAILED': NodeState.TERMINATED} - def list_sizes(self, location=None): - return [ - NodeSize(id=1, - name="small", - ram=None, - disk=None, - bandwidth=None, - price=None, - driver=self), - NodeSize(id=2, - name="medium", - ram=None, - disk=None, - bandwidth=None, - price=None, - driver=self), - NodeSize(id=3, - name="large", - ram=None, - disk=None, - bandwidth=None, - price=None, - driver=self), - ] + def __new__(cls, key, secret=None, api_version=DEFAULT_API_VERSION, + **kwargs): + if cls is OpenNebulaNodeDriver: + if api_version in ['1.4']: + cls = OpenNebula_1_4_NodeDriver + elif api_version in ['2.0', '2.2']: + cls = OpenNebula_2_0_NodeDriver + elif api_version in ['3.0']: + cls = OpenNebula_3_0_NodeDriver + elif api_version in ['3.2']: + cls = OpenNebula_3_2_NodeDriver + elif api_version in ['3.6']: + cls = OpenNebula_3_6_NodeDriver + elif api_version in ['3.8']: + cls = OpenNebula_3_8_NodeDriver + if 'plain_auth' not in kwargs: + kwargs['plain_auth'] = cls.plain_auth + else: + cls.plain_auth = kwargs['plain_auth'] + else: + raise NotImplementedError( + "No OpenNebulaNodeDriver found for API version %s" % + (api_version)) + return super(OpenNebulaNodeDriver, cls).__new__(cls) + + def create_node(self, **kwargs): + """ + Create a new 
OpenNebula node. + + @inherits: :class:`NodeDriver.create_node` + + :keyword networks: List of virtual networks to which this node should + connect. (optional) + :type networks: :class:`OpenNebulaNetwork` or + ``list`` of :class:`OpenNebulaNetwork` + """ + compute = ET.Element('COMPUTE') + + name = ET.SubElement(compute, 'NAME') + name.text = kwargs['name'] + + instance_type = ET.SubElement(compute, 'INSTANCE_TYPE') + instance_type.text = kwargs['size'].name + + storage = ET.SubElement(compute, 'STORAGE') + ET.SubElement(storage, + 'DISK', + {'image': '%s' % (str(kwargs['image'].id))}) + + if 'networks' in kwargs: + if not isinstance(kwargs['networks'], list): + kwargs['networks'] = [kwargs['networks']] + + networkGroup = ET.SubElement(compute, 'NETWORK') + for network in kwargs['networks']: + if network.address: + ET.SubElement(networkGroup, 'NIC', + {'network': '%s' % (str(network.id)), + 'ip': network.address}) + else: + ET.SubElement(networkGroup, 'NIC', + {'network': '%s' % (str(network.id))}) + + xml = ET.tostring(compute) + node = self.connection.request('/compute', method='POST', + data=xml).object + + return self._to_node(node) + + def destroy_node(self, node): + url = '/compute/%s' % (str(node.id)) + resp = self.connection.request(url, method='DELETE') + + return resp.status == httplib.OK def list_nodes(self): return self._to_nodes(self.connection.request('/compute').object) @@ -115,105 +375,890 @@ def list_images(self, location=None): return self._to_images(self.connection.request('/storage').object) + def list_sizes(self, location=None): + """ + Return list of sizes on a provider. + + @inherits: :class:`NodeDriver.list_sizes` + + :return: List of compute node sizes supported by the cloud provider. 
+ :rtype: ``list`` of :class:`OpenNebulaNodeSize` + """ + return [ + NodeSize(id=1, + name='small', + ram=None, + disk=None, + bandwidth=None, + price=None, + driver=self), + NodeSize(id=2, + name='medium', + ram=None, + disk=None, + bandwidth=None, + price=None, + driver=self), + NodeSize(id=3, + name='large', + ram=None, + disk=None, + bandwidth=None, + price=None, + driver=self), + ] + def list_locations(self): - return [NodeLocation(0, 'OpenNebula', 'ONE', self)] + return [NodeLocation(0, '', '', self)] - def reboot_node(self, node): - compute_id = str(node.id) + def ex_list_networks(self, location=None): + """ + List virtual networks on a provider. + + :param location: Location from which to request a list of virtual + networks. (optional) + :type location: :class:`NodeLocation` + + :return: List of virtual networks available to be connected to a + compute node. + :rtype: ``list`` of :class:`OpenNebulaNetwork` + """ + return self._to_networks(self.connection.request('/network').object) - url = '/compute/%s' % compute_id - resp1 = self.connection.request(url,method='PUT',data=self._xml_action(compute_id,'STOPPED')) + def ex_node_action(self, node, action): + """ + Build action representation and instruct node to commit action. - if resp1.status == 400: - return False + Build action representation from the compute node ID, and the + action which should be carried out on that compute node. Then + instruct the node to carry out that action. - resp2 = self.connection.request(url,method='PUT',data=self._xml_action(compute_id,'RESUME')) + :param node: Compute node instance. + :type node: :class:`Node` - if resp2.status == 400: + :param action: Action to be carried out on the compute node. + :type action: ``str`` + + :return: False if an HTTP Bad Request is received, else, True is + returned. 
+ :rtype: ``bool`` + """ + compute_node_id = str(node.id) + + compute = ET.Element('COMPUTE') + + compute_id = ET.SubElement(compute, 'ID') + compute_id.text = compute_node_id + + state = ET.SubElement(compute, 'STATE') + state.text = action + + xml = ET.tostring(compute) + + url = '/compute/%s' % compute_node_id + resp = self.connection.request(url, method='PUT', + data=xml) + + if resp.status == httplib.BAD_REQUEST: return False + else: + return True - return True + def _to_images(self, object): + """ + Request a list of images and convert that list to a list of NodeImage + objects. - def destroy_node(self, node): - url = '/compute/%s' % (str(node.id)) - resp = self.connection.request(url,method='DELETE') + Request a list of images from the OpenNebula web interface, and + issue a request to convert each XML object representation of an image + to a NodeImage object. + + :rtype: ``list`` of :class:`NodeImage` + :return: List of images. + """ + images = [] + for element in object.findall('DISK'): + image_id = element.attrib['href'].partition('/storage/')[2] + image = self.connection.request( + ('/storage/%s' % (image_id))).object + images.append(self._to_image(image)) + + return images + + def _to_image(self, image): + """ + Take XML object containing an image description and convert to + NodeImage object. + + :type image: :class:`ElementTree` + :param image: XML representation of an image. + + :rtype: :class:`NodeImage` + :return: The newly extracted :class:`NodeImage`. + """ + return NodeImage(id=image.findtext('ID'), + name=image.findtext('NAME'), + driver=self.connection.driver, + extra={'size': image.findtext('SIZE'), + 'url': image.findtext('URL')}) + + def _to_networks(self, object): + """ + Request a list of networks and convert that list to a list of + OpenNebulaNetwork objects. + + Request a list of networks from the OpenNebula web interface, and + issue a request to convert each XML object representation of a network + to an OpenNebulaNetwork object. 
+ + :rtype: ``list`` of :class:`OpenNebulaNetwork` + :return: List of virtual networks. + """ + networks = [] + for element in object.findall('NETWORK'): + network_id = element.attrib['href'].partition('/network/')[2] + network_element = self.connection.request( + ('/network/%s' % (network_id))).object + networks.append(self._to_network(network_element)) + + return networks + + def _to_network(self, element): + """ + Take XML object containing a network description and convert to + OpenNebulaNetwork object. + + Take XML representation containing a network description and + convert to OpenNebulaNetwork object. + + :rtype: :class:`OpenNebulaNetwork` + :return: The newly extracted :class:`OpenNebulaNetwork`. + """ + return OpenNebulaNetwork(id=element.findtext('ID'), + name=element.findtext('NAME'), + address=element.findtext('ADDRESS'), + size=element.findtext('SIZE'), + driver=self.connection.driver) + + def _to_nodes(self, object): + """ + Request a list of compute nodes and convert that list to a list of + Node objects. + + Request a list of compute nodes from the OpenNebula web interface, and + issue a request to convert each XML object representation of a node + to a Node object. + + :rtype: ``list`` of :class:`Node` + :return: A list of compute nodes. + """ + computes = [] + for element in object.findall('COMPUTE'): + compute_id = element.attrib['href'].partition('/compute/')[2] + compute = self.connection.request( + ('/compute/%s' % (compute_id))).object + computes.append(self._to_node(compute)) + + return computes + + def _to_node(self, compute): + """ + Take XML object containing a compute node description and convert to + Node object. + + Take XML representation containing a compute node description and + convert to Node object. + + :type compute: :class:`ElementTree` + :param compute: XML representation of a compute node. + + :rtype: :class:`Node` + :return: The newly extracted :class:`Node`. 
+ """ + try: + state = self.NODE_STATE_MAP[compute.findtext('STATE').upper()] + except KeyError: + state = NodeState.UNKNOWN + + return Node(id=compute.findtext('ID'), + name=compute.findtext('NAME'), + state=state, + public_ips=self._extract_networks(compute), + private_ips=[], + driver=self.connection.driver, + image=self._extract_images(compute)) + + def _extract_networks(self, compute): + """ + Extract networks from a compute node XML representation. + + Extract network descriptions from a compute node XML representation, + converting each network to an OpenNebulaNetwork object. + + :type compute: :class:`ElementTree` + :param compute: XML representation of a compute node. + + :rtype: ``list`` of :class:`OpenNebulaNetwork`s. + :return: List of virtual networks attached to the compute node. + """ + networks = list() + + network_list = compute.find('NETWORK') + for element in network_list.findall('NIC'): + networks.append( + OpenNebulaNetwork(id=element.attrib.get('network', None), + name=None, + address=element.attrib.get('ip', None), + size=1, + driver=self.connection.driver)) + + return networks + + def _extract_images(self, compute): + """ + Extract image disks from a compute node XML representation. + + Extract image disk descriptions from a compute node XML representation, + converting the disks to an NodeImage object. + + :type compute: :class:`ElementTree` + :param compute: XML representation of a compute node. + + :rtype: :class:`NodeImage`. + :return: First disk attached to a compute node. + """ + disks = list() + + disk_list = compute.find('STORAGE') + if disk_list is not None: + for element in disk_list.findall('DISK'): + disks.append( + NodeImage(id=element.attrib.get('image', None), + name=None, + driver=self.connection.driver, + extra={'dev': element.attrib.get('dev', None)})) + + # @TODO: Return all disks when the Node type accepts multiple + # attached disks per node. 
+ if len(disks) > 0: + return disks[0] + else: + return None + + +class OpenNebula_1_4_NodeDriver(OpenNebulaNodeDriver): + """ + OpenNebula.org node driver for OpenNebula.org v1.4. + """ + + name = 'OpenNebula (v1.4)' - return resp.status == 204 + +class OpenNebula_2_0_NodeDriver(OpenNebulaNodeDriver): + """ + OpenNebula.org node driver for OpenNebula.org v2.0 through OpenNebula.org + v2.2. + """ + + name = 'OpenNebula (v2.0 - v2.2)' def create_node(self, **kwargs): - """Create a new OpenNebula node + """ + Create a new OpenNebula node. + + @inherits: :class:`NodeDriver.create_node` - See L{NodeDriver.create_node} for more keyword args. + :keyword networks: List of virtual networks to which this node should + connect. (optional) + :type networks: :class:`OpenNebulaNetwork` or ``list`` + of :class:`OpenNebulaNetwork` + + :keyword context: Custom (key, value) pairs to be injected into + compute node XML description. (optional) + :type context: ``dict`` + + :return: Instance of a newly created node. 
+ :rtype: :class:`Node` """ compute = ET.Element('COMPUTE') name = ET.SubElement(compute, 'NAME') name.text = kwargs['name'] - # """ - # Other extractable (but unused) information - # """ - # instance_type = ET.SubElement(compute, 'INSTANCE_TYPE') - # instance_type.text = kwargs['size'].name - # - # storage = ET.SubElement(compute, 'STORAGE') - # disk = ET.SubElement(storage, 'DISK', {'image': str(kwargs['image'].id), - # 'dev': 'sda1'}) + instance_type = ET.SubElement(compute, 'INSTANCE_TYPE') + instance_type.text = kwargs['size'].name - xml = ET.tostring(compute) + disk = ET.SubElement(compute, 'DISK') + ET.SubElement(disk, + 'STORAGE', + {'href': '/storage/%s' % (str(kwargs['image'].id))}) + + if 'networks' in kwargs: + if not isinstance(kwargs['networks'], list): + kwargs['networks'] = [kwargs['networks']] + + for network in kwargs['networks']: + nic = ET.SubElement(compute, 'NIC') + ET.SubElement(nic, 'NETWORK', + {'href': '/network/%s' % (str(network.id))}) + if network.address: + ip_line = ET.SubElement(nic, 'IP') + ip_line.text = network.address + + if 'context' in kwargs: + if isinstance(kwargs['context'], dict): + contextGroup = ET.SubElement(compute, 'CONTEXT') + for key, value in list(kwargs['context'].items()): + context = ET.SubElement(contextGroup, key.upper()) + context.text = value - node = self.connection.request('/compute',method='POST',data=xml).object + xml = ET.tostring(compute) + node = self.connection.request('/compute', method='POST', + data=xml).object return self._to_node(node) + def destroy_node(self, node): + url = '/compute/%s' % (str(node.id)) + resp = self.connection.request(url, method='DELETE') + + return resp.status == httplib.NO_CONTENT + + def list_sizes(self, location=None): + """ + Return list of sizes on a provider. + + @inherits: :class:`NodeDriver.list_sizes` + + :return: List of compute node sizes supported by the cloud provider. 
+ :rtype: ``list`` of :class:`OpenNebulaNodeSize` + """ + return [ + OpenNebulaNodeSize(id=1, + name='small', + ram=1024, + cpu=1, + disk=None, + bandwidth=None, + price=None, + driver=self), + OpenNebulaNodeSize(id=2, + name='medium', + ram=4096, + cpu=4, + disk=None, + bandwidth=None, + price=None, + driver=self), + OpenNebulaNodeSize(id=3, + name='large', + ram=8192, + cpu=8, + disk=None, + bandwidth=None, + price=None, + driver=self), + OpenNebulaNodeSize(id=4, + name='custom', + ram=0, + cpu=0, + disk=None, + bandwidth=None, + price=None, + driver=self), + ] + def _to_images(self, object): + """ + Request a list of images and convert that list to a list of NodeImage + objects. + + Request a list of images from the OpenNebula web interface, and + issue a request to convert each XML object representation of an image + to a NodeImage object. + + :rtype: ``list`` of :class:`NodeImage` + :return: List of images. + """ images = [] - for element in object.findall("DISK"): + for element in object.findall('STORAGE'): image_id = element.attrib["href"].partition("/storage/")[2] - image = self.connection.request(("/storage/%s" % (image_id))).object + image = self.connection.request( + ("/storage/%s" % (image_id))).object images.append(self._to_image(image)) return images def _to_image(self, image): - return NodeImage(id=image.findtext("ID"), - name=image.findtext("NAME"), - driver=self.connection.driver) + """ + Take XML object containing an image description and convert to + NodeImage object. - def _to_nodes(self, object): - computes = [] - for element in object.findall("COMPUTE"): - compute_id = element.attrib["href"].partition("/compute/")[2] - compute = self.connection.request(("/compute/%s" % (compute_id))).object - computes.append(self._to_node(compute)) + :type image: :class:`ElementTree` + :param image: XML representation of an image. - return computes + :rtype: :class:`NodeImage` + :return: The newly extracted :class:`NodeImage`. 
+ """ + return NodeImage(id=image.findtext('ID'), + name=image.findtext('NAME'), + driver=self.connection.driver, + extra={'description': image.findtext('DESCRIPTION'), + 'type': image.findtext('TYPE'), + 'size': image.findtext('SIZE'), + 'fstype': image.findtext('FSTYPE', None)}) def _to_node(self, compute): + """ + Take XML object containing a compute node description and convert to + Node object. + + Take XML representation containing a compute node description and + convert to Node object. + + :type compute: :class:`ElementTree` + :param compute: XML representation of a compute node. + + :rtype: :class:`Node` + :return: The newly extracted :class:`Node`. + """ try: - state = self.NODE_STATE_MAP[compute.findtext("STATE")] + state = self.NODE_STATE_MAP[compute.findtext('STATE').upper()] except KeyError: state = NodeState.UNKNOWN + return Node(id=compute.findtext('ID'), + name=compute.findtext('NAME'), + state=state, + public_ips=self._extract_networks(compute), + private_ips=[], + driver=self.connection.driver, + image=self._extract_images(compute), + size=self._extract_size(compute), + extra={'context': self._extract_context(compute)}) + + def _extract_networks(self, compute): + """ + Extract networks from a compute node XML representation. + + Extract network descriptions from a compute node XML representation, + converting each network to an OpenNebulaNetwork object. + + :type compute: :class:`ElementTree` + :param compute: XML representation of a compute node. + + :rtype: ``list`` of :class:`OpenNebulaNetwork` + :return: List of virtual networks attached to the compute node. 
+ """ networks = [] - for element in compute.findall("NIC"): - networks.append(element.attrib["ip"]) - return Node(id=compute.findtext("ID"), - name=compute.findtext("NAME"), - state=state, - public_ip=networks, - private_ip=[], - driver=self.connection.driver) + for element in compute.findall('NIC'): + network = element.find('NETWORK') + network_id = network.attrib['href'].partition('/network/')[2] + + networks.append( + OpenNebulaNetwork(id=network_id, + name=network.attrib.get('name', None), + address=element.findtext('IP'), + size=1, + driver=self.connection.driver, + extra={'mac': element.findtext('MAC')})) + + return networks + + def _extract_images(self, compute): + """ + Extract image disks from a compute node XML representation. + + Extract image disk descriptions from a compute node XML representation, + converting the disks to an NodeImage object. + + :type compute: :class:`ElementTree` + :param compute: XML representation of a compute node. + + :rtype: ``list`` of :class:`NodeImage` + :return: Disks attached to a compute node. + """ + disks = list() + + for element in compute.findall('DISK'): + disk = element.find('STORAGE') + image_id = disk.attrib['href'].partition('/storage/')[2] + + if 'id' in element.attrib: + disk_id = element.attrib['id'] + else: + disk_id = None + + disks.append( + NodeImage(id=image_id, + name=disk.attrib.get('name', None), + driver=self.connection.driver, + extra={'type': element.findtext('TYPE'), + 'disk_id': disk_id, + 'target': element.findtext('TARGET')})) + + # Return all disks when the Node type accepts multiple attached disks + # per node. + if len(disks) > 1: + return disks + elif len(disks) == 1: + return disks[0] + else: + return None + + def _extract_size(self, compute): + """ + Extract size, or node type, from a compute node XML representation. + + Extract node size, or node type, description from a compute node XML + representation, converting the node size to a NodeSize object. 
+ + :type compute: :class:`ElementTree` + :param compute: XML representation of a compute node. + + :rtype: :class:`OpenNebulaNodeSize` + :return: Node type of compute node. + """ + instance_type = compute.find('INSTANCE_TYPE') + + try: + return next((node_size for node_size in self.list_sizes() + if node_size.name == instance_type.text)) + except StopIteration: + return None + + def _extract_context(self, compute): + """ + Extract size, or node type, from a compute node XML representation. + + Extract node size, or node type, description from a compute node XML + representation, converting the node size to a NodeSize object. + + :type compute: :class:`ElementTree` + :param compute: XML representation of a compute node. + + :rtype: ``dict`` + :return: Dictionary containing (key, value) pairs related to + compute node context. + """ + contexts = dict() + context = compute.find('CONTEXT') + + if context is not None: + for context_element in list(context): + contexts[context_element.tag.lower()] = context_element.text + + return contexts + + +class OpenNebula_3_0_NodeDriver(OpenNebula_2_0_NodeDriver): + """ + OpenNebula.org node driver for OpenNebula.org v3.0. + """ + + name = 'OpenNebula (v3.0)' + + def ex_node_set_save_name(self, node, name): + """ + Build action representation and instruct node to commit action. + + Build action representation from the compute node ID, the disk image + which will be saved, and the name under which the image will be saved + upon shutting down the compute node. + + :param node: Compute node instance. + :type node: :class:`Node` + + :param name: Name under which the image should be saved after shutting + down the compute node. + :type name: ``str`` + + :return: False if an HTTP Bad Request is received, else, True is + returned. 
+ :rtype: ``bool`` + """ + compute_node_id = str(node.id) - def _xml_action(self, compute_id, action): compute = ET.Element('COMPUTE') compute_id = ET.SubElement(compute, 'ID') - compute_id.text = str(compute_id) + compute_id.text = compute_node_id - state = ET.SubElement(compute, 'STATE') - state.text = action + disk = ET.SubElement(compute, 'DISK', {'id': str(node.image.id)}) + + ET.SubElement(disk, 'STORAGE', + {'href': '/storage/%s' % (str(node.image.id)), + 'name': node.image.name}) + + ET.SubElement(disk, 'SAVE_AS', {'name': str(name)}) xml = ET.tostring(compute) - return xml + + url = '/compute/%s' % compute_node_id + resp = self.connection.request(url, method='PUT', + data=xml) + + if resp.status == httplib.BAD_REQUEST: + return False + else: + return True + + def _to_network(self, element): + """ + Take XML object containing a network description and convert to + OpenNebulaNetwork object. + + Take XML representation containing a network description and + convert to OpenNebulaNetwork object. + + :return: The newly extracted :class:`OpenNebulaNetwork`. + :rtype: :class:`OpenNebulaNetwork` + """ + return OpenNebulaNetwork(id=element.findtext('ID'), + name=element.findtext('NAME'), + address=element.findtext('ADDRESS'), + size=element.findtext('SIZE'), + driver=self.connection.driver, + extra={'public': element.findtext('PUBLIC')}) + + +class OpenNebula_3_2_NodeDriver(OpenNebula_3_0_NodeDriver): + """ + OpenNebula.org node driver for OpenNebula.org v3.2. + """ + + name = 'OpenNebula (v3.2)' + + def reboot_node(self, node): + return self.ex_node_action(node, ACTION.REBOOT) + + def list_sizes(self, location=None): + """ + Return list of sizes on a provider. + + @inherits: :class:`NodeDriver.list_sizes` + + :return: List of compute node sizes supported by the cloud provider. 
+ :rtype: ``list`` of :class:`OpenNebulaNodeSize` + """ + return self._to_sizes(self.connection.request('/instance_type').object) + + def _to_sizes(self, object): + """ + Request a list of instance types and convert that list to a list of + OpenNebulaNodeSize objects. + + Request a list of instance types from the OpenNebula web interface, + and issue a request to convert each XML object representation of an + instance type to an OpenNebulaNodeSize object. + + :return: List of instance types. + :rtype: ``list`` of :class:`OpenNebulaNodeSize` + """ + sizes = [] + size_id = 1 + + attributes = [('name', str, None), ('ram', int, 'MEMORY'), + ('cpu', float, None), ('vcpu', float, None), + ('disk', str, None), ('bandwidth', float, None), + ('price', float, None)] + + for element in object.findall('INSTANCE_TYPE'): + size_kwargs = {'id': size_id, 'driver': self} + values = self._get_attributes_values(attributes=attributes, + element=element) + size_kwargs.update(values) + + size = OpenNebulaNodeSize(**size_kwargs) + sizes.append(size) + size_id += 1 + + return sizes + + def _get_attributes_values(self, attributes, element): + values = {} + + for attribute_name, attribute_type, alias in attributes: + key = alias if alias else attribute_name.upper() + value = element.findtext(key) + + if value is not None: + value = attribute_type(value) + + values[attribute_name] = value + + return values + + +class OpenNebula_3_6_NodeDriver(OpenNebula_3_2_NodeDriver): + """ + OpenNebula.org node driver for OpenNebula.org v3.6. 
+ """ + + name = 'OpenNebula (v3.6)' + + def create_volume(self, size, name, location=None, snapshot=None): + storage = ET.Element('STORAGE') + + vol_name = ET.SubElement(storage, 'NAME') + vol_name.text = name + + vol_type = ET.SubElement(storage, 'TYPE') + vol_type.text = 'DATABLOCK' + + description = ET.SubElement(storage, 'DESCRIPTION') + description.text = 'Attached storage' + + public = ET.SubElement(storage, 'PUBLIC') + public.text = 'NO' + + persistent = ET.SubElement(storage, 'PERSISTENT') + persistent.text = 'YES' + + fstype = ET.SubElement(storage, 'FSTYPE') + fstype.text = 'ext3' + + vol_size = ET.SubElement(storage, 'SIZE') + vol_size.text = str(size) + + xml = ET.tostring(storage) + volume = self.connection.request('/storage', + {'occixml': xml}, + method='POST').object + + return self._to_volume(volume) + + def destroy_volume(self, volume): + url = '/storage/%s' % (str(volume.id)) + resp = self.connection.request(url, method='DELETE') + + return resp.status == httplib.NO_CONTENT + + def attach_volume(self, node, volume, device): + action = ET.Element('ACTION') + + perform = ET.SubElement(action, 'PERFORM') + perform.text = 'ATTACHDISK' + + params = ET.SubElement(action, 'PARAMS') + + ET.SubElement(params, + 'STORAGE', + {'href': '/storage/%s' % (str(volume.id))}) + + target = ET.SubElement(params, 'TARGET') + target.text = device + + xml = ET.tostring(action) + + url = '/compute/%s/action' % node.id + + resp = self.connection.request(url, method='POST', data=xml) + return resp.status == httplib.ACCEPTED + + def _do_detach_volume(self, node_id, disk_id): + action = ET.Element('ACTION') + + perform = ET.SubElement(action, 'PERFORM') + perform.text = 'DETACHDISK' + + params = ET.SubElement(action, 'PARAMS') + + ET.SubElement(params, + 'DISK', + {'id': disk_id}) + + xml = ET.tostring(action) + + url = '/compute/%s/action' % node_id + + resp = self.connection.request(url, method='POST', data=xml) + return resp.status == httplib.ACCEPTED + + def 
detach_volume(self, volume): + # We need to find the node using this volume + for node in self.list_nodes(): + if type(node.image) is not list: + # This node has only one associated image. It is not the one we + # are after. + continue + + for disk in node.image: + if disk.id == volume.id: + # Node found. We can now detach the volume + disk_id = disk.extra['disk_id'] + return self._do_detach_volume(node.id, disk_id) + + return False + + def list_volumes(self): + return self._to_volumes(self.connection.request('/storage').object) + + def _to_volume(self, storage): + return StorageVolume(id=storage.findtext('ID'), + name=storage.findtext('NAME'), + size=int(storage.findtext('SIZE')), + driver=self.connection.driver) + + def _to_volumes(self, object): + volumes = [] + for storage in object.findall('STORAGE'): + storage_id = storage.attrib['href'].partition('/storage/')[2] + + volumes.append(self._to_volume( + self.connection.request('/storage/%s' % storage_id).object)) + + return volumes + + +class OpenNebula_3_8_NodeDriver(OpenNebula_3_6_NodeDriver): + """ + OpenNebula.org node driver for OpenNebula.org v3.8. + """ + + name = 'OpenNebula (v3.8)' + plain_auth = API_PLAIN_AUTH + + def _to_sizes(self, object): + """ + Request a list of instance types and convert that list to a list of + OpenNebulaNodeSize objects. + + Request a list of instance types from the OpenNebula web interface, + and issue a request to convert each XML object representation of an + instance type to an OpenNebulaNodeSize object. + + :return: List of instance types. 
+ :rtype: ``list`` of :class:`OpenNebulaNodeSize` + """ + sizes = [] + size_id = 1 + + attributes = [('name', str, None), ('ram', int, 'MEMORY'), + ('cpu', float, None), ('vcpu', float, None), + ('disk', str, None), ('bandwidth', float, None), + ('price', float, None)] + + for element in object.findall('INSTANCE_TYPE'): + element = self.connection.request( + ('/instance_type/%s') % (element.attrib['name'])).object + + size_kwargs = {'id': size_id, 'driver': self} + values = self._get_attributes_values(attributes=attributes, + element=element) + size_kwargs.update(values) + + size = OpenNebulaNodeSize(**size_kwargs) + sizes.append(size) + size_id += 1 + return sizes + + def _ex_connection_class_kwargs(self): + """ + Set plain_auth as an extra :class:`OpenNebulaConnection_3_8` argument + + :return: ``dict`` of :class:`OpenNebulaConnection_3_8` input arguments + """ + + return {'plain_auth': self.plain_auth} diff -Nru libcloud-0.5.0/libcloud/compute/drivers/openstack.py libcloud-0.15.1/libcloud/compute/drivers/openstack.py --- libcloud-0.5.0/libcloud/compute/drivers/openstack.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/openstack.py 2014-07-02 18:47:55.000000000 +0000 @@ -0,0 +1,2439 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +""" +OpenStack driver +""" + +try: + import simplejson as json +except ImportError: + import json + +import warnings +import base64 + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import b +from libcloud.utils.py3 import next +from libcloud.utils.py3 import urlparse + +try: + from lxml import etree as ET +except ImportError: + from xml.etree import ElementTree as ET + +from libcloud.common.openstack import OpenStackBaseConnection +from libcloud.common.openstack import OpenStackDriverMixin +from libcloud.common.types import MalformedResponseError, ProviderError +from libcloud.utils.networking import is_private_subnet +from libcloud.compute.base import NodeSize, NodeImage +from libcloud.compute.base import (NodeDriver, Node, NodeLocation, + StorageVolume, VolumeSnapshot) +from libcloud.compute.base import KeyPair +from libcloud.compute.types import NodeState, Provider +from libcloud.compute.types import KeyPairDoesNotExistError +from libcloud.pricing import get_size_price +from libcloud.common.base import Response +from libcloud.utils.xml import findall + +__all__ = [ + 'OpenStack_1_0_Response', + 'OpenStack_1_0_Connection', + 'OpenStack_1_0_NodeDriver', + 'OpenStack_1_0_SharedIpGroup', + 'OpenStack_1_0_NodeIpAddresses', + 'OpenStack_1_1_Response', + 'OpenStack_1_1_Connection', + 'OpenStack_1_1_NodeDriver', + 'OpenStack_1_1_FloatingIpPool', + 'OpenStack_1_1_FloatingIpAddress', + 'OpenStackNodeDriver' +] + +ATOM_NAMESPACE = "http://www.w3.org/2005/Atom" + +DEFAULT_API_VERSION = '1.1' + + +class OpenStackException(ProviderError): + pass + + +class OpenStackResponse(Response): + node_driver = None + + def success(self): + i = int(self.status) + return i >= 200 and i <= 299 + + def has_content_type(self, content_type): + content_type_value = self.headers.get('content-type') or '' + content_type_value = content_type_value.lower() + return 
content_type_value.find(content_type.lower()) > -1 + + def parse_body(self): + if self.status == httplib.NO_CONTENT or not self.body: + return None + + if self.has_content_type('application/xml'): + try: + return ET.XML(self.body) + except: + raise MalformedResponseError( + 'Failed to parse XML', + body=self.body, + driver=self.node_driver) + + elif self.has_content_type('application/json'): + try: + return json.loads(self.body) + except: + raise MalformedResponseError( + 'Failed to parse JSON', + body=self.body, + driver=self.node_driver) + else: + return self.body + + def parse_error(self): + text = None + body = self.parse_body() + + if self.has_content_type('application/xml'): + text = '; '.join([err.text or '' for err in body.getiterator() + if err.text]) + elif self.has_content_type('application/json'): + values = list(body.values()) + + context = self.connection.context + driver = self.connection.driver + key_pair_name = context.get('key_pair_name', None) + + if len(values) > 0 and values[0]['code'] == 404 and key_pair_name: + raise KeyPairDoesNotExistError(name=key_pair_name, + driver=driver) + elif len(values) > 0 and 'message' in values[0]: + text = ';'.join([fault_data['message'] for fault_data + in values]) + else: + text = body + else: + # while we hope a response is always one of xml or json, we have + # seen html or text in the past, its not clear we can really do + # something to make it more readable here, so we will just pass + # it along as the whole response body in the text variable. 
+ text = body + + return '%s %s %s' % (self.status, self.error, text) + + +class OpenStackComputeConnection(OpenStackBaseConnection): + # default config for http://devstack.org/ + service_type = 'compute' + service_name = 'nova' + service_region = 'RegionOne' + + def request(self, action, params=None, data='', headers=None, + method='GET'): + if not headers: + headers = {} + if not params: + params = {} + + if method in ("POST", "PUT"): + headers = {'Content-Type': self.default_content_type} + + return super(OpenStackComputeConnection, self).request( + action=action, + params=params, data=data, + method=method, headers=headers) + + +class OpenStackNodeDriver(NodeDriver, OpenStackDriverMixin): + """ + Base OpenStack node driver. Should not be used directly. + """ + api_name = 'openstack' + name = 'OpenStack' + website = 'http://openstack.org/' + + NODE_STATE_MAP = { + 'BUILD': NodeState.PENDING, + 'REBUILD': NodeState.PENDING, + 'ACTIVE': NodeState.RUNNING, + 'SUSPENDED': NodeState.TERMINATED, + 'DELETED': NodeState.TERMINATED, + 'QUEUE_RESIZE': NodeState.PENDING, + 'PREP_RESIZE': NodeState.PENDING, + 'VERIFY_RESIZE': NodeState.RUNNING, + 'PASSWORD': NodeState.PENDING, + 'RESCUE': NodeState.PENDING, + 'REBOOT': NodeState.REBOOTING, + 'HARD_REBOOT': NodeState.REBOOTING, + 'SHARE_IP': NodeState.PENDING, + 'SHARE_IP_NO_CONFIG': NodeState.PENDING, + 'DELETE_IP': NodeState.PENDING, + 'UNKNOWN': NodeState.UNKNOWN + } + + def __new__(cls, key, secret=None, secure=True, host=None, port=None, + api_version=DEFAULT_API_VERSION, **kwargs): + if cls is OpenStackNodeDriver: + if api_version == '1.0': + cls = OpenStack_1_0_NodeDriver + elif api_version == '1.1': + cls = OpenStack_1_1_NodeDriver + else: + raise NotImplementedError( + "No OpenStackNodeDriver found for API version %s" % + (api_version)) + return super(OpenStackNodeDriver, cls).__new__(cls) + + def __init__(self, *args, **kwargs): + OpenStackDriverMixin.__init__(self, **kwargs) + super(OpenStackNodeDriver, 
self).__init__(*args, **kwargs) + + def destroy_node(self, node): + uri = '/servers/%s' % (node.id) + resp = self.connection.request(uri, method='DELETE') + # The OpenStack and Rackspace documentation both say this API will + # return a 204, but in-fact, everyone everywhere agrees it actually + # returns a 202, so we are going to accept either, and someday, + # someone will fix either the implementation or the documentation to + # agree. + return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED) + + def reboot_node(self, node): + return self._reboot_node(node, reboot_type='HARD') + + def list_nodes(self, ex_all_tenants=False): + """ + List the nodes in a tenant + + :param ex_all_tenants: List nodes for all the tenants. Note: Your user + must have admin privileges for this + functionality to work. + :type ex_all_tenants: ``bool`` + """ + params = {} + if ex_all_tenants: + params = {'all_tenants': 1} + return self._to_nodes( + self.connection.request('/servers/detail', params=params).object) + + def create_volume(self, size, name, location=None, snapshot=None): + if snapshot: + raise NotImplementedError( + "create_volume does not yet support create from snapshot") + return self.connection.request('/os-volumes', + method='POST', + data={ + 'volume': { + 'display_name': name, + 'display_description': name, + 'size': size, + 'volume_type': None, + 'metadata': { + 'contents': name, + }, + 'availability_zone': location, + } + }).success() + + def destroy_volume(self, volume): + return self.connection.request('/os-volumes/%s' % volume.id, + method='DELETE').success() + + def attach_volume(self, node, volume, device="auto"): + # when "auto" or None is provided for device, openstack will let + # the guest OS pick the next available device (fi. 
/dev/vdb) + return self.connection.request( + '/servers/%s/os-volume_attachments' % node.id, + method='POST', + data={ + 'volumeAttachment': { + 'volumeId': volume.id, + 'device': device, + } + }).success() + + def detach_volume(self, volume, ex_node=None): + # when ex_node is not provided, volume is detached from all nodes + failed_nodes = [] + for attachment in volume.extra['attachments']: + if not ex_node or ex_node.id == attachment['serverId']: + response = self.connection.request( + '/servers/%s/os-volume_attachments/%s' % + (attachment['serverId'], attachment['id']), + method='DELETE') + + if not response.success(): + failed_nodes.append(attachment['serverId']) + if failed_nodes: + raise OpenStackException( + 'detach_volume failed for nodes with id: %s' % + ', '.join(failed_nodes), 500, self + ) + return True + + def list_volumes(self): + return self._to_volumes( + self.connection.request('/os-volumes').object) + + def ex_get_volume(self, volumeId): + return self._to_volume( + self.connection.request('/os-volumes/%s' % volumeId).object) + + def list_images(self, location=None, ex_only_active=True): + """ + Lists all active images + + @inherits: :class:`NodeDriver.list_images` + + :param ex_only_active: True if list only active + :type ex_only_active: ``bool`` + + """ + return self._to_images( + self.connection.request('/images/detail').object, ex_only_active) + + def get_image(self, image_id): + """ + Get an image based on a image_id + + @inherits: :class:`NodeDriver.get_image` + + :param image_id: Image identifier + :type image_id: ``str`` + + :return: A NodeImage object + :rtype: :class:`NodeImage` + + """ + return self._to_image(self.connection.request( + '/images/%s' % (image_id,)).object['image']) + + def list_sizes(self, location=None): + return self._to_sizes( + self.connection.request('/flavors/detail').object) + + def list_locations(self): + return [NodeLocation(0, '', '', self)] + + def _ex_connection_class_kwargs(self): + return 
self.openstack_connection_kwargs() + + def ex_get_node_details(self, node_id): + """ + Lists details of the specified server. + + :param node_id: ID of the node which should be used + :type node_id: ``str`` + + :rtype: :class:`Node` + """ + # @TODO: Remove this if in 0.6 + if isinstance(node_id, Node): + node_id = node_id.id + + uri = '/servers/%s' % (node_id) + resp = self.connection.request(uri, method='GET') + if resp.status == httplib.NOT_FOUND: + return None + + return self._to_node_from_obj(resp.object) + + def ex_soft_reboot_node(self, node): + """ + Soft reboots the specified server + + :param node: node + :type node: :class:`Node` + + :rtype: ``bool`` + """ + return self._reboot_node(node, reboot_type='SOFT') + + def ex_hard_reboot_node(self, node): + """ + Hard reboots the specified server + + :param node: node + :type node: :class:`Node` + + :rtype: ``bool`` + """ + return self._reboot_node(node, reboot_type='HARD') + + +class OpenStackNodeSize(NodeSize): + """ + NodeSize class for the OpenStack.org driver. 
+ + Following the example of OpenNebula.org driver + and following guidelines: + https://issues.apache.org/jira/browse/LIBCLOUD-119 + """ + + def __init__(self, id, name, ram, disk, bandwidth, price, driver, + vcpus=None): + super(OpenStackNodeSize, self).__init__(id=id, name=name, ram=ram, + disk=disk, + bandwidth=bandwidth, + price=price, driver=driver) + self.vcpus = vcpus + + def __repr__(self): + return (('') + % (self.id, self.name, self.ram, self.disk, self.bandwidth, + self.price, self.driver.name, self.vcpus)) + + +class OpenStack_1_0_Response(OpenStackResponse): + def __init__(self, *args, **kwargs): + # done because of a circular reference from + # NodeDriver -> Connection -> Response + self.node_driver = OpenStack_1_0_NodeDriver + super(OpenStack_1_0_Response, self).__init__(*args, **kwargs) + + +class OpenStack_1_0_Connection(OpenStackComputeConnection): + responseCls = OpenStack_1_0_Response + default_content_type = 'application/xml; charset=UTF-8' + accept_format = 'application/xml' + XML_NAMESPACE = 'http://docs.rackspacecloud.com/servers/api/v1.0' + + +class OpenStack_1_0_NodeDriver(OpenStackNodeDriver): + """ + OpenStack node driver. + + Extra node attributes: + - password: root password, available after create. 
+ - hostId: represents the host your cloud server runs on + - imageId: id of image + - flavorId: id of flavor + """ + connectionCls = OpenStack_1_0_Connection + type = Provider.OPENSTACK + + features = {'create_node': ['generates_password']} + + def __init__(self, *args, **kwargs): + self._ex_force_api_version = str(kwargs.pop('ex_force_api_version', + None)) + self.XML_NAMESPACE = self.connectionCls.XML_NAMESPACE + super(OpenStack_1_0_NodeDriver, self).__init__(*args, **kwargs) + + def _to_images(self, object, ex_only_active): + images = [] + for image in findall(object, 'image', self.XML_NAMESPACE): + if ex_only_active and image.get('status') != 'ACTIVE': + continue + images.append(self._to_image(image)) + + return images + + def _to_image(self, element): + return NodeImage(id=element.get('id'), + name=element.get('name'), + driver=self.connection.driver, + extra={'updated': element.get('updated'), + 'created': element.get('created'), + 'status': element.get('status'), + 'serverId': element.get('serverId'), + 'progress': element.get('progress'), + 'minDisk': element.get('minDisk'), + 'minRam': element.get('minRam') + } + ) + + def _change_password_or_name(self, node, name=None, password=None): + uri = '/servers/%s' % (node.id) + + if not name: + name = node.name + + body = {'xmlns': self.XML_NAMESPACE, + 'name': name} + + if password is not None: + body['adminPass'] = password + + server_elm = ET.Element('server', body) + + resp = self.connection.request( + uri, method='PUT', data=ET.tostring(server_elm)) + + if resp.status == httplib.NO_CONTENT and password is not None: + node.extra['password'] = password + + return resp.status == httplib.NO_CONTENT + + def create_node(self, **kwargs): + """ + Create a new node + + @inherits: :class:`NodeDriver.create_node` + + :keyword ex_metadata: Key/Value metadata to associate with a node + :type ex_metadata: ``dict`` + + :keyword ex_files: File Path => File contents to create on + the node + :type ex_files: ``dict`` + + 
:keyword ex_shared_ip_group_id: The server is launched into + that shared IP group + :type ex_shared_ip_group_id: ``str`` + """ + name = kwargs['name'] + image = kwargs['image'] + size = kwargs['size'] + + attributes = {'xmlns': self.XML_NAMESPACE, + 'name': name, + 'imageId': str(image.id), + 'flavorId': str(size.id)} + + if 'ex_shared_ip_group' in kwargs: + # Deprecate this. Be explicit and call the variable + # ex_shared_ip_group_id since user needs to pass in the id, not the + # name. + warnings.warn('ex_shared_ip_group argument is deprecated.' + ' Please use ex_shared_ip_group_id') + + if 'ex_shared_ip_group_id' in kwargs: + shared_ip_group_id = kwargs['ex_shared_ip_group_id'] + attributes['sharedIpGroupId'] = shared_ip_group_id + + server_elm = ET.Element('server', attributes) + + metadata_elm = self._metadata_to_xml(kwargs.get("ex_metadata", {})) + if metadata_elm: + server_elm.append(metadata_elm) + + files_elm = self._files_to_xml(kwargs.get("ex_files", {})) + if files_elm: + server_elm.append(files_elm) + + resp = self.connection.request("/servers", + method='POST', + data=ET.tostring(server_elm)) + return self._to_node(resp.object) + + def ex_set_password(self, node, password): + """ + Sets the Node's root password. + + This will reboot the instance to complete the operation. + + :class:`Node.extra['password']` will be set to the new value if the + operation was successful. + + :param node: node to set password + :type node: :class:`Node` + + :param password: new password. + :type password: ``str`` + + :rtype: ``bool`` + """ + return self._change_password_or_name(node, password=password) + + def ex_set_server_name(self, node, name): + """ + Sets the Node's name. + + This will reboot the instance to complete the operation. 
+ + :param node: node to set name + :type node: :class:`Node` + + :param name: new name + :type name: ``str`` + + :rtype: ``bool`` + """ + return self._change_password_or_name(node, name=name) + + def ex_resize(self, node, size): + """ + Change an existing server flavor / scale the server up or down. + + :param node: node to resize. + :type node: :class:`Node` + + :param size: new size. + :type size: :class:`NodeSize` + + :rtype: ``bool`` + """ + elm = ET.Element( + 'resize', + {'xmlns': self.XML_NAMESPACE, + 'flavorId': str(size.id)} + ) + + resp = self.connection.request("/servers/%s/action" % (node.id), + method='POST', + data=ET.tostring(elm)) + return resp.status == httplib.ACCEPTED + + def ex_confirm_resize(self, node): + """ + Confirm a resize request which is currently in progress. If a resize + request is not explicitly confirmed or reverted it's automatically + confirmed after 24 hours. + + For more info refer to the API documentation: http://goo.gl/zjFI1 + + :param node: node for which the resize request will be confirmed. + :type node: :class:`Node` + + :rtype: ``bool`` + """ + elm = ET.Element( + 'confirmResize', + {'xmlns': self.XML_NAMESPACE}, + ) + + resp = self.connection.request("/servers/%s/action" % (node.id), + method='POST', + data=ET.tostring(elm)) + return resp.status == httplib.NO_CONTENT + + def ex_revert_resize(self, node): + """ + Revert a resize request which is currently in progress. + All resizes are automatically confirmed after 24 hours if they have + not already been confirmed explicitly or reverted. + + For more info refer to the API documentation: http://goo.gl/AizBu + + :param node: node for which the resize request will be reverted. 
+ :type node: :class:`Node` + + :rtype: ``bool`` + """ + elm = ET.Element( + 'revertResize', + {'xmlns': self.XML_NAMESPACE} + ) + + resp = self.connection.request("/servers/%s/action" % (node.id), + method='POST', + data=ET.tostring(elm)) + return resp.status == httplib.NO_CONTENT + + def ex_rebuild(self, node_id, image_id): + """ + Rebuilds the specified server. + + :param node_id: ID of the node which should be used + :type node_id: ``str`` + + :param image_id: ID of the image which should be used + :type image_id: ``str`` + + :rtype: ``bool`` + """ + # @TODO: Remove those ifs in 0.6 + if isinstance(node_id, Node): + node_id = node_id.id + + if isinstance(image_id, NodeImage): + image_id = image_id.id + + elm = ET.Element( + 'rebuild', + {'xmlns': self.XML_NAMESPACE, + 'imageId': image_id} + ) + + resp = self.connection.request("/servers/%s/action" % node_id, + method='POST', + data=ET.tostring(elm)) + return resp.status == httplib.ACCEPTED + + def ex_create_ip_group(self, group_name, node_id=None): + """ + Creates a shared IP group. + + :param group_name: group name which should be used + :type group_name: ``str`` + + :param node_id: ID of the node which should be used + :type node_id: ``str`` + + :rtype: ``bool`` + """ + # @TODO: Remove this if in 0.6 + if isinstance(node_id, Node): + node_id = node_id.id + + group_elm = ET.Element( + 'sharedIpGroup', + {'xmlns': self.XML_NAMESPACE, + 'name': group_name} + ) + + if node_id: + ET.SubElement( + group_elm, + 'server', + {'id': node_id} + ) + + resp = self.connection.request('/shared_ip_groups', + method='POST', + data=ET.tostring(group_elm)) + return self._to_shared_ip_group(resp.object) + + def ex_list_ip_groups(self, details=False): + """ + Lists IDs and names for shared IP groups. + If details lists all details for shared IP groups. 
+ + :param details: True if details is required + :type details: ``bool`` + + :rtype: ``list`` of :class:`OpenStack_1_0_SharedIpGroup` + """ + uri = '/shared_ip_groups/detail' if details else '/shared_ip_groups' + resp = self.connection.request(uri, + method='GET') + groups = findall(resp.object, 'sharedIpGroup', + self.XML_NAMESPACE) + return [self._to_shared_ip_group(el) for el in groups] + + def ex_delete_ip_group(self, group_id): + """ + Deletes the specified shared IP group. + + :param group_id: group id which should be used + :type group_id: ``str`` + + :rtype: ``bool`` + """ + uri = '/shared_ip_groups/%s' % group_id + resp = self.connection.request(uri, method='DELETE') + return resp.status == httplib.NO_CONTENT + + def ex_share_ip(self, group_id, node_id, ip, configure_node=True): + """ + Shares an IP address to the specified server. + + :param group_id: group id which should be used + :type group_id: ``str`` + + :param node_id: ID of the node which should be used + :type node_id: ``str`` + + :param ip: ip which should be used + :type ip: ``str`` + + :param configure_node: configure node + :type configure_node: ``bool`` + + :rtype: ``bool`` + """ + # @TODO: Remove this if in 0.6 + if isinstance(node_id, Node): + node_id = node_id.id + + if configure_node: + str_configure = 'true' + else: + str_configure = 'false' + + elm = ET.Element( + 'shareIp', + {'xmlns': self.XML_NAMESPACE, + 'sharedIpGroupId': group_id, + 'configureServer': str_configure}, + ) + + uri = '/servers/%s/ips/public/%s' % (node_id, ip) + + resp = self.connection.request(uri, + method='PUT', + data=ET.tostring(elm)) + return resp.status == httplib.ACCEPTED + + def ex_unshare_ip(self, node_id, ip): + """ + Removes a shared IP address from the specified server. 
+ + :param node_id: ID of the node which should be used + :type node_id: ``str`` + + :param ip: ip which should be used + :type ip: ``str`` + + :rtype: ``bool`` + """ + # @TODO: Remove this if in 0.6 + if isinstance(node_id, Node): + node_id = node_id.id + + uri = '/servers/%s/ips/public/%s' % (node_id, ip) + + resp = self.connection.request(uri, + method='DELETE') + return resp.status == httplib.ACCEPTED + + def ex_list_ip_addresses(self, node_id): + """ + List all server addresses. + + :param node_id: ID of the node which should be used + :type node_id: ``str`` + + :rtype: :class:`OpenStack_1_0_NodeIpAddresses` + """ + # @TODO: Remove this if in 0.6 + if isinstance(node_id, Node): + node_id = node_id.id + + uri = '/servers/%s/ips' % node_id + resp = self.connection.request(uri, + method='GET') + return self._to_ip_addresses(resp.object) + + def _metadata_to_xml(self, metadata): + if len(metadata) == 0: + return None + + metadata_elm = ET.Element('metadata') + for k, v in list(metadata.items()): + meta_elm = ET.SubElement(metadata_elm, 'meta', {'key': str(k)}) + meta_elm.text = str(v) + + return metadata_elm + + def _files_to_xml(self, files): + if len(files) == 0: + return None + + personality_elm = ET.Element('personality') + for k, v in list(files.items()): + file_elm = ET.SubElement(personality_elm, + 'file', + {'path': str(k)}) + file_elm.text = base64.b64encode(b(v)) + + return personality_elm + + def _reboot_node(self, node, reboot_type='SOFT'): + resp = self._node_action(node, ['reboot', ('type', reboot_type)]) + return resp.status == httplib.ACCEPTED + + def _node_action(self, node, body): + if isinstance(body, list): + attr = ' '.join(['%s="%s"' % (item[0], item[1]) + for item in body[1:]]) + body = '<%s xmlns="%s" %s/>' % (body[0], self.XML_NAMESPACE, attr) + uri = '/servers/%s/action' % (node.id) + resp = self.connection.request(uri, method='POST', data=body) + return resp + + def _to_nodes(self, object): + node_elements = findall(object, 'server', 
self.XML_NAMESPACE) + return [self._to_node(el) for el in node_elements] + + def _to_node_from_obj(self, obj): + return self._to_node(findall(obj, 'server', self.XML_NAMESPACE)[0]) + + def _to_node(self, el): + def get_ips(el): + return [ip.get('addr') for ip in el] + + def get_meta_dict(el): + d = {} + for meta in el: + d[meta.get('key')] = meta.text + return d + + public_ip = get_ips(findall(el, 'addresses/public/ip', + self.XML_NAMESPACE)) + private_ip = get_ips(findall(el, 'addresses/private/ip', + self.XML_NAMESPACE)) + metadata = get_meta_dict(findall(el, 'metadata/meta', + self.XML_NAMESPACE)) + + n = Node(id=el.get('id'), + name=el.get('name'), + state=self.NODE_STATE_MAP.get( + el.get('status'), NodeState.UNKNOWN), + public_ips=public_ip, + private_ips=private_ip, + driver=self.connection.driver, + extra={ + 'password': el.get('adminPass'), + 'hostId': el.get('hostId'), + 'imageId': el.get('imageId'), + 'flavorId': el.get('flavorId'), + 'uri': "https://%s%s/servers/%s" % ( + self.connection.host, + self.connection.request_path, el.get('id')), + 'metadata': metadata}) + return n + + def _to_sizes(self, object): + elements = findall(object, 'flavor', self.XML_NAMESPACE) + return [self._to_size(el) for el in elements] + + def _to_size(self, el): + vcpus = int(el.get('vcpus')) if el.get('vcpus', None) else None + return OpenStackNodeSize(id=el.get('id'), + name=el.get('name'), + ram=int(el.get('ram')), + disk=int(el.get('disk')), + # XXX: needs hardcode + vcpus=vcpus, + bandwidth=None, + # Hardcoded + price=self._get_size_price(el.get('id')), + driver=self.connection.driver) + + def ex_limits(self): + """ + Extra call to get account's limits, such as + rates (for example amount of POST requests per day) + and absolute limits like total amount of available + RAM to be used by servers. 
+ + :return: dict with keys 'rate' and 'absolute' + :rtype: ``dict`` + """ + + def _to_rate(el): + rate = {} + for item in list(el.items()): + rate[item[0]] = item[1] + + return rate + + def _to_absolute(el): + return {el.get('name'): el.get('value')} + + limits = self.connection.request("/limits").object + rate = [_to_rate(el) for el in findall(limits, 'rate/limit', + self.XML_NAMESPACE)] + absolute = {} + for item in findall(limits, 'absolute/limit', + self.XML_NAMESPACE): + absolute.update(_to_absolute(item)) + + return {"rate": rate, "absolute": absolute} + + def create_image(self, node, name, description=None, reboot=True): + """Create an image for node. + + @inherits: :class:`NodeDriver.create_image` + + :param node: node to use as a base for image + :type node: :class:`Node` + + :param name: name for new image + :type name: ``str`` + + :rtype: :class:`NodeImage` + """ + + image_elm = ET.Element( + 'image', + {'xmlns': self.XML_NAMESPACE, + 'name': name, + 'serverId': node.id} + ) + + return self._to_image( + self.connection.request("/images", method="POST", + data=ET.tostring(image_elm)).object) + + def delete_image(self, image): + """Delete an image for node. 
+ + @inherits: :class:`NodeDriver.delete_image` + + :param image: the image to be deleted + :type image: :class:`NodeImage` + + :rtype: ``bool`` + """ + uri = '/images/%s' % image.id + resp = self.connection.request(uri, method='DELETE') + return resp.status == httplib.NO_CONTENT + + def _to_shared_ip_group(self, el): + servers_el = findall(el, 'servers', self.XML_NAMESPACE) + if servers_el: + servers = [s.get('id') + for s in findall(servers_el[0], 'server', + self.XML_NAMESPACE)] + else: + servers = None + return OpenStack_1_0_SharedIpGroup(id=el.get('id'), + name=el.get('name'), + servers=servers) + + def _to_ip_addresses(self, el): + public_ips = [ip.get('addr') for ip in findall( + findall(el, 'public', self.XML_NAMESPACE)[0], + 'ip', self.XML_NAMESPACE)] + private_ips = [ip.get('addr') for ip in findall( + findall(el, 'private', self.XML_NAMESPACE)[0], + 'ip', self.XML_NAMESPACE)] + + return OpenStack_1_0_NodeIpAddresses(public_ips, private_ips) + + def _get_size_price(self, size_id): + try: + return get_size_price(driver_type='compute', + driver_name=self.api_name, + size_id=size_id) + except KeyError: + return 0.0 + + +class OpenStack_1_0_SharedIpGroup(object): + """ + Shared IP group info. + """ + + def __init__(self, id, name, servers=None): + self.id = str(id) + self.name = name + self.servers = servers + + +class OpenStack_1_0_NodeIpAddresses(object): + """ + List of public and private IP addresses of a Node. + """ + + def __init__(self, public_addresses, private_addresses): + self.public_addresses = public_addresses + self.private_addresses = private_addresses + + +class OpenStack_1_1_Response(OpenStackResponse): + def __init__(self, *args, **kwargs): + # done because of a circular reference from + # NodeDriver -> Connection -> Response + self.node_driver = OpenStack_1_1_NodeDriver + super(OpenStack_1_1_Response, self).__init__(*args, **kwargs) + + +class OpenStackNetwork(object): + """ + A Virtual Network. 
+ """ + + def __init__(self, id, name, cidr, driver, extra=None): + self.id = str(id) + self.name = name + self.cidr = cidr + self.driver = driver + self.extra = extra or {} + + def __repr__(self): + return '' % (self.id, + self.name, + self.cidr,) + + +class OpenStackSecurityGroup(object): + """ + A Security Group. + """ + + def __init__(self, id, tenant_id, name, description, driver, rules=None, + extra=None): + """ + Constructor. + + :keyword id: Group id. + :type id: ``str`` + + :keyword tenant_id: Owner of the security group. + :type tenant_id: ``str`` + + :keyword name: Human-readable name for the security group. Might + not be unique. + :type name: ``str`` + + :keyword description: Human-readable description of a security + group. + :type description: ``str`` + + :keyword rules: Rules associated with this group. + :type rules: ``list`` of + :class:`OpenStackSecurityGroupRule` + + :keyword extra: Extra attributes associated with this group. + :type extra: ``dict`` + """ + self.id = id + self.tenant_id = tenant_id + self.name = name + self.description = description + self.driver = driver + self.rules = rules or [] + self.extra = extra or {} + + def __repr__(self): + return ('' % (self.id, self.tenant_id, self.name, + self.description)) + + +class OpenStackSecurityGroupRule(object): + """ + A Rule of a Security Group. + """ + + def __init__(self, id, parent_group_id, ip_protocol, from_port, to_port, + driver, ip_range=None, group=None, tenant_id=None, + extra=None): + """ + Constructor. + + :keyword id: Rule id. + :type id: ``str`` + + :keyword parent_group_id: ID of the parent security group. + :type parent_group_id: ``str`` + + :keyword ip_protocol: IP Protocol (icmp, tcp, udp, etc). + :type ip_protocol: ``str`` + + :keyword from_port: Port at start of range. + :type from_port: ``int`` + + :keyword to_port: Port at end of range. + :type to_port: ``int`` + + :keyword ip_range: CIDR for address range. 
+ :type ip_range: ``str`` + + :keyword group: Name of a source security group to apply to rule. + :type group: ``str`` + + :keyword tenant_id: Owner of the security group. + :type tenant_id: ``str`` + + :keyword extra: Extra attributes associated with this rule. + :type extra: ``dict`` + """ + self.id = id + self.parent_group_id = parent_group_id + self.ip_protocol = ip_protocol + self.from_port = from_port + self.to_port = to_port + self.driver = driver + self.ip_range = '' + self.group = {} + + if group is None: + self.ip_range = ip_range + else: + self.group = {'name': group, 'tenant_id': tenant_id} + + self.tenant_id = tenant_id + self.extra = extra or {} + + def __repr__(self): + return ('' % (self.id, + self.parent_group_id, self.ip_protocol, self.from_port, + self.to_port)) + + +class OpenStackKeyPair(object): + """ + A KeyPair. + """ + + def __init__(self, name, fingerprint, public_key, driver, private_key=None, + extra=None): + """ + Constructor. + + :keyword name: Name of the KeyPair. + :type name: ``str`` + + :keyword fingerprint: Fingerprint of the KeyPair + :type fingerprint: ``str`` + + :keyword public_key: Public key in OpenSSH format. + :type public_key: ``str`` + + :keyword private_key: Private key in PEM format. + :type private_key: ``str`` + + :keyword extra: Extra attributes associated with this KeyPair. + :type extra: ``dict`` + """ + self.name = name + self.fingerprint = fingerprint + self.public_key = public_key + self.private_key = private_key + self.driver = driver + self.extra = extra or {} + + def __repr__(self): + return ('' + % (self.name, self.fingerprint, self.public_key)) + + +class OpenStack_1_1_Connection(OpenStackComputeConnection): + responseCls = OpenStack_1_1_Response + accept_format = 'application/json' + default_content_type = 'application/json; charset=UTF-8' + + def encode_data(self, data): + return json.dumps(data) + + +class OpenStack_1_1_NodeDriver(OpenStackNodeDriver): + """ + OpenStack node driver. 
+ """ + connectionCls = OpenStack_1_1_Connection + type = Provider.OPENSTACK + + features = {"create_node": ["generates_password"]} + _networks_url_prefix = '/os-networks' + + def __init__(self, *args, **kwargs): + self._ex_force_api_version = str(kwargs.pop('ex_force_api_version', + None)) + super(OpenStack_1_1_NodeDriver, self).__init__(*args, **kwargs) + + def create_node(self, **kwargs): + """Create a new node + + @inherits: :class:`NodeDriver.create_node` + + :keyword ex_keyname: The name of the key pair + :type ex_keyname: ``str`` + + :keyword ex_userdata: String containing user data + see + https://help.ubuntu.com/community/CloudInit + :type ex_userdata: ``str`` + + :keyword ex_security_groups: List of security groups to assign to + the node + :type ex_security_groups: ``list`` of + :class:`OpenStackSecurityGroup` + + :keyword ex_metadata: Key/Value metadata to associate with a node + :type ex_metadata: ``dict`` + + :keyword ex_files: File Path => File contents to create on + the no de + :type ex_files: ``dict`` + + + :keyword networks: The server is launched into a set of Networks. + :type networks: :class:`OpenStackNetwork` + + :keyword ex_disk_config: Name of the disk configuration. + Can be either ``AUTO`` or ``MANUAL``. 
+ :type ex_disk_config: ``str`` + + :keyword ex_admin_pass: The root password for the node + :type ex_admin_pass: ``str`` + + :keyword ex_availability_zone: Nova availability zone for the node + :type ex_availability_zone: ``str`` + """ + + server_params = self._create_args_to_params(None, **kwargs) + + resp = self.connection.request("/servers", + method='POST', + data={'server': server_params}) + + create_response = resp.object['server'] + server_resp = self.connection.request( + '/servers/%s' % create_response['id']) + server_object = server_resp.object['server'] + + # adminPass is not always present + # http://docs.openstack.org/essex/openstack-compute/admin/ + # content/configuring-compute-API.html#d6e1833 + server_object['adminPass'] = create_response.get('adminPass', None) + + return self._to_node(server_object) + + def _to_images(self, obj, ex_only_active): + images = [] + for image in obj['images']: + if ex_only_active and image.get('status') != 'ACTIVE': + continue + images.append(self._to_image(image)) + + return images + + def _to_image(self, api_image): + server = api_image.get('server', {}) + return NodeImage( + id=api_image['id'], + name=api_image['name'], + driver=self, + extra=dict( + updated=api_image['updated'], + created=api_image['created'], + status=api_image['status'], + progress=api_image.get('progress'), + metadata=api_image.get('metadata'), + serverId=server.get('id'), + minDisk=api_image.get('minDisk'), + minRam=api_image.get('minRam'), + ) + ) + + def _to_nodes(self, obj): + servers = obj['servers'] + return [self._to_node(server) for server in servers] + + def _to_volumes(self, obj): + volumes = obj['volumes'] + return [self._to_volume(volume) for volume in volumes] + + def _to_snapshots(self, obj): + snapshots = obj['snapshots'] + return [self._to_snapshot(snapshot) for snapshot in snapshots] + + def _to_sizes(self, obj): + flavors = obj['flavors'] + return [self._to_size(flavor) for flavor in flavors] + + def 
_create_args_to_params(self, node, **kwargs): + server_params = { + 'name': kwargs.get('name'), + 'metadata': kwargs.get('ex_metadata', {}), + 'personality': self._files_to_personality(kwargs.get("ex_files", + {})) + } + + if 'ex_availability_zone' in kwargs: + server_params['availability_zone'] = kwargs['ex_availability_zone'] + + if 'ex_keyname' in kwargs: + server_params['key_name'] = kwargs['ex_keyname'] + + if 'ex_userdata' in kwargs: + server_params['user_data'] = base64.b64encode( + b(kwargs['ex_userdata'])).decode('ascii') + + if 'ex_disk_config' in kwargs: + server_params['OS-DCF:diskConfig'] = kwargs['ex_disk_config'] + + if 'ex_admin_pass' in kwargs: + server_params['adminPass'] = kwargs['ex_admin_pass'] + + if 'networks' in kwargs: + networks = kwargs['networks'] + networks = [{'uuid': network.id} for network in networks] + server_params['networks'] = networks + + if 'ex_security_groups' in kwargs: + server_params['security_groups'] = [] + for security_group in kwargs['ex_security_groups']: + name = security_group.name + server_params['security_groups'].append({'name': name}) + + if 'name' in kwargs: + server_params['name'] = kwargs.get('name') + else: + server_params['name'] = node.name + + if 'image' in kwargs: + server_params['imageRef'] = kwargs.get('image').id + else: + server_params['imageRef'] = node.extra.get('imageId') + + if 'size' in kwargs: + server_params['flavorRef'] = kwargs.get('size').id + else: + server_params['flavorRef'] = node.extra.get('flavorId') + + return server_params + + def _files_to_personality(self, files): + rv = [] + + for k, v in list(files.items()): + rv.append({'path': k, 'contents': base64.b64encode(b(v))}) + + return rv + + def _reboot_node(self, node, reboot_type='SOFT'): + resp = self._node_action(node, 'reboot', type=reboot_type) + return resp.status == httplib.ACCEPTED + + def ex_set_password(self, node, password): + """ + Changes the administrator password for a specified server. 
+ + :param node: Node to rebuild. + :type node: :class:`Node` + + :param password: The administrator password. + :type password: ``str`` + + :rtype: ``bool`` + """ + resp = self._node_action(node, 'changePassword', adminPass=password) + node.extra['password'] = password + return resp.status == httplib.ACCEPTED + + def ex_rebuild(self, node, image, **kwargs): + """ + Rebuild a Node. + + :param node: Node to rebuild. + :type node: :class:`Node` + + :param image: New image to use. + :type image: :class:`NodeImage` + + :keyword ex_metadata: Key/Value metadata to associate with a node + :type ex_metadata: ``dict`` + + :keyword ex_files: File Path => File contents to create on + the no de + :type ex_files: ``dict`` + + :keyword ex_keyname: Name of existing public key to inject into + instance + :type ex_keyname: ``str`` + + :keyword ex_userdata: String containing user data + see + https://help.ubuntu.com/community/CloudInit + :type ex_userdata: ``str`` + + :keyword ex_security_groups: List of security groups to assign to + the node + :type ex_security_groups: ``list`` of + :class:`OpenStackSecurityGroup` + + :keyword ex_disk_config: Name of the disk configuration. + Can be either ``AUTO`` or ``MANUAL``. + :type ex_disk_config: ``str`` + + :rtype: ``bool`` + """ + server_params = self._create_args_to_params(node, image=image, + **kwargs) + resp = self._node_action(node, 'rebuild', **server_params) + return resp.status == httplib.ACCEPTED + + def ex_resize(self, node, size): + """ + Change a node size. + + :param node: Node to resize. + :type node: :class:`Node` + + :type size: :class:`NodeSize` + :param size: New size to use. + + :rtype: ``bool`` + """ + server_params = self._create_args_to_params(node, size=size) + resp = self._node_action(node, 'resize', **server_params) + return resp.status == httplib.ACCEPTED + + def ex_confirm_resize(self, node): + """ + Confirms a pending resize action. + + :param node: Node to resize. 
+ :type node: :class:`Node` + + :rtype: ``bool`` + """ + resp = self._node_action(node, 'confirmResize') + return resp.status == httplib.NO_CONTENT + + def ex_revert_resize(self, node): + """ + Cancels and reverts a pending resize action. + + :param node: Node to resize. + :type node: :class:`Node` + + :rtype: ``bool`` + """ + resp = self._node_action(node, 'revertResize') + return resp.status == httplib.ACCEPTED + + def create_image(self, node, name, metadata=None): + """ + Creates a new image. + + :param node: Node + :type node: :class:`Node` + + :param name: The name for the new image. + :type name: ``str`` + + :param metadata: Key and value pairs for metadata. + :type metadata: ``dict`` + + :rtype: :class:`NodeImage` + """ + optional_params = {} + if metadata: + optional_params['metadata'] = metadata + resp = self._node_action(node, 'createImage', name=name, + **optional_params) + image_id = self._extract_image_id_from_url(resp.headers['location']) + return self.get_image(image_id=image_id) + + def ex_set_server_name(self, node, name): + """ + Sets the Node's name. + + :param node: Node + :type node: :class:`Node` + + :param name: The name of the server. + :type name: ``str`` + + :rtype: :class:`Node` + """ + return self._update_node(node, name=name) + + def ex_get_metadata(self, node): + """ + Get a Node's metadata. + + :param node: Node + :type node: :class:`Node` + + :return: Key/Value metadata associated with node. + :rtype: ``dict`` + """ + return self.connection.request( + '/servers/%s/metadata' % (node.id,), + method='GET',).object['metadata'] + + def ex_set_metadata(self, node, metadata): + """ + Sets the Node's metadata. 
+ + :param node: Node + :type node: :class:`Node` + + :param metadata: Key/Value metadata to associate with a node + :type metadata: ``dict`` + + :rtype: ``dict`` + """ + return self.connection.request( + '/servers/%s/metadata' % (node.id,), method='PUT', + data={'metadata': metadata} + ).object['metadata'] + + def ex_update_node(self, node, **node_updates): + """ + Update the Node's editable attributes. The OpenStack API currently + supports editing name and IPv4/IPv6 access addresses. + + The driver currently only supports updating the node name. + + :param node: Node + :type node: :class:`Node` + + :keyword name: New name for the server + :type name: ``str`` + + :rtype: :class:`Node` + """ + potential_data = self._create_args_to_params(node, **node_updates) + updates = {'name': potential_data['name']} + return self._update_node(node, **updates) + + def _to_networks(self, obj): + networks = obj['networks'] + return [self._to_network(network) for network in networks] + + def _to_network(self, obj): + return OpenStackNetwork(id=obj['id'], + name=obj['label'], + cidr=obj.get('cidr', None), + driver=self) + + def ex_list_networks(self): + """ + Get a list of Networks that are available. + + :rtype: ``list`` of :class:`OpenStackNetwork` + """ + response = self.connection.request(self._networks_url_prefix).object + return self._to_networks(response) + + def ex_create_network(self, name, cidr): + """ + Create a new Network + + :param name: Name of network which should be used + :type name: ``str`` + + :param cidr: cidr of network which should be used + :type cidr: ``str`` + + :rtype: :class:`OpenStackNetwork` + """ + data = {'network': {'cidr': cidr, 'label': name}} + response = self.connection.request(self._networks_url_prefix, + method='POST', data=data).object + return self._to_network(response['network']) + + def ex_delete_network(self, network): + """ + Get a list of NodeNetorks that are available. 
+ + :param network: Network which should be used + :type network: :class:`OpenStackNetwork` + + :rtype: ``bool`` + """ + resp = self.connection.request('%s/%s' % (self._networks_url_prefix, + network.id), + method='DELETE') + return resp.status == httplib.ACCEPTED + + def ex_get_console_output(self, node, length=None): + """ + Get console output + + :param node: node + :type node: :class:`Node` + + :param length: Optional number of lines to fetch from the + console log + :type length: ``int`` + + :return: Dictionary with the output + :rtype: ``dict`` + """ + + data = { + "os-getConsoleOutput": { + "length": length + } + } + + resp = self.connection.request('/servers/%s/action' % node.id, + method='POST', data=data).object + return resp + + def ex_list_snapshots(self): + return self._to_snapshots( + self.connection.request('/os-snapshots').object) + + def ex_create_snapshot(self, volume, name, description=None, force=False): + """ + Create a snapshot based off of a volume. + + :param volume: volume + :type volume: :class:`StorageVolume` + + :keyword name: New name for the volume snapshot + :type name: ``str`` + + :keyword description: Description of the snapshot (optional) + :type description: ``str`` + + :keyword force: Whether to force creation (optional) + :type force: ``bool`` + + :rtype: :class:`VolumeSnapshot` + """ + data = {'snapshot': {'display_name': name, + 'display_description': description, + 'volume_id': volume.id, + 'force': force}} + + return self._to_snapshot(self.connection.request('/os-snapshots', + method='POST', + data=data).object) + + def ex_delete_snapshot(self, snapshot): + """ + Delete a VolumeSnapshot + + :param snapshot: snapshot + :type snapshot: :class:`VolumeSnapshot` + + :rtype: ``bool`` + """ + resp = self.connection.request('/os-snapshots/%s' % snapshot.id, + method='DELETE') + return resp.status == httplib.NO_CONTENT + + def _to_security_group_rules(self, obj): + return [self._to_security_group_rule(security_group_rule) for + 
security_group_rule in obj] + + def _to_security_group_rule(self, obj): + ip_range = group = tenant_id = None + if obj['group'] == {}: + ip_range = obj['ip_range'].get('cidr', None) + else: + group = obj['group'].get('name', None) + tenant_id = obj['group'].get('tenant_id', None) + + return OpenStackSecurityGroupRule( + id=obj['id'], parent_group_id=obj['parent_group_id'], + ip_protocol=obj['ip_protocol'], from_port=obj['from_port'], + to_port=obj['to_port'], driver=self, ip_range=ip_range, + group=group, tenant_id=tenant_id) + + def _to_security_groups(self, obj): + security_groups = obj['security_groups'] + return [self._to_security_group(security_group) for security_group in + security_groups] + + def _to_security_group(self, obj): + rules = self._to_security_group_rules(obj.get('rules', [])) + return OpenStackSecurityGroup(id=obj['id'], + tenant_id=obj['tenant_id'], + name=obj['name'], + description=obj.get('description', ''), + rules=rules, + driver=self) + + def ex_list_security_groups(self): + """ + Get a list of Security Groups that are available. + + :rtype: ``list`` of :class:`OpenStackSecurityGroup` + """ + return self._to_security_groups( + self.connection.request('/os-security-groups').object) + + def ex_get_node_security_groups(self, node): + """ + Get Security Groups of the specified server. 
+ + :rtype: ``list`` of :class:`OpenStackSecurityGroup` + """ + return self._to_security_groups( + self.connection.request('/servers/%s/os-security-groups' % + (node.id)).object) + + def ex_create_security_group(self, name, description): + """ + Create a new Security Group + + :param name: Name of the new Security Group + :type name: ``str`` + + :param description: Description of the new Security Group + :type description: ``str`` + + :rtype: :class:`OpenStackSecurityGroup` + """ + return self._to_security_group(self.connection.request( + '/os-security-groups', method='POST', + data={'security_group': {'name': name, 'description': description}} + ).object['security_group']) + + def ex_delete_security_group(self, security_group): + """ + Delete a Security Group. + + :param security_group: Security Group should be deleted + :type security_group: :class:`OpenStackSecurityGroup` + + :rtype: ``bool`` + """ + resp = self.connection.request('/os-security-groups/%s' % + (security_group.id), + method='DELETE') + return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED) + + def ex_create_security_group_rule(self, security_group, ip_protocol, + from_port, to_port, cidr=None, + source_security_group=None): + """ + Create a new Rule in a Security Group + + :param security_group: Security Group in which to add the rule + :type security_group: :class:`OpenStackSecurityGroup` + + :param ip_protocol: Protocol to which this rule applies + Examples: tcp, udp, ... 
+ :type ip_protocol: ``str`` + + :param from_port: First port of the port range + :type from_port: ``int`` + + :param to_port: Last port of the port range + :type to_port: ``int`` + + :param cidr: CIDR notation of the source IP range for this rule + :type cidr: ``str`` + + :param source_security_group: Existing Security Group to use as the + source (instead of CIDR) + :type source_security_group: L{OpenStackSecurityGroup + + :rtype: :class:`OpenStackSecurityGroupRule` + """ + source_security_group_id = None + if type(source_security_group) == OpenStackSecurityGroup: + source_security_group_id = source_security_group.id + + return self._to_security_group_rule(self.connection.request( + '/os-security-group-rules', method='POST', + data={'security_group_rule': { + 'ip_protocol': ip_protocol, + 'from_port': from_port, + 'to_port': to_port, + 'cidr': cidr, + 'group_id': source_security_group_id, + 'parent_group_id': security_group.id}} + ).object['security_group_rule']) + + def ex_delete_security_group_rule(self, rule): + """ + Delete a Rule from a Security Group. 
+ + :param rule: Rule should be deleted + :type rule: :class:`OpenStackSecurityGroupRule` + + :rtype: ``bool`` + """ + resp = self.connection.request('/os-security-group-rules/%s' % + (rule.id), method='DELETE') + return resp.status == httplib.NO_CONTENT + + def _to_key_pairs(self, obj): + key_pairs = obj['keypairs'] + key_pairs = [self._to_key_pair(key_pair['keypair']) for key_pair in + key_pairs] + return key_pairs + + def _to_key_pair(self, obj): + key_pair = KeyPair(name=obj['name'], + fingerprint=obj['fingerprint'], + public_key=obj['public_key'], + private_key=obj.get('private_key', None), + driver=self) + return key_pair + + def list_key_pairs(self): + response = self.connection.request('/os-keypairs') + key_pairs = self._to_key_pairs(response.object) + return key_pairs + + def get_key_pair(self, name): + self.connection.set_context({'key_pair_name': name}) + + response = self.connection.request('/os-keypairs/%s' % (name)) + key_pair = self._to_key_pair(response.object['keypair']) + return key_pair + + def create_key_pair(self, name): + data = {'keypair': {'name': name}} + response = self.connection.request('/os-keypairs', method='POST', + data=data) + key_pair = self._to_key_pair(response.object['keypair']) + return key_pair + + def import_key_pair_from_string(self, name, key_material): + data = {'keypair': {'name': name, 'public_key': key_material}} + response = self.connection.request('/os-keypairs', method='POST', + data=data) + key_pair = self._to_key_pair(response.object['keypair']) + return key_pair + + def delete_key_pair(self, key_pair): + """ + Delete a KeyPair. + + :param keypair: KeyPair to delete + :type keypair: :class:`OpenStackKeyPair` + + :rtype: ``bool`` + """ + response = self.connection.request('/os-keypairs/%s' % (key_pair.name), + method='DELETE') + return response.status == httplib.ACCEPTED + + def ex_list_keypairs(self): + """ + Get a list of KeyPairs that are available. 
+ + :rtype: ``list`` of :class:`OpenStackKeyPair` + """ + warnings.warn('This method has been deprecated in favor of ' + 'list_key_pairs method') + + return self.list_key_pairs() + + def ex_create_keypair(self, name): + """ + Create a new KeyPair + + :param name: Name of the new KeyPair + :type name: ``str`` + + :rtype: :class:`OpenStackKeyPair` + """ + warnings.warn('This method has been deprecated in favor of ' + 'create_key_pair method') + + return self.create_key_pair(name=name) + + def ex_import_keypair(self, name, keyfile): + """ + Import a KeyPair from a file + + :param name: Name of the new KeyPair + :type name: ``str`` + + :param keyfile: Path to the public key file (in OpenSSH format) + :type keyfile: ``str`` + + :rtype: :class:`OpenStackKeyPair` + """ + warnings.warn('This method has been deprecated in favor of ' + 'import_key_pair_from_file method') + + return self.import_key_pair_from_file(name=name, key_file_path=keyfile) + + def ex_import_keypair_from_string(self, name, key_material): + """ + Import a KeyPair from a string + + :param name: Name of the new KeyPair + :type name: ``str`` + + :param key_material: Public key (in OpenSSH format) + :type key_material: ``str`` + + :rtype: :class:`OpenStackKeyPair` + """ + warnings.warn('This method has been deprecated in favor of ' + 'import_key_pair_from_string method') + + return self.import_key_pair_from_string(name=name, + key_material=key_material) + + def ex_delete_keypair(self, keypair): + """ + Delete a KeyPair. 
+ + :param keypair: KeyPair to delete + :type keypair: :class:`OpenStackKeyPair` + + :rtype: ``bool`` + """ + warnings.warn('This method has been deprecated in favor of ' + 'delete_key_pair method') + + return self.delete_key_pair(key_pair=keypair) + + def ex_get_size(self, size_id): + """ + Get a NodeSize + + :param size_id: ID of the size which should be used + :type size_id: ``str`` + + :rtype: :class:`NodeSize` + """ + return self._to_size(self.connection.request( + '/flavors/%s' % (size_id,)) .object['flavor']) + + def get_image(self, image_id): + """ + Get a NodeImage + + @inherits: :class:`NodeDriver.get_image` + + :param image_id: ID of the image which should be used + :type image_id: ``str`` + + :rtype: :class:`NodeImage` + """ + return self._to_image(self.connection.request( + '/images/%s' % (image_id,)).object['image']) + + def delete_image(self, image): + """ + Delete a NodeImage + + @inherits: :class:`NodeDriver.delete_image` + + :param image: image witch should be used + :type image: :class:`NodeImage` + + :rtype: ``bool`` + """ + resp = self.connection.request('/images/%s' % (image.id,), + method='DELETE') + return resp.status == httplib.NO_CONTENT + + def _node_action(self, node, action, **params): + params = params or None + return self.connection.request('/servers/%s/action' % (node.id,), + method='POST', data={action: params}) + + def _update_node(self, node, **node_updates): + """ + Updates the editable attributes of a server, which currently include + its name and IPv4/IPv6 access addresses. 
+ """ + return self._to_node( + self.connection.request( + '/servers/%s' % (node.id,), method='PUT', + data={'server': node_updates} + ).object['server'] + ) + + def _to_node_from_obj(self, obj): + return self._to_node(obj['server']) + + def _to_node(self, api_node): + public_networks_labels = ['public', 'internet'] + + public_ips, private_ips = [], [] + + for label, values in api_node['addresses'].items(): + ips = [v['addr'] for v in values] + + if label in public_networks_labels: + public_ips.extend(ips) + else: + for ip in ips: + # is_private_subnet does not check for ipv6 + try: + if is_private_subnet(ip): + private_ips.append(ip) + else: + public_ips.append(ip) + except: + private_ips.append(ip) + + # Sometimes 'image' attribute is not present if the node is in an error + # state + image = api_node.get('image', None) + image_id = image.get('id', None) if image else None + + return Node( + id=api_node['id'], + name=api_node['name'], + state=self.NODE_STATE_MAP.get(api_node['status'], + NodeState.UNKNOWN), + public_ips=public_ips, + private_ips=private_ips, + driver=self, + extra=dict( + hostId=api_node['hostId'], + access_ip=api_node.get('accessIPv4'), + # Docs says "tenantId", but actual is "tenant_id". *sigh* + # Best handle both. 
+ tenantId=api_node.get('tenant_id') or api_node['tenantId'], + imageId=image_id, + flavorId=api_node['flavor']['id'], + uri=next(link['href'] for link in api_node['links'] if + link['rel'] == 'self'), + metadata=api_node['metadata'], + password=api_node.get('adminPass', None), + created=api_node['created'], + updated=api_node['updated'], + key_name=api_node.get('key_name', None), + disk_config=api_node.get('OS-DCF:diskConfig', None), + availability_zone=api_node.get('OS-EXT-AZ:availability_zone', + None), + ), + ) + + def _to_volume(self, api_node): + if 'volume' in api_node: + api_node = api_node['volume'] + return StorageVolume( + id=api_node['id'], + name=api_node['displayName'], + size=api_node['size'], + driver=self, + extra={ + 'description': api_node['displayDescription'], + 'attachments': [att for att in api_node['attachments'] if att], + } + ) + + def _to_snapshot(self, data): + if 'snapshot' in data: + data = data['snapshot'] + + volume_id = data.get('volume_id', data.get('volumeId', None)) + display_name = data.get('display_name', data.get('displayName', None)) + created_at = data.get('created_at', data.get('createdAt', None)) + description = data.get('display_description', + data.get('displayDescription', None)) + status = data.get('status', None) + + extra = {'volume_id': volume_id, + 'name': display_name, + 'created': created_at, + 'description': description, + 'status': status} + + snapshot = VolumeSnapshot(id=data['id'], driver=self, + size=data['size'], extra=extra) + return snapshot + + def _to_size(self, api_flavor, price=None, bandwidth=None): + # if provider-specific subclasses can get better values for + # price/bandwidth, then can pass them in when they super(). 
+ if not price: + price = self._get_size_price(str(api_flavor['id'])) + + return OpenStackNodeSize( + id=api_flavor['id'], + name=api_flavor['name'], + ram=api_flavor['ram'], + disk=api_flavor['disk'], + vcpus=api_flavor['vcpus'], + bandwidth=bandwidth, + price=price, + driver=self, + ) + + def _get_size_price(self, size_id): + try: + return get_size_price( + driver_type='compute', + driver_name=self.api_name, + size_id=size_id, + ) + except KeyError: + return(0.0) + + def _extract_image_id_from_url(self, location_header): + path = urlparse.urlparse(location_header).path + image_id = path.split('/')[-1] + return image_id + + def ex_rescue(self, node, password=None): + # Requires Rescue Mode extension + """ + Rescue a node + + :param node: node + :type node: :class:`Node` + + :param password: password + :type password: ``str`` + + :rtype: :class:`Node` + """ + if password: + resp = self._node_action(node, 'rescue', adminPass=password) + else: + resp = self._node_action(node, 'rescue') + password = json.loads(resp.body)['adminPass'] + node.extra['password'] = password + return node + + def ex_unrescue(self, node): + """ + Unrescue a node + + :param node: node + :type node: :class:`Node` + + :rtype: ``bool`` + """ + resp = self._node_action(node, 'unrescue') + return resp.status == httplib.ACCEPTED + + def _to_floating_ip_pools(self, obj): + pool_elements = obj['floating_ip_pools'] + return [self._to_floating_ip_pool(pool) for pool in pool_elements] + + def _to_floating_ip_pool(self, obj): + return OpenStack_1_1_FloatingIpPool(obj['name'], self.connection) + + def ex_list_floating_ip_pools(self): + """ + List available floating IP pools + + :rtype: ``list`` of :class:`OpenStack_1_1_FloatingIpPool` + """ + return self._to_floating_ip_pools( + self.connection.request('/os-floating-ip-pools').object) + + def _to_floating_ips(self, obj): + ip_elements = obj['floating_ips'] + return [self._to_floating_ip(ip) for ip in ip_elements] + + def _to_floating_ip(self, obj): + 
return OpenStack_1_1_FloatingIpAddress(id=obj['id'], + ip_address=obj['ip'], + pool=None, + node_id=obj['instance_id'], + driver=self) + + def ex_list_floating_ips(self): + """ + List floating IPs + + :rtype: ``list`` of :class:`OpenStack_1_1_FloatingIpAddress` + """ + return self._to_floating_ips( + self.connection.request('/os-floating-ips').object) + + def ex_get_floating_ip(self, ip): + """ + Get specified floating IP + + :param ip: floating IP to get + :type ip: ``str`` + + :rtype: :class:`OpenStack_1_1_FloatingIpAddress` + """ + floating_ips = self.ex_list_floating_ips() + ip_obj, = [x for x in floating_ips if x.ip_address == ip] + return ip_obj + + def ex_create_floating_ip(self): + """ + Create new floating IP + + :rtype: :class:`OpenStack_1_1_FloatingIpAddress` + """ + resp = self.connection.request('/os-floating-ips', + method='POST', + data={}) + data = resp.object['floating_ip'] + id = data['id'] + ip_address = data['ip'] + return OpenStack_1_1_FloatingIpAddress(id=id, + ip_address=ip_address, + pool=None, + node_id=None, + driver=self) + + def ex_delete_floating_ip(self, ip): + """ + Delete specified floating IP + + :param ip: floating IP to remove + :type ip: :class:`OpenStack_1_1_FloatingIpAddress` + + :rtype: ``bool`` + """ + resp = self.connection.request('/os-floating-ips/%s' % ip.id, + method='DELETE') + return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED) + + def ex_attach_floating_ip_to_node(self, node, ip): + """ + Attach the floating IP to the node + + :param node: node + :type node: :class:`Node` + + :param ip: floating IP to attach + :type ip: ``str`` or :class:`OpenStack_1_1_FloatingIpAddress` + + :rtype: ``bool`` + """ + address = ip.ip_address if hasattr(ip, 'ip_address') else ip + data = { + 'addFloatingIp': {'address': address} + } + resp = self.connection.request('/servers/%s/action' % node.id, + method='POST', data=data) + return resp.status == httplib.ACCEPTED + + def ex_detach_floating_ip_from_node(self, node, ip): + """ + 
Detach the floating IP from the node + + :param node: node + :type node: :class:`Node` + + :param ip: floating IP to remove + :type ip: ``str`` or :class:`OpenStack_1_1_FloatingIpAddress` + + :rtype: ``bool`` + """ + address = ip.ip_address if hasattr(ip, 'ip_address') else ip + data = { + 'removeFloatingIp': {'address': address} + } + resp = self.connection.request('/servers/%s/action' % node.id, + method='POST', data=data) + return resp.status == httplib.ACCEPTED + + def ex_get_metadata_for_node(self, node): + """ + Return the metadata associated with the node. + + :param node: Node instance + :type node: :class:`Node` + + :return: A dictionary or other mapping of strings to strings, + associating tag names with tag values. + :type tags: ``dict`` + """ + return node.extra['metadata'] + + def ex_pause_node(self, node): + uri = '/servers/%s/action' % (node.id) + data = {'pause': None} + resp = self.connection.request(uri, method='POST', data=data) + return resp.status == httplib.ACCEPTED + + def ex_unpause_node(self, node): + uri = '/servers/%s/action' % (node.id) + data = {'unpause': None} + resp = self.connection.request(uri, method='POST', data=data) + return resp.status == httplib.ACCEPTED + + def ex_suspend_node(self, node): + uri = '/servers/%s/action' % (node.id) + data = {'suspend': None} + resp = self.connection.request(uri, method='POST', data=data) + return resp.status == httplib.ACCEPTED + + def ex_resume_node(self, node): + uri = '/servers/%s/action' % (node.id) + data = {'resume': None} + resp = self.connection.request(uri, method='POST', data=data) + return resp.status == httplib.ACCEPTED + + +class OpenStack_1_1_FloatingIpPool(object): + """ + Floating IP Pool info. 
+ """ + + def __init__(self, name, connection): + self.name = name + self.connection = connection + + def list_floating_ips(self): + """ + List floating IPs in the pool + + :rtype: ``list`` of :class:`OpenStack_1_1_FloatingIpAddress` + """ + return self._to_floating_ips( + self.connection.request('/os-floating-ips').object) + + def _to_floating_ips(self, obj): + ip_elements = obj['floating_ips'] + return [self._to_floating_ip(ip) for ip in ip_elements] + + def _to_floating_ip(self, obj): + return OpenStack_1_1_FloatingIpAddress(id=obj['id'], + ip_address=obj['ip'], + pool=self, + node_id=obj['instance_id'], + driver=self.connection.driver) + + def get_floating_ip(self, ip): + """ + Get specified floating IP from the pool + + :param ip: floating IP to get + :type ip: ``str`` + + :rtype: :class:`OpenStack_1_1_FloatingIpAddress` + """ + ip_obj, = [x for x in self.list_floating_ips() if x.ip_address == ip] + return ip_obj + + def create_floating_ip(self): + """ + Create new floating IP in the pool + + :rtype: :class:`OpenStack_1_1_FloatingIpAddress` + """ + resp = self.connection.request('/os-floating-ips', + method='POST', + data={'pool': self.name}) + data = resp.object['floating_ip'] + id = data['id'] + ip_address = data['ip'] + return OpenStack_1_1_FloatingIpAddress(id=id, + ip_address=ip_address, + pool=self, + node_id=None, + driver=self.connection.driver) + + def delete_floating_ip(self, ip): + """ + Delete specified floating IP from the pool + + :param ip: floating IP to remove + :type ip::class:`OpenStack_1_1_FloatingIpAddress` + + :rtype: ``bool`` + """ + resp = self.connection.request('/os-floating-ips/%s' % ip.id, + method='DELETE') + return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED) + + def __repr__(self): + return ('' % self.name) + + +class OpenStack_1_1_FloatingIpAddress(object): + """ + Floating IP info. 
+ """ + + def __init__(self, id, ip_address, pool, node_id=None, driver=None): + self.id = str(id) + self.ip_address = ip_address + self.pool = pool + self.node_id = node_id + self.driver = driver + + def delete(self): + """ + Delete this floating IP + + :rtype: ``bool`` + """ + if self.pool is not None: + return self.pool.delete_floating_ip(self) + elif self.driver is not None: + return self.driver.ex_delete_floating_ip(self) + + def __repr__(self): + return ('' + % (self.id, self.ip_address, self.pool, self.driver)) diff -Nru libcloud-0.5.0/libcloud/compute/drivers/opsource.py libcloud-0.15.1/libcloud/compute/drivers/opsource.py --- libcloud-0.5.0/libcloud/compute/drivers/opsource.py 2011-05-21 20:10:20.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/opsource.py 2014-06-11 14:27:59.000000000 +0000 @@ -15,34 +15,46 @@ """ Opsource Driver """ -import base64 -from xml.etree import ElementTree as ET -from libcloud.utils import fixxpath, findtext, findall -from libcloud.common.base import ConnectionUserAndKey, Response -from libcloud.common.types import LibcloudError, InvalidCredsError, MalformedResponseError -from libcloud.compute.types import NodeState, Provider -from libcloud.compute.base import NodeDriver, Node, NodeAuthPassword +try: + from lxml import etree as ET +except ImportError: + from xml.etree import ElementTree as ET + +from base64 import b64encode + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import b + +from libcloud.compute.base import NodeDriver, Node from libcloud.compute.base import NodeSize, NodeImage, NodeLocation +from libcloud.common.types import LibcloudError, InvalidCredsError +from libcloud.common.base import ConnectionUserAndKey, XmlResponse +from libcloud.utils.xml import fixxpath, findtext, findall +from libcloud.compute.types import NodeState, Provider # Roadmap / TODO: # # 0.1 - Basic functionality: create, delete, start, stop, reboot - servers -# (base OS images only, no customer images suported yet) 
+# (base OS images only, no customer images supported yet) # x implement list_nodes() -# x implement create_node() (only support Base OS images, no customer images yet) +# x implement create_node() (only support Base OS images, +# no customer images yet) # x implement reboot() # x implement destroy_node() # x implement list_sizes() -# x implement list_images() (only support Base OS images, no customer images yet) +# x implement list_images() (only support Base OS images, +# no customer images yet) # x implement list_locations() -# x implement ex_* extension functions for opsource-specific features +# x implement ex_* extension functions for opsource-specific featurebody # x ex_graceful_shutdown # x ex_start_node # x ex_power_off # x ex_list_networks (needed for create_node()) -# x refactor: switch to using fixxpath() from the vcloud driver for dealing with xml namespace tags -# x refactor: move some functionality from OpsourceConnection.request() method into new .request_with_orgId() method +# x refactor: switch to using fixxpath() from the vcloud driver for +# dealing with xml namespace tags +# x refactor: move some functionality from OpsourceConnection.request() +# method into new .request_with_orgId() method # x add OpsourceStatus object support to: # x _to_node() # x _to_network() @@ -55,7 +67,8 @@ # - delete customer images # - modify customer images # - add "pending-servers" in list_nodes() -# - implement various ex_* extension functions for opsource-specific features +# - implement various ex_* extension functions for opsource-specific +# features # - ex_modify_server() # - ex_add_storage_to_server() # - ex_snapshot_server() (create's customer image) @@ -67,49 +80,41 @@ # 1.0 - Opsource 0.9 API feature complete, tested # setup a few variables to represent all of the opsource cloud namespaces -NAMESPACE_BASE = "http://oec.api.opsource.net/schemas" -ORGANIZATION_NS = NAMESPACE_BASE + "/organization" -SERVER_NS = NAMESPACE_BASE + "/server" -NETWORK_NS = 
NAMESPACE_BASE + "/network" -DIRECTORY_NS = NAMESPACE_BASE + "/directory" -RESET_NS = NAMESPACE_BASE + "/reset" -VIP_NS = NAMESPACE_BASE + "/vip" +NAMESPACE_BASE = "http://oec.api.opsource.net/schemas" +ORGANIZATION_NS = NAMESPACE_BASE + "/organization" +SERVER_NS = NAMESPACE_BASE + "/server" +NETWORK_NS = NAMESPACE_BASE + "/network" +DIRECTORY_NS = NAMESPACE_BASE + "/directory" +RESET_NS = NAMESPACE_BASE + "/reset" +VIP_NS = NAMESPACE_BASE + "/vip" IMAGEIMPORTEXPORT_NS = NAMESPACE_BASE + "/imageimportexport" -DATACENTER_NS = NAMESPACE_BASE + "/datacenter" -SUPPORT_NS = NAMESPACE_BASE + "/support" -GENERAL_NS = NAMESPACE_BASE + "/general" -IPPLAN_NS = NAMESPACE_BASE + "/ipplan" -WHITELABEL_NS = NAMESPACE_BASE + "/whitelabel" +DATACENTER_NS = NAMESPACE_BASE + "/datacenter" +SUPPORT_NS = NAMESPACE_BASE + "/support" +GENERAL_NS = NAMESPACE_BASE + "/general" +IPPLAN_NS = NAMESPACE_BASE + "/ipplan" +WHITELABEL_NS = NAMESPACE_BASE + "/whitelabel" -class OpsourceResponse(Response): - - def parse_body(self): - try: - body = ET.XML(self.body) - except: - raise MalformedResponseError("Failed to parse XML", body=self.body, driver=OpsourceNodeDriver) - return body +class OpsourceResponse(XmlResponse): def parse_error(self): - if self.status == 401: + if self.status == httplib.UNAUTHORIZED: raise InvalidCredsError(self.body) - - if self.status == 403: + elif self.status == httplib.FORBIDDEN: raise InvalidCredsError(self.body) - try: - body = ET.XML(self.body) - except: - raise MalformedResponseError("Failed to parse XML", body=self.body, driver=OpsourceNodeDriver) + body = self.parse_body() - if self.status == 400: + if self.status == httplib.BAD_REQUEST: code = findtext(body, 'resultCode', SERVER_NS) message = findtext(body, 'resultDetail', SERVER_NS) - raise OpsourceAPIException(code, message, driver=OpsourceNodeDriver) + raise OpsourceAPIException(code, + message, + driver=OpsourceNodeDriver) return self.body + class OpsourceAPIException(LibcloudError): def __init__(self, 
code, msg, driver): self.code = code @@ -120,7 +125,9 @@ return "%s: %s" % (self.code, self.msg) def __repr__(self): - return "" % (self.code, self.msg) + return ("" % + (self.code, self.msg)) + class OpsourceConnection(ConnectionUserAndKey): """ @@ -133,46 +140,53 @@ _orgId = None responseCls = OpsourceResponse + allow_insecure = False + def add_default_headers(self, headers): - headers['Authorization'] = ('Basic %s' - % (base64.b64encode('%s:%s' % (self.user_id, self.key)))) + headers['Authorization'] = \ + ('Basic %s' % b64encode(b('%s:%s' % (self.user_id, + self.key))).decode('utf-8')) return headers - def request(self, action, params=None, data='', headers=None, method='GET'): + def request(self, action, params=None, data='', + headers=None, method='GET'): action = "%s/%s/%s" % (self.api_path, self.api_version, action) return super(OpsourceConnection, self).request( action=action, params=params, data=data, - method=method, headers=headers - ) + method=method, headers=headers) - def request_with_orgId(self, action, params=None, data='', headers=None, method='GET'): + def request_with_orgId(self, action, params=None, data='', + headers=None, method='GET'): action = "%s/%s" % (self.get_resource_path(), action) return super(OpsourceConnection, self).request( action=action, params=params, data=data, - method=method, headers=headers - ) + method=method, headers=headers) def get_resource_path(self): - """this method returns a resource path which is necessary for referencing - resources that require a full path instead of just an ID, such as - networks, and customer snapshots. """ - return ("%s/%s/%s" % (self.api_path, self.api_version, self._get_orgId())) + This method returns a resource path which is necessary for referencing + resources that require a full path instead of just an ID, such as + networks, and customer snapshots. 
+ """ + return ("%s/%s/%s" % (self.api_path, self.api_version, + self._get_orgId())) def _get_orgId(self): """ - send the /myaccount API request to opsource cloud and parse the 'orgId' from the - XML response object. We need the orgId to use most of the other API functions + Send the /myaccount API request to opsource cloud and parse the + 'orgId' from the XML response object. We need the orgId to use most + of the other API functions """ - if self._orgId == None: + if self._orgId is None: body = self.request('myaccount').object self._orgId = findtext(body, 'orgId', DIRECTORY_NS) return self._orgId + class OpsourceStatus(object): """ Opsource API pending operation status class @@ -180,8 +194,9 @@ step.name, step.number, step.percentComplete, failureReason, """ def __init__(self, action=None, requestTime=None, userName=None, - numberOfSteps=None, updateTime=None, step_name=None, - step_number=None, step_percentComplete=None, failureReason=None): + numberOfSteps=None, updateTime=None, step_name=None, + step_number=None, step_percentComplete=None, + failureReason=None): self.action = action self.requestTime = requestTime self.userName = userName @@ -193,21 +208,23 @@ self.failureReason = failureReason def __repr__(self): - return (('') + return (('') % (self.id, self.name, self.description, self.location, self.privateNet, self.multicast)) class OpsourceNodeDriver(NodeDriver): """ - Opsource node driver + Opsource node driver. 
""" connectionCls = OpsourceConnection - - type = Provider.OPSOURCE name = 'Opsource' - - features = {"create_node": ["password"]} - - def list_nodes(self): - nodes = self._to_nodes(self.connection.request_with_orgId('server/deployed').object) - nodes.extend(self._to_nodes(self.connection.request_with_orgId('server/pendingDeploy').object)) - return nodes - - def list_sizes(self, location=None): - return [ NodeSize(id=1, - name="default", - ram=0, - disk=0, - bandwidth=0, - price=0, - driver=self.connection.driver) ] - - def list_images(self, location=None): - """return a list of available images - Currently only returns the default 'base OS images' provided by opsource. - Customer images (snapshots) are not yet supported. - """ - return self._to_base_images(self.connection.request('base/image').object) - - def list_locations(self): - """list locations (datacenters) available for instantiating servers and - networks. - """ - return self._to_locations(self.connection.request_with_orgId('datacenter').object) + website = 'http://www.opsource.net/' + type = Provider.OPSOURCE + features = {'create_node': ['password']} def create_node(self, **kwargs): - """Create a new opsource node - - Standard keyword arguments from L{NodeDriver.create_node}: - @keyword name: String with a name for this new node (required) - @type name: str - - @keyword image: OS Image to boot on node. (required) - @type image: L{NodeImage} - - @keyword auth: Initial authentication information for the node (required) - @type auth: L{NodeAuthPassword} - - Non-standard keyword arguments: - @keyword ex_description: description for this node (required) - @type ex_description: C{str} + """ + Create a new opsource node - @keyword ex_network: Network to create the node within (required) - @type ex_network: L{OpsourceNetwork} + :keyword name: String with a name for this new node (required) + :type name: ``str`` - @keyword ex_isStarted: Start server after creation? 
default true (required) - @type ex_isStarted: C{bool} + :keyword image: OS Image to boot on node. (required) + :type image: :class:`NodeImage` - @return: The newly created L{Node}. NOTE: Opsource does not provide a way to - determine the ID of the server that was just created, so the returned - L{Node} is not guaranteed to be the same one that was created. This - is only the case when multiple nodes with the same name exist. + :keyword auth: Initial authentication information for the + node (required) + :type auth: :class:`NodeAuthPassword` + + :keyword ex_description: description for this node (required) + :type ex_description: ``str`` + + :keyword ex_network: Network to create the node within (required) + :type ex_network: :class:`OpsourceNetwork` + + :keyword ex_isStarted: Start server after creation? default + true (required) + :type ex_isStarted: ``bool`` + + :return: The newly created :class:`Node`. NOTE: Opsource does not + provide a + way to determine the ID of the server that was just created, + so the returned :class:`Node` is not guaranteed to be the same + one that was created. This is only the case when multiple + nodes with the same name exist. + :rtype: :class:`Node` """ name = kwargs['name'] image = kwargs['image'] - # XXX: Node sizes can be adjusted after a node is created, but cannot be - # set at create time because size is part of the image definition. + # XXX: Node sizes can be adjusted after a node is created, but + # cannot be set at create time because size is part of the + # image definition. 
password = None - if kwargs.has_key('auth'): - auth = kwargs.get('auth') - if isinstance(auth, NodeAuthPassword): - password = auth.password - else: - raise ValueError('auth must be of NodeAuthPassword type') + auth = self._get_and_check_auth(kwargs.get('auth')) + password = auth.password ex_description = kwargs.get('ex_description', '') ex_isStarted = kwargs.get('ex_isStarted', True) @@ -309,13 +299,15 @@ ex_network = kwargs.get('ex_network') if not isinstance(ex_network, OpsourceNetwork): raise ValueError('ex_network must be of OpsourceNetwork type') - vlanResourcePath = "%s/%s" % (self.connection.get_resource_path(), ex_network.id) + vlanResourcePath = "%s/%s" % (self.connection.get_resource_path(), + ex_network.id) imageResourcePath = None - if image.extra.has_key('resourcePath'): + if 'resourcePath' in image.extra: imageResourcePath = image.extra['resourcePath'] else: - imageResourcePath = "%s/%s" % (self.connection.get_resource_path(), image.id) + imageResourcePath = "%s/%s" % (self.connection.get_resource_path(), + image.id) server_elm = ET.Element('Server', {'xmlns': SERVER_NS}) ET.SubElement(server_elm, "name").text = name @@ -327,68 +319,201 @@ self.connection.request_with_orgId('server', method='POST', - data=ET.tostring(server_elm) - ).object + data=ET.tostring(server_elm)).object + # XXX: return the last node in the list that has a matching name. 
this # is likely but not guaranteed to be the node we just created # because opsource allows multiple nodes to have the same name - return filter(lambda x: x.name == name, self.list_nodes())[-1] + node = list(filter(lambda x: x.name == name, self.list_nodes()))[-1] + + if getattr(auth, "generated", False): + node.extra['password'] = auth.password + + return node + + def destroy_node(self, node): + body = self.connection.request_with_orgId( + 'server/%s?delete' % (node.id)).object - def reboot_node(self, node): - """reboots the node""" - body = self.connection.request_with_orgId('server/%s?restart' % node.id).object result = findtext(body, 'result', GENERAL_NS) return result == 'SUCCESS' - def destroy_node(self, node): - """Destroys the node""" - body = self.connection.request_with_orgId('server/%s?delete' % node.id).object + def reboot_node(self, node): + body = self.connection.request_with_orgId( + 'server/%s?restart' % (node.id)).object result = findtext(body, 'result', GENERAL_NS) return result == 'SUCCESS' + def list_nodes(self): + nodes = self._to_nodes( + self.connection.request_with_orgId('server/deployed').object) + nodes.extend(self._to_nodes( + self.connection.request_with_orgId('server/pendingDeploy').object)) + return nodes + + def list_images(self, location=None): + """ + return a list of available images + Currently only returns the default 'base OS images' provided by + opsource. Customer images (snapshots) are not yet supported. + + @inherits: :class:`NodeDriver.list_images` + """ + return self._to_base_images( + self.connection.request('base/image').object) + + def list_sizes(self, location=None): + return [ + NodeSize(id=1, + name="default", + ram=0, + disk=0, + bandwidth=0, + price=0, + driver=self.connection.driver), + ] + + def list_locations(self): + """ + list locations (datacenters) available for instantiating servers and + networks. 
+ + @inherits: :class:`NodeDriver.list_locations` + """ + return self._to_locations( + self.connection.request_with_orgId('datacenter').object) + + def list_networks(self, location=None): + """ + List networks deployed across all data center locations for your + organization. The response includes the location of each network. + + + :keyword location: The location + :type location: :class:`NodeLocation` + + :return: a list of OpsourceNetwork objects + :rtype: ``list`` of :class:`OpsourceNetwork` + """ + return self._to_networks( + self.connection.request_with_orgId('networkWithLocation').object) + + def _to_base_images(self, object): + images = [] + for element in object.findall(fixxpath("ServerImage", SERVER_NS)): + images.append(self._to_base_image(element)) + + return images + + def _to_base_image(self, element): + # Eventually we will probably need multiple _to_image() functions + # that parse differently than . + # DeployedImages are customer snapshot images, and ServerImages are + # 'base' images provided by opsource + location_id = findtext(element, 'location', SERVER_NS) + location = self.ex_get_location_by_id(location_id) + + extra = { + 'description': findtext(element, 'description', SERVER_NS), + 'OS_type': findtext(element, 'operatingSystem/type', SERVER_NS), + 'OS_displayName': findtext(element, 'operatingSystem/displayName', + SERVER_NS), + 'cpuCount': findtext(element, 'cpuCount', SERVER_NS), + 'resourcePath': findtext(element, 'resourcePath', SERVER_NS), + 'memory': findtext(element, 'memory', SERVER_NS), + 'osStorage': findtext(element, 'osStorage', SERVER_NS), + 'additionalStorage': findtext(element, 'additionalStorage', + SERVER_NS), + 'created': findtext(element, 'created', SERVER_NS), + 'location': location, + } + + return NodeImage(id=str(findtext(element, 'id', SERVER_NS)), + name=str(findtext(element, 'name', SERVER_NS)), + extra=extra, + driver=self.connection.driver) + def ex_start_node(self, node): - """Powers on an existing deployed 
server""" - body = self.connection.request_with_orgId('server/%s?start' % node.id).object + """ + Powers on an existing deployed server + + :param node: Node which should be used + :type node: :class:`Node` + + :rtype: ``bool`` + """ + body = self.connection.request_with_orgId( + 'server/%s?start' % node.id).object result = findtext(body, 'result', GENERAL_NS) return result == 'SUCCESS' def ex_shutdown_graceful(self, node): - """This function will attempt to "gracefully" stop a server by initiating a - shutdown sequence within the guest operating system. A successful response - on this function means the system has successfully passed the - request into the operating system. """ - body = self.connection.request_with_orgId('server/%s?shutdown' % node.id).object + This function will attempt to "gracefully" stop a server by + initiating a shutdown sequence within the guest operating system. + A successful response on this function means the system has + successfully passed the request into the operating system. + + :param node: Node which should be used + :type node: :class:`Node` + + :rtype: ``bool`` + """ + body = self.connection.request_with_orgId( + 'server/%s?shutdown' % (node.id)).object result = findtext(body, 'result', GENERAL_NS) return result == 'SUCCESS' def ex_power_off(self, node): - """This function will abruptly power-off a server. Unlike ex_shutdown_graceful, - success ensures the node will stop but some OS and application configurations may - be adversely affected by the equivalent of pulling the power plug out of the - machine. """ - body = self.connection.request_with_orgId('server/%s?poweroff' % node.id).object + This function will abruptly power-off a server. Unlike + ex_shutdown_graceful, success ensures the node will stop but some OS + and application configurations may be adversely affected by the + equivalent of pulling the power plug out of the machine. 
+ + :param node: Node which should be used + :type node: :class:`Node` + + :rtype: ``bool`` + """ + body = self.connection.request_with_orgId( + 'server/%s?poweroff' % node.id).object result = findtext(body, 'result', GENERAL_NS) return result == 'SUCCESS' def ex_list_networks(self): - """List networks deployed across all data center locations for your + """ + List networks deployed across all data center locations for your organization. The response includes the location of each network. - Returns a list of OpsourceNetwork objects + :return: a list of OpsourceNetwork objects + :rtype: ``list`` of :class:`OpsourceNetwork` """ - return self._to_networks(self.connection.request_with_orgId('networkWithLocation').object) + response = self.connection.request_with_orgId('networkWithLocation') \ + .object + return self._to_networks(response) def ex_get_location_by_id(self, id): + """ + Get location by ID. + + :param id: ID of the node location which should be used + :type id: ``str`` + + :rtype: :class:`NodeLocation` + """ location = None if id is not None: - location = filter(lambda x: x.id == id, self.list_locations())[0] + location = list( + filter(lambda x: x.id == id, self.list_locations()))[0] return location def _to_networks(self, object): - node_elements = findall(object, 'network', NETWORK_NS) - return [ self._to_network(el) for el in node_elements ] + networks = [] + for element in findall(object, 'network', NETWORK_NS): + networks.append(self._to_network(element)) + + return networks def _to_network(self, element): multicast = False @@ -402,15 +527,20 @@ return OpsourceNetwork(id=findtext(element, 'id', NETWORK_NS), name=findtext(element, 'name', NETWORK_NS), - description=findtext(element, 'description', NETWORK_NS), + description=findtext(element, 'description', + NETWORK_NS), location=location, - privateNet=findtext(element, 'privateNet', NETWORK_NS), + privateNet=findtext(element, 'privateNet', + NETWORK_NS), multicast=multicast, status=status) def 
_to_locations(self, object): - node_elements = object.findall(fixxpath('datacenter', DATACENTER_NS)) - return [ self._to_location(el) for el in node_elements ] + locations = [] + for element in object.findall(fixxpath('datacenter', DATACENTER_NS)): + locations.append(self._to_location(element)) + + return locations def _to_location(self, element): l = NodeLocation(id=findtext(element, 'location', DATACENTER_NS), @@ -421,12 +551,13 @@ def _to_nodes(self, object): node_elements = object.findall(fixxpath('DeployedServer', SERVER_NS)) - node_elements.extend(object.findall(fixxpath('PendingDeployServer', SERVER_NS))) - return [ self._to_node(el) for el in node_elements ] + node_elements.extend(object.findall( + fixxpath('PendingDeployServer', SERVER_NS))) + return [self._to_node(el) for el in node_elements] def _to_node(self, element): if findtext(element, 'isStarted', SERVER_NS) == 'true': - state = NodeState.RUNNING + state = NodeState.RUNNING else: state = NodeState.TERMINATED @@ -438,64 +569,52 @@ 'networkId': findtext(element, 'networkId', SERVER_NS), 'machineName': findtext(element, 'machineName', SERVER_NS), 'deployedTime': findtext(element, 'deployedTime', SERVER_NS), - 'cpuCount': findtext(element, 'machineSpecification/cpuCount', SERVER_NS), - 'memoryMb': findtext(element, 'machineSpecification/memoryMb', SERVER_NS), - 'osStorageGb': findtext(element, 'machineSpecification/osStorageGb', SERVER_NS), - 'additionalLocalStorageGb': findtext(element, 'machineSpecification/additionalLocalStorageGb', SERVER_NS), - 'OS_type': findtext(element, 'machineSpecification/operatingSystem/type', SERVER_NS), - 'OS_displayName': findtext(element, 'machineSpecification/operatingSystem/displayName', SERVER_NS), + 'cpuCount': findtext(element, 'machineSpecification/cpuCount', + SERVER_NS), + 'memoryMb': findtext(element, 'machineSpecification/memoryMb', + SERVER_NS), + 'osStorageGb': findtext(element, + 'machineSpecification/osStorageGb', + SERVER_NS), + 
'additionalLocalStorageGb': findtext( + element, 'machineSpecification/additionalLocalStorageGb', + SERVER_NS), + 'OS_type': findtext(element, + 'machineSpecification/operatingSystem/type', + SERVER_NS), + 'OS_displayName': findtext( + element, 'machineSpecification/operatingSystem/displayName', + SERVER_NS), 'status': status, } + public_ip = findtext(element, 'publicIpAddress', SERVER_NS) + n = Node(id=findtext(element, 'id', SERVER_NS), name=findtext(element, 'name', SERVER_NS), state=state, - public_ip="unknown", - private_ip=findtext(element, 'privateIpAddress', SERVER_NS), + public_ips=[public_ip] if public_ip is not None else [], + private_ips=findtext(element, 'privateIpAddress', SERVER_NS), driver=self.connection.driver, extra=extra) return n - def _to_base_images(self, object): - node_elements = object.findall(fixxpath("ServerImage", SERVER_NS)) - return [ self._to_base_image(el) for el in node_elements ] - - def _to_base_image(self, element): - # Eventually we will probably need multiple _to_image() functions - # that parse differently than . 
- # DeployedImages are customer snapshot images, and ServerImages are - # 'base' images provided by opsource - location_id = findtext(element, 'location', SERVER_NS) - location = self.ex_get_location_by_id(location_id) - - extra = { - 'description': findtext(element, 'description', SERVER_NS), - 'OS_type': findtext(element, 'operatingSystem/type', SERVER_NS), - 'OS_displayName': findtext(element, 'operatingSystem/displayName', SERVER_NS), - 'cpuCount': findtext(element, 'cpuCount', SERVER_NS), - 'resourcePath': findtext(element, 'resourcePath', SERVER_NS), - 'memory': findtext(element, 'memory', SERVER_NS), - 'osStorage': findtext(element, 'osStorage', SERVER_NS), - 'additionalStorage': findtext(element, 'additionalStorage', SERVER_NS), - 'created': findtext(element, 'created', SERVER_NS), - 'location': location, - } - - i = NodeImage(id=str(findtext(element, 'id', SERVER_NS)), - name=str(findtext(element, 'name', SERVER_NS)), - extra=extra, - driver=self.connection.driver) - return i - def _to_status(self, element): - if element == None: + if element is None: return OpsourceStatus() s = OpsourceStatus(action=findtext(element, 'action', SERVER_NS), - requestTime=findtext(element, 'requestTime', SERVER_NS), - userName=findtext(element, 'userName', SERVER_NS), - numberOfSteps=findtext(element, 'numberOfSteps', SERVER_NS), - step_name=findtext(element, 'step/name', SERVER_NS), - step_number=findtext(element, 'step_number', SERVER_NS), - step_percentComplete=findtext(element, 'step/percentComplete', SERVER_NS), - failureReason=findtext(element, 'failureReason', SERVER_NS)) + requestTime=findtext(element, 'requestTime', + SERVER_NS), + userName=findtext(element, 'userName', + SERVER_NS), + numberOfSteps=findtext(element, 'numberOfSteps', + SERVER_NS), + step_name=findtext(element, 'step/name', + SERVER_NS), + step_number=findtext(element, 'step_number', + SERVER_NS), + step_percentComplete=findtext( + element, 'step/percentComplete', SERVER_NS), + 
failureReason=findtext(element, 'failureReason', + SERVER_NS)) return s diff -Nru libcloud-0.5.0/libcloud/compute/drivers/rackspace.py libcloud-0.15.1/libcloud/compute/drivers/rackspace.py --- libcloud-0.5.0/libcloud/compute/drivers/rackspace.py 2011-05-09 21:01:31.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/rackspace.py 2014-05-26 15:42:51.000000000 +0000 @@ -1,4 +1,3 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 @@ -15,551 +14,217 @@ """ Rackspace driver """ -import os - -import base64 - -from xml.etree import ElementTree as ET -from xml.parsers.expat import ExpatError - -from libcloud.pricing import get_pricing -from libcloud.common.base import Response -from libcloud.common.types import MalformedResponseError -from libcloud.compute.types import NodeState, Provider -from libcloud.compute.base import NodeDriver, Node -from libcloud.compute.base import NodeSize, NodeImage, NodeLocation - -from libcloud.common.rackspace import ( - AUTH_HOST_US, AUTH_HOST_UK, RackspaceBaseConnection) - -NAMESPACE='http://docs.rackspacecloud.com/servers/api/v1.0' - - -class RackspaceResponse(Response): - - def success(self): - i = int(self.status) - return i >= 200 and i <= 299 - - def parse_body(self): - if not self.body: - return None - try: - body = ET.XML(self.body) - except: - raise MalformedResponseError( - "Failed to parse XML", - body=self.body, - driver=RackspaceNodeDriver) - return body - def parse_error(self): - # TODO: fixup, Rackspace only uses response codes really! 
- try: - body = ET.XML(self.body) - except: - raise MalformedResponseError( - "Failed to parse XML", - body=self.body, driver=RackspaceNodeDriver) - try: - text = "; ".join([ err.text or '' - for err in - body.getiterator() - if err.text]) - except ExpatError: - text = self.body - return '%s %s %s' % (self.status, self.error, text) - - -class RackspaceConnection(RackspaceBaseConnection): - """ - Connection class for the Rackspace driver - """ - - responseCls = RackspaceResponse - auth_host = AUTH_HOST_US - _url_key = "server_url" - - def __init__(self, user_id, key, secure=True): - super(RackspaceConnection, self).__init__(user_id, key, secure) - self.api_version = 'v1.0' - self.accept_format = 'application/xml' - - def request(self, action, params=None, data='', headers=None, method='GET'): - if not headers: - headers = {} - if not params: - params = {} - # Due to first-run authentication request, we may not have a path - if self.server_url: - action = self.server_url + action - if method in ("POST", "PUT"): - headers = {'Content-Type': 'application/xml; charset=UTF-8'} - if method == "GET": - params['cache-busting'] = os.urandom(8).encode('hex') - return super(RackspaceConnection, self).request( - action=action, - params=params, data=data, - method=method, headers=headers - ) - - -class RackspaceSharedIpGroup(object): - """ - Shared IP group info. - """ - - def __init__(self, id, name, servers=None): - self.id = str(id) - self.name = name - self.servers = servers - - -class RackspaceNodeIpAddresses(object): - """ - List of public and private IP addresses of a Node. 
- """ - - def __init__(self, public_addresses, private_addresses): - self.public_addresses = public_addresses - self.private_addresses = private_addresses +from libcloud.compute.types import Provider, LibcloudError +from libcloud.compute.base import NodeLocation, VolumeSnapshot +from libcloud.compute.drivers.openstack import OpenStack_1_0_Connection,\ + OpenStack_1_0_NodeDriver, OpenStack_1_0_Response +from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection,\ + OpenStack_1_1_NodeDriver + +from libcloud.common.rackspace import AUTH_URL + + +ENDPOINT_ARGS_MAP = { + 'dfw': {'service_type': 'compute', + 'name': 'cloudServersOpenStack', + 'region': 'DFW'}, + 'ord': {'service_type': 'compute', + 'name': 'cloudServersOpenStack', + 'region': 'ORD'}, + 'iad': {'service_type': 'compute', + 'name': 'cloudServersOpenStack', + 'region': 'IAD'}, + 'lon': {'service_type': 'compute', + 'name': 'cloudServersOpenStack', + 'region': 'LON'}, + 'syd': {'service_type': 'compute', + 'name': 'cloudServersOpenStack', + 'region': 'SYD'}, + 'hkg': {'service_type': 'compute', + 'name': 'cloudServersOpenStack', + 'region': 'HKG'}, + +} + + +class RackspaceFirstGenConnection(OpenStack_1_0_Connection): + """ + Connection class for the Rackspace first-gen driver. + """ + responseCls = OpenStack_1_0_Response + XML_NAMESPACE = 'http://docs.rackspacecloud.com/servers/api/v1.0' + auth_url = AUTH_URL + _auth_version = '2.0' + cache_busting = True + + def __init__(self, *args, **kwargs): + self.region = kwargs.pop('region', None) + super(RackspaceFirstGenConnection, self).__init__(*args, **kwargs) + + def get_endpoint(self): + ep = {} + + if '2.0' in self._auth_version: + ep = self.service_catalog.get_endpoint(service_type='compute', + name='cloudServers') + else: + raise LibcloudError( + 'Auth version "%s" not supported' % (self._auth_version)) + public_url = ep.get('publicURL', None) -class RackspaceNodeDriver(NodeDriver): - """ - Rackspace node driver. 
+ if not public_url: + raise LibcloudError('Could not find specified endpoint') - Extra node attributes: - - password: root password, available after create. - - hostId: represents the host your cloud server runs on - - imageId: id of image - - flavorId: id of flavor - """ - connectionCls = RackspaceConnection - type = Provider.RACKSPACE + # This is a nasty hack, but it's required because of how the + # auth system works. + # Old US accounts can access UK API endpoint, but they don't + # have this endpoint in the service catalog. Same goes for the + # old UK accounts and US endpoint. + if self.region == 'us': + # Old UK account, which only have uk endpoint in the catalog + public_url = public_url.replace('https://lon.servers.api', + 'https://servers.api') + elif self.region == 'uk': + # Old US account, which only has us endpoints in the catalog + public_url = public_url.replace('https://servers.api', + 'https://lon.servers.api') + + return public_url + + +class RackspaceFirstGenNodeDriver(OpenStack_1_0_NodeDriver): + name = 'Rackspace Cloud (First Gen)' + website = 'http://www.rackspace.com' + connectionCls = RackspaceFirstGenConnection + type = Provider.RACKSPACE_FIRST_GEN api_name = 'rackspace' - name = 'Rackspace' - - _rackspace_prices = get_pricing(driver_type='compute', - driver_name='rackspace') - - features = {"create_node": ["generates_password"]} - NODE_STATE_MAP = { 'BUILD': NodeState.PENDING, - 'REBUILD': NodeState.PENDING, - 'ACTIVE': NodeState.RUNNING, - 'SUSPENDED': NodeState.TERMINATED, - 'QUEUE_RESIZE': NodeState.PENDING, - 'PREP_RESIZE': NodeState.PENDING, - 'VERIFY_RESIZE': NodeState.RUNNING, - 'PASSWORD': NodeState.PENDING, - 'RESCUE': NodeState.PENDING, - 'REBUILD': NodeState.PENDING, - 'REBOOT': NodeState.REBOOTING, - 'HARD_REBOOT': NodeState.REBOOTING, - 'SHARE_IP': NodeState.PENDING, - 'SHARE_IP_NO_CONFIG': NodeState.PENDING, - 'DELETE_IP': NodeState.PENDING, - 'UNKNOWN': NodeState.UNKNOWN} - - def list_nodes(self): - return 
self._to_nodes(self.connection.request('/servers/detail').object) - - def list_sizes(self, location=None): - return self._to_sizes(self.connection.request('/flavors/detail').object) + def __init__(self, key, secret=None, secure=True, host=None, port=None, + region='us', **kwargs): + """ + @inherits: :class:`NodeDriver.__init__` - def list_images(self, location=None): - return self._to_images(self.connection.request('/images/detail').object) + :param region: Region ID which should be used + :type region: ``str`` + """ + if region not in ['us', 'uk']: + raise ValueError('Invalid region: %s' % (region)) + + super(RackspaceFirstGenNodeDriver, self).__init__(key=key, + secret=secret, + secure=secure, + host=host, + port=port, + region=region, + **kwargs) def list_locations(self): - """Lists available locations + """ + Lists available locations Locations cannot be set or retrieved via the API, but currently there are two locations, DFW and ORD. - """ - return [NodeLocation(0, "Rackspace DFW1/ORD1", 'US', self)] - - def _change_password_or_name(self, node, name=None, password=None): - uri = '/servers/%s' % (node.id) - if not name: - name = node.name - - body = { 'xmlns': NAMESPACE, - 'name': name} + @inherits: :class:`OpenStack_1_0_NodeDriver.list_locations` + """ + if self.region == 'us': + locations = [NodeLocation(0, "Rackspace DFW1/ORD1", 'US', self)] + elif self.region == 'uk': + locations = [NodeLocation(0, 'Rackspace UK London', 'UK', self)] - if password != None: - body['adminPass'] = password + return locations - server_elm = ET.Element('server', body) + def _ex_connection_class_kwargs(self): + kwargs = self.openstack_connection_kwargs() + kwargs['region'] = self.region + return kwargs - resp = self.connection.request( - uri, method='PUT', data=ET.tostring(server_elm)) - if resp.status == 204 and password != None: - node.extra['password'] = password +class RackspaceConnection(OpenStack_1_1_Connection): + """ + Connection class for the Rackspace next-gen OpenStack 
base driver. + """ - return resp.status == 204 + auth_url = AUTH_URL + _auth_version = '2.0' - def ex_set_password(self, node, password): - """ - Sets the Node's root password. + def __init__(self, *args, **kwargs): + self.region = kwargs.pop('region', None) + self.get_endpoint_args = kwargs.pop('get_endpoint_args', None) + super(RackspaceConnection, self).__init__(*args, **kwargs) - This will reboot the instance to complete the operation. + def get_endpoint(self): + if not self.get_endpoint_args: + raise LibcloudError( + 'RackspaceConnection must have get_endpoint_args set') - L{node.extra['password']} will be set to the new value if the - operation was successful. - """ - return self._change_password_or_name(node, password=password) + if '2.0' in self._auth_version: + ep = self.service_catalog.get_endpoint(**self.get_endpoint_args) + else: + raise LibcloudError( + 'Auth version "%s" not supported' % (self._auth_version)) - def ex_set_server_name(self, node, name): - """ - Sets the Node's name. + public_url = ep.get('publicURL', None) - This will reboot the instance to complete the operation. - """ - return self._change_password_or_name(node, name=name) + if not public_url: + raise LibcloudError('Could not find specified endpoint') - def create_node(self, **kwargs): - """Create a new rackspace node + return public_url - See L{NodeDriver.create_node} for more keyword args. 
- @keyword ex_metadata: Key/Value metadata to associate with a node - @type ex_metadata: C{dict} - @keyword ex_files: File Path => File contents to create on the node - @type ex_files: C{dict} - """ - name = kwargs['name'] - image = kwargs['image'] - size = kwargs['size'] - server_elm = ET.Element( - 'server', - {'xmlns': NAMESPACE, - 'name': name, - 'imageId': str(image.id), - 'flavorId': str(size.id)} - ) - - metadata_elm = self._metadata_to_xml(kwargs.get("ex_metadata", {})) - if metadata_elm: - server_elm.append(metadata_elm) - - files_elm = self._files_to_xml(kwargs.get("ex_files", {})) - if files_elm: - server_elm.append(files_elm) - - shared_ip_elm = self._shared_ip_group_to_xml( - kwargs.get("ex_shared_ip_group", None)) - if shared_ip_elm: - server_elm.append(shared_ip_elm) - - resp = self.connection.request("/servers", - method='POST', - data=ET.tostring(server_elm)) - return self._to_node(resp.object) - - def ex_rebuild(self, node_id, image_id): - elm = ET.Element( - 'rebuild', - {'xmlns': NAMESPACE, - 'imageId': image_id, - } - ) - resp = self.connection.request("/servers/%s/action" % node_id, - method='POST', - data=ET.tostring(elm)) - return resp.status == 202 - - def ex_create_ip_group(self, group_name, node_id=None): - group_elm = ET.Element( - 'sharedIpGroup', - {'xmlns': NAMESPACE, - 'name': group_name, - } - ) - if node_id: - ET.SubElement(group_elm, - 'server', - {'id': node_id} - ) - - resp = self.connection.request('/shared_ip_groups', - method='POST', - data=ET.tostring(group_elm)) - return self._to_shared_ip_group(resp.object) - - def ex_list_ip_groups(self, details=False): - uri = '/shared_ip_groups/detail' if details else '/shared_ip_groups' - resp = self.connection.request(uri, - method='GET') - groups = self._findall(resp.object, 'sharedIpGroup') - return [self._to_shared_ip_group(el) for el in groups] - - def ex_delete_ip_group(self, group_id): - uri = '/shared_ip_groups/%s' % group_id - resp = self.connection.request(uri, 
method='DELETE') - return resp.status == 204 - - def ex_share_ip(self, group_id, node_id, ip, configure_node=True): - if configure_node: - str_configure = 'true' - else: - str_configure = 'false' +class RackspaceNodeDriver(OpenStack_1_1_NodeDriver): + name = 'Rackspace Cloud (Next Gen)' + website = 'http://www.rackspace.com' + connectionCls = RackspaceConnection + type = Provider.RACKSPACE - elm = ET.Element( - 'shareIp', - {'xmlns': NAMESPACE, - 'sharedIpGroupId' : group_id, - 'configureServer' : str_configure} - ) - - uri = '/servers/%s/ips/public/%s' % (node_id, ip) - - resp = self.connection.request(uri, - method='PUT', - data=ET.tostring(elm)) - return resp.status == 202 - - def ex_unshare_ip(self, node_id, ip): - uri = '/servers/%s/ips/public/%s' % (node_id, ip) - - resp = self.connection.request(uri, - method='DELETE') - return resp.status == 202 - - def ex_list_ip_addresses(self, node_id): - uri = '/servers/%s/ips' % node_id - resp = self.connection.request(uri, - method='GET') - return self._to_ip_addresses(resp.object) - - def _metadata_to_xml(self, metadata): - if len(metadata) == 0: - return None - - metadata_elm = ET.Element('metadata') - for k, v in metadata.items(): - meta_elm = ET.SubElement(metadata_elm, 'meta', {'key': str(k) }) - meta_elm.text = str(v) - - return metadata_elm - - def _files_to_xml(self, files): - if len(files) == 0: - return None - - personality_elm = ET.Element('personality') - for k, v in files.items(): - file_elm = ET.SubElement(personality_elm, - 'file', - {'path': str(k)}) - file_elm.text = base64.b64encode(v) - - return personality_elm - - def _reboot_node(self, node, reboot_type='SOFT'): - resp = self._node_action(node, ['reboot', ('type', reboot_type)]) - return resp.status == 202 - - def ex_soft_reboot_node(self, node): - return self._reboot_node(node, reboot_type='SOFT') - - def ex_hard_reboot_node(self, node): - return self._reboot_node(node, reboot_type='HARD') - - def reboot_node(self, node): - return 
self._reboot_node(node, reboot_type='HARD') - - def destroy_node(self, node): - uri = '/servers/%s' % (node.id) - resp = self.connection.request(uri, method='DELETE') - return resp.status == 202 - - def ex_get_node_details(self, node_id): - uri = '/servers/%s' % (node_id) - resp = self.connection.request(uri, method='GET') - if resp.status == 404: - return None - return self._to_node(resp.object) - - def _node_action(self, node, body): - if isinstance(body, list): - attr = ' '.join(['%s="%s"' % (item[0], item[1]) - for item in body[1:]]) - body = '<%s xmlns="%s" %s/>' % (body[0], NAMESPACE, attr) - uri = '/servers/%s/action' % (node.id) - resp = self.connection.request(uri, method='POST', data=body) - return resp - - def _to_nodes(self, object): - node_elements = self._findall(object, 'server') - return [ self._to_node(el) for el in node_elements ] - - def _fixxpath(self, xpath): - # ElementTree wants namespaces in its xpaths, so here we add them. - return "/".join(["{%s}%s" % (NAMESPACE, e) for e in xpath.split("/")]) - - def _findall(self, element, xpath): - return element.findall(self._fixxpath(xpath)) - - def _to_node(self, el): - def get_ips(el): - return [ip.get('addr') for ip in el] - - def get_meta_dict(el): - d = {} - for meta in el: - d[meta.get('key')] = meta.text - return d - - public_ip = get_ips(self._findall(el, - 'addresses/public/ip')) - private_ip = get_ips(self._findall(el, - 'addresses/private/ip')) - metadata = get_meta_dict(self._findall(el, 'metadata/meta')) - - n = Node(id=el.get('id'), - name=el.get('name'), - state=self.NODE_STATE_MAP.get( - el.get('status'), NodeState.UNKNOWN), - public_ip=public_ip, - private_ip=private_ip, - driver=self.connection.driver, - extra={ - 'password': el.get('adminPass'), - 'hostId': el.get('hostId'), - 'imageId': el.get('imageId'), - 'flavorId': el.get('flavorId'), - 'uri': "https://%s%s/servers/%s" % ( - self.connection.host, - self.connection.request_path, el.get('id')), - 'metadata': metadata, - }) - 
return n - - def _to_sizes(self, object): - elements = self._findall(object, 'flavor') - return [ self._to_size(el) for el in elements ] - - def _to_size(self, el): - s = NodeSize(id=el.get('id'), - name=el.get('name'), - ram=int(el.get('ram')), - disk=int(el.get('disk')), - bandwidth=None, # XXX: needs hardcode - price=self._get_size_price(el.get('id')), # Hardcoded, - driver=self.connection.driver) - return s - - def _to_images(self, object): - elements = self._findall(object, "image") - return [ self._to_image(el) - for el in elements - if el.get('status') == 'ACTIVE' ] - - def _to_image(self, el): - i = NodeImage(id=el.get('id'), - name=el.get('name'), - driver=self.connection.driver, - extra={'serverId': el.get('serverId')}) - return i + _networks_url_prefix = '/os-networksv2' - def ex_limits(self): + def __init__(self, key, secret=None, secure=True, host=None, port=None, + region='dfw', **kwargs): """ - Extra call to get account's limits, such as - rates (for example amount of POST requests per day) - and absolute limits like total amount of available - RAM to be used by servers. + @inherits: :class:`NodeDriver.__init__` - @return: C{dict} with keys 'rate' and 'absolute' + :param region: ID of the region which should be used. + :type region: ``str`` """ + valid_regions = ENDPOINT_ARGS_MAP.keys() - def _to_rate(el): - rate = {} - for item in el.items(): - rate[item[0]] = item[1] - - return rate - - def _to_absolute(el): - return {el.get('name'): el.get('value')} - - limits = self.connection.request("/limits").object - rate = [ _to_rate(el) for el in self._findall(limits, 'rate/limit') ] - absolute = {} - for item in self._findall(limits, 'absolute/limit'): - absolute.update(_to_absolute(item)) - - return {"rate": rate, "absolute": absolute} - - def ex_save_image(self, node, name): - """Create an image for node. 
- - @keyword node: node to use as a base for image - @param node: L{Node} - @keyword name: name for new image - @param name: C{string} - """ + if region not in valid_regions: + raise ValueError('Invalid region: %s' % (region)) - image_elm = ET.Element( - 'image', - {'xmlns': NAMESPACE, - 'name': name, - 'serverId': node.id} - ) - - return self._to_image(self.connection.request("/images", - method="POST", - data=ET.tostring(image_elm)).object) - - def _to_shared_ip_group(self, el): - servers_el = self._findall(el, 'servers') - if servers_el: - servers = [s.get('id') - for s in self._findall(servers_el[0], 'server')] + if region == 'lon': + self.api_name = 'rackspacenovalon' + elif region == 'syd': + self.api_name = 'rackspacenovasyd' else: - servers = None - return RackspaceSharedIpGroup(id=el.get('id'), - name=el.get('name'), - servers=servers) - - def _to_ip_addresses(self, el): - return RackspaceNodeIpAddresses( - [ip.get('addr') for ip in - self._findall(self._findall(el, 'public')[0], 'ip')], - [ip.get('addr') for ip in - self._findall(self._findall(el, 'private')[0], 'ip')] - ) - - def _shared_ip_group_to_xml(self, shared_ip_group): - if not shared_ip_group: - return None - - return ET.Element('sharedIpGroupId', shared_ip_group) - -class RackspaceUKConnection(RackspaceConnection): - """ - Connection class for the Rackspace UK driver - """ - auth_host = AUTH_HOST_UK - -class RackspaceUKNodeDriver(RackspaceNodeDriver): - """Driver for Rackspace in the UK (London) - """ - - name = 'Rackspace (UK)' - connectionCls = RackspaceUKConnection - - def list_locations(self): - return [NodeLocation(0, 'Rackspace UK London', 'UK', self)] - -class OpenStackConnection(RackspaceConnection): + self.api_name = 'rackspacenovaus' - def __init__(self, user_id, key, secure, host, port): - super(OpenStackConnection, self).__init__(user_id, key, secure=secure) - self.auth_host = host - self.port = (port, port) - -class OpenStackNodeDriver(RackspaceNodeDriver): - name = 'OpenStack' - 
connectionCls = OpenStackConnection + super(RackspaceNodeDriver, self).__init__(key=key, secret=secret, + secure=secure, host=host, + port=port, + region=region, + **kwargs) + + def _to_snapshot(self, api_node): + if 'snapshot' in api_node: + api_node = api_node['snapshot'] + + extra = {'volume_id': api_node['volumeId'], + 'name': api_node['displayName'], + 'created': api_node['createdAt'], + 'description': api_node['displayDescription'], + 'status': api_node['status']} + + snapshot = VolumeSnapshot(id=api_node['id'], driver=self, + size=api_node['size'], + extra=extra) + return snapshot + + def _ex_connection_class_kwargs(self): + endpoint_args = ENDPOINT_ARGS_MAP[self.region] + kwargs = self.openstack_connection_kwargs() + kwargs['region'] = self.region + kwargs['get_endpoint_args'] = endpoint_args + return kwargs diff -Nru libcloud-0.5.0/libcloud/compute/drivers/rimuhosting.py libcloud-0.15.1/libcloud/compute/drivers/rimuhosting.py --- libcloud-0.5.0/libcloud/compute/drivers/rimuhosting.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/rimuhosting.py 2014-06-11 14:28:05.000000000 +0000 @@ -16,20 +16,19 @@ RimuHosting Driver """ try: - import json -except: import simplejson as json +except ImportError: + import json -from libcloud.common.base import ConnectionKey, Response +from libcloud.common.base import ConnectionKey, JsonResponse from libcloud.common.types import InvalidCredsError from libcloud.compute.types import Provider, NodeState from libcloud.compute.base import NodeDriver, NodeSize, Node, NodeLocation -from libcloud.compute.base import NodeImage, NodeAuthPassword +from libcloud.compute.base import NodeImage API_CONTEXT = '/r' API_HOST = 'rimuhosting.com' -API_PORT = (80,443) -API_SECURE = True + class RimuHostingException(Exception): """ @@ -42,35 +41,30 @@ def __repr__(self): return "" % (self.args[0]) -class RimuHostingResponse(Response): - def __init__(self, response): - self.body = response.read() - self.status = 
response.status - self.headers = dict(response.getheaders()) - self.error = response.reason - - if self.success(): - self.object = self.parse_body() +class RimuHostingResponse(JsonResponse): + """ + Response Class for RimuHosting driver + """ def success(self): if self.status == 403: raise InvalidCredsError() return True + def parse_body(self): try: - js = json.loads(self.body) - if js[js.keys()[0]]['response_type'] == "ERROR": + js = super(RimuHostingResponse, self).parse_body() + keys = list(js.keys()) + if js[keys[0]]['response_type'] == "ERROR": raise RimuHostingException( - js[js.keys()[0]]['human_readable_message'] + js[keys[0]]['human_readable_message'] ) - return js[js.keys()[0]] - except ValueError: - raise RimuHostingException('Could not parse body: %s' - % (self.body)) + return js[keys[0]] except KeyError: raise RimuHostingException('Could not parse body: %s' % (self.body)) + class RimuHostingConnection(ConnectionKey): """ Connection class for the RimuHosting driver @@ -78,12 +72,12 @@ api_context = API_CONTEXT host = API_HOST - port = API_PORT + port = 443 responseCls = RimuHostingResponse def __init__(self, key, secure=True): # override __init__ so that we can set secure of False for testing - ConnectionKey.__init__(self,key,secure) + ConnectionKey.__init__(self, key, secure) def add_default_headers(self, headers): # We want JSON back from the server. 
Could be application/xml @@ -93,9 +87,10 @@ headers['Content-Type'] = 'application/json' headers['Authorization'] = 'rimuhosting apikey=%s' % (self.key) - return headers; + return headers - def request(self, action, params=None, data='', headers=None, method='GET'): + def request(self, action, params=None, data='', headers=None, + method='GET'): if not headers: headers = {} if not params: @@ -104,6 +99,7 @@ return ConnectionKey.request(self, self.api_context + action, params, data, headers, method) + class RimuHostingNodeDriver(NodeDriver): """ RimuHosting node driver @@ -111,40 +107,61 @@ type = Provider.RIMUHOSTING name = 'RimuHosting' + website = 'http://rimuhosting.com/' connectionCls = RimuHostingConnection + features = {'create_node': ['password']} - def __init__(self, key, host=API_HOST, port=API_PORT, - api_context=API_CONTEXT, secure=API_SECURE): + def __init__(self, key, host=API_HOST, port=443, + api_context=API_CONTEXT, secure=True): + """ + :param key: API key (required) + :type key: ``str`` + + :param host: hostname for connection + :type host: ``str`` + + :param port: Override port used for connections. + :type port: ``int`` + + :param api_context: Optional API context. + :type api_context: ``str`` + + :param secure: Weither to use HTTPS or HTTP. + :type secure: ``bool`` + + :rtype: ``None`` + """ # Pass in some extra vars so that self.key = key self.secure = secure - self.connection = self.connectionCls(key ,secure) + self.connection = self.connectionCls(key, secure) self.connection.host = host self.connection.api_context = api_context self.connection.port = port self.connection.driver = self self.connection.connect() - def _order_uri(self, node,resource): + def _order_uri(self, node, resource): # Returns the order uri with its resourse appended. - return "/orders/%s/%s" % (node.id,resource) + return "/orders/%s/%s" % (node.id, resource) # TODO: Get the node state. 
def _to_node(self, order): n = Node(id=order['slug'], - name=order['domain_name'], - state=NodeState.RUNNING, - public_ip=( - [order['allocated_ips']['primary_ip']] - + order['allocated_ips']['secondary_ips'] - ), - private_ip=[], - driver=self.connection.driver, - extra={'order_oid': order['order_oid'], - 'monthly_recurring_fee': order.get('billing_info').get('monthly_recurring_fee')}) + name=order['domain_name'], + state=NodeState.RUNNING, + public_ips=( + [order['allocated_ips']['primary_ip']] + + order['allocated_ips']['secondary_ips']), + private_ips=[], + driver=self.connection.driver, + extra={ + 'order_oid': order['order_oid'], + 'monthly_recurring_fee': order.get( + 'billing_info').get('monthly_recurring_fee')}) return n - def _to_size(self,plan): + def _to_size(self, plan): return NodeSize( id=plan['pricing_plan_code'], name=plan['pricing_plan_description'], @@ -155,87 +172,92 @@ driver=self.connection.driver ) - def _to_image(self,image): + def _to_image(self, image): return NodeImage(id=image['distro_code'], - name=image['distro_description'], - driver=self.connection.driver) + name=image['distro_description'], + driver=self.connection.driver) def list_sizes(self, location=None): # Returns a list of sizes (aka plans) # Get plans. Note this is really just for libcloud. # We are happy with any size. - if location == None: - location = ''; + if location is None: + location = '' else: location = ";dc_location=%s" % (location.id) - res = self.connection.request('/pricing-plans;server-type=VPS%s' % (location)).object - return map(lambda x : self._to_size(x), res['pricing_plan_infos']) + res = self.connection.request( + '/pricing-plans;server-type=VPS%s' % (location)).object + return list(map(lambda x: self._to_size(x), res['pricing_plan_infos'])) def list_nodes(self): # Returns a list of Nodes # Will only include active ones. 
res = self.connection.request('/orders;include_inactive=N').object - return map(lambda x : self._to_node(x), res['about_orders']) + return list(map(lambda x: self._to_node(x), res['about_orders'])) def list_images(self, location=None): # Get all base images. # TODO: add other image sources. (Such as a backup of a VPS) # All Images are available for use at all locations res = self.connection.request('/distributions').object - return map(lambda x : self._to_image(x), res['distro_infos']) + return list(map(lambda x: self._to_image(x), res['distro_infos'])) def reboot_node(self, node): # Reboot # PUT the state of RESTARTING to restart a VPS. # All data is encoded as JSON - data = {'reboot_request':{'running_state':'RESTARTING'}} - uri = self._order_uri(node,'vps/running-state') - self.connection.request(uri,data=json.dumps(data),method='PUT') + data = {'reboot_request': {'running_state': 'RESTARTING'}} + uri = self._order_uri(node, 'vps/running-state') + self.connection.request(uri, data=json.dumps(data), method='PUT') # XXX check that the response was actually successful return True def destroy_node(self, node): # Shutdown a VPS. - uri = self._order_uri(node,'vps') - self.connection.request(uri,method='DELETE') + uri = self._order_uri(node, 'vps') + self.connection.request(uri, method='DELETE') # XXX check that the response was actually successful return True def create_node(self, **kwargs): """Creates a RimuHosting instance - See L{NodeDriver.create_node} for more keyword args. + @inherits: :class:`NodeDriver.create_node` - @keyword name: Must be a FQDN. e.g example.com. - @type name: C{string} + :keyword name: Must be a FQDN. e.g example.com. + :type name: ``str`` - @keyword ex_billing_oid: If not set, a billing method is automatically picked. - @type ex_billing_oid: C{string} + :keyword ex_billing_oid: If not set, + a billing method is automatically picked. + :type ex_billing_oid: ``str`` - @keyword ex_host_server_oid: The host server to set the VPS up on. 
- @type ex_host_server_oid: C{string} + :keyword ex_host_server_oid: The host server to set the VPS up on. + :type ex_host_server_oid: ``str`` - @keyword ex_vps_order_oid_to_clone: Clone another VPS to use as the image for the new VPS. - @type ex_vps_order_oid_to_clone: C{string} + :keyword ex_vps_order_oid_to_clone: Clone another VPS to use as + the image for the new VPS. + :type ex_vps_order_oid_to_clone: ``str`` - @keyword ex_num_ips: Number of IPs to allocate. Defaults to 1. - @type ex_num_ips: C{int} + :keyword ex_num_ips: Number of IPs to allocate. Defaults to 1. + :type ex_num_ips: ``int`` - @keyword ex_extra_ip_reason: Reason for needing the extra IPs. - @type ex_extra_ip_reason: C{string} + :keyword ex_extra_ip_reason: Reason for needing the extra IPs. + :type ex_extra_ip_reason: ``str`` - @keyword ex_memory_mb: Memory to allocate to the VPS. - @type ex_memory_mb: C{int} + :keyword ex_memory_mb: Memory to allocate to the VPS. + :type ex_memory_mb: ``int`` - @keyword ex_disk_space_mb: Diskspace to allocate to the VPS. Defaults to 4096 (4GB). - @type ex_disk_space_mb: C{int} + :keyword ex_disk_space_mb: Diskspace to allocate to the VPS. + Defaults to 4096 (4GB). + :type ex_disk_space_mb: ``int`` - @keyword ex_disk_space_2_mb: Secondary disk size allocation. Disabled by default. - @type ex_disk_space_2_mb: C{int} + :keyword ex_disk_space_2_mb: Secondary disk size allocation. + Disabled by default. + :type ex_disk_space_2_mb: ``int`` - @keyword ex_control_panel: Control panel to install on the VPS. - @type ex_control_panel: C{string} + :keyword ex_control_panel: Control panel to install on the VPS. + :type ex_control_panel: ``str`` """ # Note we don't do much error checking in this because we # expect the API to error out if there is a problem. 
@@ -244,62 +266,66 @@ size = kwargs['size'] data = { - 'instantiation_options':{ - 'domain_name': name, 'distro': image.id + 'instantiation_options': { + 'domain_name': name, + 'distro': image.id }, 'pricing_plan_code': size.id, + 'vps_parameters': {} } - if kwargs.has_key('ex_control_panel'): - data['instantiation_options']['control_panel'] = kwargs['ex_control_panel'] + if 'ex_control_panel' in kwargs: + data['instantiation_options']['control_panel'] = \ + kwargs['ex_control_panel'] - if kwargs.has_key('auth'): - auth = kwargs['auth'] - if not isinstance(auth, NodeAuthPassword): - raise ValueError('auth must be of NodeAuthPassword type') - data['instantiation_options']['password'] = auth.password + auth = self._get_and_check_auth(kwargs.get('auth')) + data['instantiation_options']['password'] = auth.password - if kwargs.has_key('ex_billing_oid'): - #TODO check for valid oid. + if 'ex_billing_oid' in kwargs: + # TODO check for valid oid. data['billing_oid'] = kwargs['ex_billing_oid'] - if kwargs.has_key('ex_host_server_oid'): + if 'ex_host_server_oid' in kwargs: data['host_server_oid'] = kwargs['ex_host_server_oid'] - if kwargs.has_key('ex_vps_order_oid_to_clone'): - data['vps_order_oid_to_clone'] = kwargs['ex_vps_order_oid_to_clone'] + if 'ex_vps_order_oid_to_clone' in kwargs: + data['vps_order_oid_to_clone'] = \ + kwargs['ex_vps_order_oid_to_clone'] - if kwargs.has_key('ex_num_ips') and int(kwargs['ex_num_ips']) > 1: - if not kwargs.has_key('ex_extra_ip_reason'): - raise RimuHostingException('Need an reason for having an extra IP') + if 'ex_num_ips' in kwargs and int(kwargs['ex_num_ips']) > 1: + if 'ex_extra_ip_reason' not in kwargs: + raise RimuHostingException( + 'Need an reason for having an extra IP') else: - if not data.has_key('ip_request'): + if 'ip_request' not in data: data['ip_request'] = {} data['ip_request']['num_ips'] = int(kwargs['ex_num_ips']) - data['ip_request']['extra_ip_reason'] = kwargs['ex_extra_ip_reason'] + 
data['ip_request']['extra_ip_reason'] = \ + kwargs['ex_extra_ip_reason'] - if kwargs.has_key('ex_memory_mb'): - if not data.has_key('vps_parameters'): - data['vps_parameters'] = {} + if 'ex_memory_mb' in kwargs: data['vps_parameters']['memory_mb'] = kwargs['ex_memory_mb'] - if kwargs.has_key('ex_disk_space_mb'): - if not data.has_key('ex_vps_parameters'): - data['vps_parameters'] = {} - data['vps_parameters']['disk_space_mb'] = kwargs['ex_disk_space_mb'] - - if kwargs.has_key('ex_disk_space_2_mb'): - if not data.has_key('vps_parameters'): - data['vps_parameters'] = {} - data['vps_parameters']['disk_space_2_mb'] = kwargs['ex_disk_space_2_mb'] + if 'ex_disk_space_mb' in kwargs: + data['vps_parameters']['disk_space_mb'] = \ + kwargs['ex_disk_space_mb'] + + if 'ex_disk_space_2_mb' in kwargs: + data['vps_parameters']['disk_space_2_mb'] =\ + kwargs['ex_disk_space_2_mb'] + + # Don't send empty 'vps_parameters' attribute + if not data['vps_parameters']: + del data['vps_parameters'] res = self.connection.request( '/orders/new-vps', method='POST', - data=json.dumps({"new-vps":data}) + data=json.dumps({"new-vps": data}) ).object node = self._to_node(res['about_order']) - node.extra['password'] = res['new_order_request']['instantiation_options']['password'] + node.extra['password'] = \ + res['new_order_request']['instantiation_options']['password'] return node def list_locations(self): @@ -309,5 +335,3 @@ NodeLocation('DCLONDON', "RimuHosting London", 'GB', self), NodeLocation('DCSYDNEY', "RimuHosting Sydney", 'AU', self), ] - - features = {"create_node": ["password"]} diff -Nru libcloud-0.5.0/libcloud/compute/drivers/serverlove.py libcloud-0.15.1/libcloud/compute/drivers/serverlove.py --- libcloud-0.5.0/libcloud/compute/drivers/serverlove.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/serverlove.py 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,83 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor 
license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +ServerLove Driver +""" + +from libcloud.compute.types import Provider +from libcloud.compute.drivers.elasticstack import ElasticStackBaseNodeDriver +from libcloud.compute.drivers.elasticstack import ElasticStackBaseConnection + + +# API end-points +API_ENDPOINTS = { + 'uk-1': { + 'name': 'United Kingdom, Manchester', + 'country': 'United Kingdom', + 'host': 'api.z1-man.serverlove.com' + } +} + +# Default API end-point for the base connection class. 
+DEFAULT_ENDPOINT = 'uk-1' + +# Retrieved from http://www.serverlove.com/cloud-server-faqs/api-questions/ +STANDARD_DRIVES = { + '679f5f44-0be7-4745-a658-cccd4334c1aa': { + 'uuid': '679f5f44-0be7-4745-a658-cccd4334c1aa', + 'description': 'CentOS 5.5', + 'size_gunzipped': '1GB', + 'supports_deployment': True, + }, + '5f2e0e29-2937-42b9-b362-d2d07eddbdeb': { + 'uuid': '5f2e0e29-2937-42b9-b362-d2d07eddbdeb', + 'description': 'Ubuntu Linux 10.04', + 'size_gunzipped': '1GB', + 'supports_deployment': True, + }, + '5795b68f-ed26-4639-b41d-c93235062b6b': { + 'uuid': '5795b68f-ed26-4639-b41d-c93235062b6b', + 'description': 'Debian Linux 5', + 'size_gunzipped': '1GB', + 'supports_deployment': True, + }, + '41993a02-0b22-4e49-bb47-0aa8975217e4': { + 'uuid': '41993a02-0b22-4e49-bb47-0aa8975217e4', + 'description': 'Windows Server 2008 R2 Standard', + 'size_gunzipped': '15GB', + 'supports_deployment': False, + }, + '85623ca1-9c2a-4398-a771-9a43c347e86b': { + 'uuid': '85623ca1-9c2a-4398-a771-9a43c347e86b', + 'description': 'Windows Web Server 2008 R2', + 'size_gunzipped': '15GB', + 'supports_deployment': False, + } +} + + +class ServerLoveConnection(ElasticStackBaseConnection): + host = API_ENDPOINTS[DEFAULT_ENDPOINT]['host'] + + +class ServerLoveNodeDriver(ElasticStackBaseNodeDriver): + type = Provider.SERVERLOVE + api_name = 'serverlove' + website = 'http://www.serverlove.com/' + name = 'ServerLove' + connectionCls = ServerLoveConnection + features = {'create_node': ['generates_password']} + _standard_drives = STANDARD_DRIVES diff -Nru libcloud-0.5.0/libcloud/compute/drivers/skalicloud.py libcloud-0.15.1/libcloud/compute/drivers/skalicloud.py --- libcloud-0.5.0/libcloud/compute/drivers/skalicloud.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/skalicloud.py 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,83 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +skalicloud Driver +""" + +from libcloud.compute.types import Provider +from libcloud.compute.drivers.elasticstack import ElasticStackBaseNodeDriver +from libcloud.compute.drivers.elasticstack import ElasticStackBaseConnection + + +# API end-points +API_ENDPOINTS = { + 'my-1': { + 'name': 'Malaysia, Kuala Lumpur', + 'country': 'Malaysia', + 'host': 'api.sdg-my.skalicloud.com' + } +} + +# Default API end-point for the base connection class. 
+DEFAULT_ENDPOINT = 'my-1' + +# Retrieved from http://www.skalicloud.com/cloud-api/ +STANDARD_DRIVES = { + '90aa51f2-15c0-4cff-81ee-e93aa20b9468': { + 'uuid': '90aa51f2-15c0-4cff-81ee-e93aa20b9468', + 'description': 'CentOS 5.5 -64bit', + 'size_gunzipped': '1GB', + 'supports_deployment': True, + }, + 'c144d7a7-e24b-48ab-954b-6b6ec514ed6f': { + 'uuid': 'c144d7a7-e24b-48ab-954b-6b6ec514ed6f', + 'description': 'Debian 5 -64bit', + 'size_gunzipped': '1GB', + 'supports_deployment': True, + }, + '3051699a-a536-4220-aeb5-67f2ec101a09': { + 'uuid': '3051699a-a536-4220-aeb5-67f2ec101a09', + 'description': 'Ubuntu Server 10.10 -64bit', + 'size_gunzipped': '1GB', + 'supports_deployment': True, + }, + '11c4c922-5ff8-4094-b06c-eb8ffaec1ea9': { + 'uuid': '11c4c922-5ff8-4094-b06c-eb8ffaec1ea9', + 'description': 'Windows 2008R2 Web Edition', + 'size_gunzipped': '13GB', + 'supports_deployment': False, + }, + '93bf390e-4f46-4252-a8bc-9d6d80e3f955': { + 'uuid': '93bf390e-4f46-4252-a8bc-9d6d80e3f955', + 'description': 'Windows Server 2008R2 Standard', + 'size_gunzipped': '13GB', + 'supports_deployment': False, + } +} + + +class SkaliCloudConnection(ElasticStackBaseConnection): + host = API_ENDPOINTS[DEFAULT_ENDPOINT]['host'] + + +class SkaliCloudNodeDriver(ElasticStackBaseNodeDriver): + type = Provider.SKALICLOUD + api_name = 'skalicloud' + name = 'skalicloud' + website = 'http://www.skalicloud.com/' + connectionCls = SkaliCloudConnection + features = {"create_node": ["generates_password"]} + _standard_drives = STANDARD_DRIVES diff -Nru libcloud-0.5.0/libcloud/compute/drivers/slicehost.py libcloud-0.15.1/libcloud/compute/drivers/slicehost.py --- libcloud-0.5.0/libcloud/compute/drivers/slicehost.py 2011-05-10 02:11:44.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/slicehost.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,255 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Slicehost Driver -""" -import base64 -import socket - -from xml.etree import ElementTree as ET -from xml.parsers.expat import ExpatError - -from libcloud.common.base import ConnectionKey, Response -from libcloud.compute.types import ( - NodeState, Provider, InvalidCredsError, MalformedResponseError) -from libcloud.compute.base import NodeSize, NodeDriver, NodeImage, NodeLocation -from libcloud.compute.base import Node, is_private_subnet - -class SlicehostResponse(Response): - - def parse_body(self): - # length of 1 can't be valid XML, but on destroy node, - # slicehost returns a 1 byte response with a "Content-Type: - # application/xml" header. booya. 
- if not self.body or len(self.body) <= 1: - return None - try: - body = ET.XML(self.body) - except: - raise MalformedResponseError( - "Failed to parse XML", - body=self.body, - driver=SlicehostNodeDriver) - return body - - def parse_error(self): - if self.status == 401: - raise InvalidCredsError(self.body) - - try: - body = ET.XML(self.body) - except: - raise MalformedResponseError( - "Failed to parse XML", - body=self.body, - driver=SlicehostNodeDriver) - try: - return "; ".join([ err.text - for err in - body.findall('error') ]) - except ExpatError: - return self.body - - -class SlicehostConnection(ConnectionKey): - """ - Connection class for the Slicehost driver - """ - - host = 'api.slicehost.com' - responseCls = SlicehostResponse - - def add_default_headers(self, headers): - headers['Authorization'] = ('Basic %s' - % (base64.b64encode('%s:' % self.key))) - return headers - - -class SlicehostNodeDriver(NodeDriver): - """ - Slicehost node driver - """ - - connectionCls = SlicehostConnection - - type = Provider.SLICEHOST - name = 'Slicehost' - - features = {"create_node": ["generates_password"]} - - NODE_STATE_MAP = { 'active': NodeState.RUNNING, - 'build': NodeState.PENDING, - 'reboot': NodeState.REBOOTING, - 'hard_reboot': NodeState.REBOOTING, - 'terminated': NodeState.TERMINATED } - - def list_nodes(self): - return self._to_nodes(self.connection.request('/slices.xml').object) - - def list_sizes(self, location=None): - return self._to_sizes(self.connection.request('/flavors.xml').object) - - def list_images(self, location=None): - return self._to_images(self.connection.request('/images.xml').object) - - def list_locations(self): - return [ - NodeLocation(0, 'Slicehost St. Louis (STL-A)', 'US', self), - NodeLocation(0, 'Slicehost St. 
Louis (STL-B)', 'US', self), - NodeLocation(0, 'Slicehost Dallas-Fort Worth (DFW-1)', 'US', self) - ] - - def create_node(self, **kwargs): - name = kwargs['name'] - image = kwargs['image'] - size = kwargs['size'] - uri = '/slices.xml' - - # create a slice obj - root = ET.Element('slice') - el_name = ET.SubElement(root, 'name') - el_name.text = name - flavor_id = ET.SubElement(root, 'flavor-id') - flavor_id.text = str(size.id) - image_id = ET.SubElement(root, 'image-id') - image_id.text = str(image.id) - xml = ET.tostring(root) - - node = self._to_nodes( - self.connection.request( - uri, - method='POST', - data=xml, - headers={'Content-Type': 'application/xml'} - ).object - )[0] - return node - - def reboot_node(self, node): - """Reboot the node by passing in the node object""" - - # 'hard' could bubble up as kwarg depending on how reboot_node - # turns out. Defaulting to soft reboot. - #hard = False - #reboot = self.api.hard_reboot if hard else self.api.reboot - #expected_status = 'hard_reboot' if hard else 'reboot' - - uri = '/slices/%s/reboot.xml' % (node.id) - node = self._to_nodes( - self.connection.request(uri, method='PUT').object - )[0] - return node.state == NodeState.REBOOTING - - def destroy_node(self, node): - """Destroys the node - - Requires 'Allow Slices to be deleted or rebuilt from the API' to be - ticked at https://manage.slicehost.com/api, otherwise returns:: - - You must enable slice deletes in the SliceManager - Permission denied - - """ - uri = '/slices/%s/destroy.xml' % (node.id) - self.connection.request(uri, method='PUT') - return True - - def _to_nodes(self, object): - if object.tag == 'slice': - return [ self._to_node(object) ] - node_elements = object.findall('slice') - return [ self._to_node(el) for el in node_elements ] - - def _to_node(self, element): - - attrs = [ 'name', 'image-id', 'progress', 'id', 'bw-out', 'bw-in', - 'flavor-id', 'status', 'ip-address', 'root-password' ] - - node_attrs = {} - for attr in attrs: - node_attrs[attr] 
= element.findtext(attr) - - # slicehost does not determine between public and private, so we - # have to figure it out - public_ip = [] - private_ip = [] - - ip_address = element.findtext('ip-address') - if is_private_subnet(ip_address): - private_ip.append(ip_address) - else: - public_ip.append(ip_address) - - for addr in element.findall('addresses/address'): - ip = addr.text - try: - socket.inet_aton(ip) - except socket.error: - # not a valid ip - continue - if is_private_subnet(ip): - private_ip.append(ip) - else: - public_ip.append(ip) - - public_ip = list(set(public_ip)) - - try: - state = self.NODE_STATE_MAP[element.findtext('status')] - except: - state = NodeState.UNKNOWN - - # for consistency with other drivers, we put this in two places. - node_attrs['password'] = node_attrs['root-password'] - extra = {} - for k in node_attrs.keys(): - ek = k.replace("-", "_") - extra[ek] = node_attrs[k] - n = Node(id=element.findtext('id'), - name=element.findtext('name'), - state=state, - public_ip=public_ip, - private_ip=private_ip, - driver=self.connection.driver, - extra=extra) - return n - - def _to_sizes(self, object): - if object.tag == 'flavor': - return [ self._to_size(object) ] - elements = object.findall('flavor') - return [ self._to_size(el) for el in elements ] - - def _to_size(self, element): - s = NodeSize(id=int(element.findtext('id')), - name=str(element.findtext('name')), - ram=int(element.findtext('ram')), - disk=None, # XXX: needs hardcode - bandwidth=None, # XXX: needs hardcode - price=float(element.findtext('price'))/(100*24*30), - driver=self.connection.driver) - return s - - def _to_images(self, object): - if object.tag == 'image': - return [ self._to_image(object) ] - elements = object.findall('image') - return [ self._to_image(el) for el in elements ] - - def _to_image(self, element): - i = NodeImage(id=int(element.findtext('id')), - name=str(element.findtext('name')), - driver=self.connection.driver) - return i diff -Nru 
libcloud-0.5.0/libcloud/compute/drivers/softlayer.py libcloud-0.15.1/libcloud/compute/drivers/softlayer.py --- libcloud-0.5.0/libcloud/compute/drivers/softlayer.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/softlayer.py 2014-06-11 14:27:59.000000000 +0000 @@ -17,122 +17,116 @@ """ import time -import xmlrpclib - -import libcloud +from libcloud.common.base import ConnectionUserAndKey +from libcloud.common.xmlrpc import XMLRPCResponse, XMLRPCConnection from libcloud.common.types import InvalidCredsError, LibcloudError from libcloud.compute.types import Provider, NodeState -from libcloud.compute.base import NodeDriver, Node, NodeLocation, NodeSize, NodeImage +from libcloud.compute.base import NodeDriver, Node, NodeLocation, NodeSize, \ + NodeImage + +DEFAULT_DOMAIN = 'example.com' +DEFAULT_CPU_SIZE = 1 +DEFAULT_RAM_SIZE = 2048 +DEFAULT_DISK_SIZE = 100 DATACENTERS = { - 'sea01': {'country': 'US'}, - 'wdc01': {'country': 'US'}, - 'dal01': {'country': 'US'} + 'hou02': {'country': 'US'}, + 'sea01': {'country': 'US', 'name': 'Seattle - West Coast U.S.'}, + 'wdc01': {'country': 'US', 'name': 'Washington, DC - East Coast U.S.'}, + 'dal01': {'country': 'US'}, + 'dal02': {'country': 'US'}, + 'dal04': {'country': 'US'}, + 'dal05': {'country': 'US', 'name': 'Dallas - Central U.S.'}, + 'dal06': {'country': 'US'}, + 'dal07': {'country': 'US'}, + 'sjc01': {'country': 'US', 'name': 'San Jose - West Coast U.S.'}, + 'sng01': {'country': 'SG', 'name': 'Singapore - Southeast Asia'}, + 'ams01': {'country': 'NL', 'name': 'Amsterdam - Western Europe'}, } NODE_STATE_MAP = { 'RUNNING': NodeState.RUNNING, - 'HALTED': NodeState.TERMINATED, - 'PAUSED': NodeState.TERMINATED, + 'HALTED': NodeState.UNKNOWN, + 'PAUSED': NodeState.UNKNOWN, + 'INITIATING': NodeState.PENDING } -DEFAULT_PACKAGE = 46 +SL_BASE_TEMPLATES = [ + { + 'name': '1 CPU, 1GB ram, 25GB', + 'ram': 1024, + 'disk': 25, + 'cpus': 1, + }, { + 'name': '1 CPU, 1GB ram, 100GB', + 'ram': 1024, + 
'disk': 100, + 'cpus': 1, + }, { + 'name': '1 CPU, 2GB ram, 100GB', + 'ram': 2 * 1024, + 'disk': 100, + 'cpus': 1, + }, { + 'name': '1 CPU, 4GB ram, 100GB', + 'ram': 4 * 1024, + 'disk': 100, + 'cpus': 1, + }, { + 'name': '2 CPU, 2GB ram, 100GB', + 'ram': 2 * 1024, + 'disk': 100, + 'cpus': 2, + }, { + 'name': '2 CPU, 4GB ram, 100GB', + 'ram': 4 * 1024, + 'disk': 100, + 'cpus': 2, + }, { + 'name': '2 CPU, 8GB ram, 100GB', + 'ram': 8 * 1024, + 'disk': 100, + 'cpus': 2, + }, { + 'name': '4 CPU, 4GB ram, 100GB', + 'ram': 4 * 1024, + 'disk': 100, + 'cpus': 4, + }, { + 'name': '4 CPU, 8GB ram, 100GB', + 'ram': 8 * 1024, + 'disk': 100, + 'cpus': 4, + }, { + 'name': '6 CPU, 4GB ram, 100GB', + 'ram': 4 * 1024, + 'disk': 100, + 'cpus': 6, + }, { + 'name': '6 CPU, 8GB ram, 100GB', + 'ram': 8 * 1024, + 'disk': 100, + 'cpus': 6, + }, { + 'name': '8 CPU, 8GB ram, 100GB', + 'ram': 8 * 1024, + 'disk': 100, + 'cpus': 8, + }, { + 'name': '8 CPU, 16GB ram, 100GB', + 'ram': 16 * 1024, + 'disk': 100, + 'cpus': 8, + }] + +SL_TEMPLATES = {} +for i, template in enumerate(SL_BASE_TEMPLATES): + # Add local disk templates + local = template.copy() + local['local_disk'] = True + SL_TEMPLATES[i] = local -SL_IMAGES = [ - {'id': 1684, 'name': 'CentOS 5 - Minimal Install (32 bit)'}, - {'id': 1685, 'name': 'CentOS 5 - Minimal Install (64 bit)'}, - {'id': 1686, 'name': 'CentOS 5 - LAMP Install (32 bit)'}, - {'id': 1687, 'name': 'CentOS 5 - LAMP Install (64 bit)'}, - {'id': 1688, 'name': 'Red Hat Enterprise Linux 5 - Minimal Install (32 bit)'}, - {'id': 1689, 'name': 'Red Hat Enterprise Linux 5 - Minimal Install (64 bit)'}, - {'id': 1690, 'name': 'Red Hat Enterprise Linux 5 - LAMP Install (32 bit)'}, - {'id': 1691, 'name': 'Red Hat Enterprise Linux 5 - LAMP Install (64 bit)'}, - {'id': 1692, 'name': 'Ubuntu Linux 8 LTS Hardy Heron - Minimal Install (32 bit)'}, - {'id': 1693, 'name': 'Ubuntu Linux 8 LTS Hardy Heron - Minimal Install (64 bit)'}, - {'id': 1694, 'name': 'Ubuntu Linux 8 LTS Hardy Heron - 
LAMP Install (32 bit)'}, - {'id': 1695, 'name': 'Ubuntu Linux 8 LTS Hardy Heron - LAMP Install (64 bit)'}, - {'id': 1696, 'name': 'Debian GNU/Linux 5.0 Lenny/Stable - Minimal Install (32 bit)'}, - {'id': 1697, 'name': 'Debian GNU/Linux 5.0 Lenny/Stable - Minimal Install (64 bit)'}, - {'id': 1698, 'name': 'Debian GNU/Linux 5.0 Lenny/Stable - LAMP Install (32 bit)'}, - {'id': 1699, 'name': 'Debian GNU/Linux 5.0 Lenny/Stable - LAMP Install (64 bit)'}, - {'id': 1700, 'name': 'Windows Server 2003 Standard SP2 with R2 (32 bit)'}, - {'id': 1701, 'name': 'Windows Server 2003 Standard SP2 with R2 (64 bit)'}, - {'id': 1703, 'name': 'Windows Server 2003 Enterprise SP2 with R2 (64 bit)'}, - {'id': 1705, 'name': 'Windows Server 2008 Standard Edition (64bit)'}, - {'id': 1715, 'name': 'Windows Server 2003 Datacenter SP2 (64 bit)'}, - {'id': 1716, 'name': 'Windows Server 2003 Datacenter SP2 (32 bit)'}, - {'id': 1742, 'name': 'Windows Server 2008 Standard Edition SP2 (32bit)'}, - {'id': 1752, 'name': 'Windows Server 2008 Standard Edition SP2 (64bit)'}, - {'id': 1756, 'name': 'Windows Server 2008 Enterprise Edition SP2 (32bit)'}, - {'id': 1761, 'name': 'Windows Server 2008 Enterprise Edition SP2 (64bit)'}, - {'id': 1766, 'name': 'Windows Server 2008 Datacenter Edition SP2 (32bit)'}, - {'id': 1770, 'name': 'Windows Server 2008 Datacenter Edition SP2 (64bit)'}, - {'id': 1857, 'name': 'Windows Server 2008 R2 Standard Edition (64bit)'}, - {'id': 1860, 'name': 'Windows Server 2008 R2 Enterprise Edition (64bit)'}, - {'id': 1863, 'name': 'Windows Server 2008 R2 Datacenter Edition (64bit)'}, -] - -""" -The following code snippet will print out all available "prices" - mask = { 'items': '' } - res = self.connection.request( - "SoftLayer_Product_Package", - "getObject", - res, - id=46, - object_mask=mask - ) - - from pprint import pprint; pprint(res) -""" -SL_TEMPLATES = { - 'sl1': { - 'imagedata': { - 'name': '2 x 2.0 GHz, 1GB ram, 100GB', - 'ram': 1024, - 'disk': 100, - 'bandwidth': None - 
}, - 'prices': [ - {'id': 1644}, # 1 GB - {'id': 1639}, # 100 GB (SAN) - {'id': 1963}, # Private 2 x 2.0 GHz Cores - {'id': 21}, # 1 IP Address - {'id': 55}, # Host Ping - {'id': 58}, # Automated Notification - {'id': 1800}, # 0 GB Bandwidth - {'id': 57}, # Email and Ticket - {'id': 274}, # 1000 Mbps Public & Private Networks - {'id': 905}, # Reboot / Remote Console - {'id': 418}, # Nessus Vulnerability Assessment & Reporting - {'id': 420}, # Unlimited SSL VPN Users & 1 PPTP VPN User per account - ], - }, - 'sl2': { - 'imagedata': { - 'name': '2 x 2.0 GHz, 4GB ram, 350GB', - 'ram': 4096, - 'disk': 350, - 'bandwidth': None - }, - 'prices': [ - {'id': 1646}, # 4 GB - {'id': 1639}, # 100 GB (SAN) - This is the only available "First Disk" - {'id': 1638}, # 250 GB (SAN) - {'id': 1963}, # Private 2 x 2.0 GHz Cores - {'id': 21}, # 1 IP Address - {'id': 55}, # Host Ping - {'id': 58}, # Automated Notification - {'id': 1800}, # 0 GB Bandwidth - {'id': 57}, # Email and Ticket - {'id': 274}, # 1000 Mbps Public & Private Networks - {'id': 905}, # Reboot / Remote Console - {'id': 418}, # Nessus Vulnerability Assessment & Reporting - {'id': 420}, # Unlimited SSL VPN Users & 1 PPTP VPN User per account - ], - } -} class SoftLayerException(LibcloudError): """ @@ -140,71 +134,39 @@ """ pass -class SoftLayerSafeTransport(xmlrpclib.SafeTransport): - pass - -class SoftLayerTransport(xmlrpclib.Transport): - pass - -class SoftLayerProxy(xmlrpclib.ServerProxy): - transportCls = (SoftLayerTransport, SoftLayerSafeTransport) - API_PREFIX = "http://api.service.softlayer.com/xmlrpc/v3" - - def __init__(self, service, user_agent, verbose=0): - cls = self.transportCls[0] - if SoftLayerProxy.API_PREFIX[:8] == "https://": - cls = self.transportCls[1] - t = cls(use_datetime=0) - t.user_agent = user_agent - xmlrpclib.ServerProxy.__init__( - self, - uri="%s/%s" % (SoftLayerProxy.API_PREFIX, service), - transport=t, - verbose=verbose - ) -class SoftLayerConnection(object): - """ - Connection class for 
the SoftLayer driver - """ +class SoftLayerResponse(XMLRPCResponse): + defaultExceptionCls = SoftLayerException + exceptions = { + 'SoftLayer_Account': InvalidCredsError, + } - proxyCls = SoftLayerProxy - driver = None - def __init__(self, user, key): - self.user = user - self.key = key - self.ua = [] +class SoftLayerConnection(XMLRPCConnection, ConnectionUserAndKey): + responseCls = SoftLayerResponse + host = 'api.softlayer.com' + endpoint = '/xmlrpc/v3' def request(self, service, method, *args, **kwargs): - sl = self.proxyCls(service, self._user_agent()) - headers = {} headers.update(self._get_auth_headers()) headers.update(self._get_init_params(service, kwargs.get('id'))) - headers.update(self._get_object_mask(service, kwargs.get('object_mask'))) - params = [{'headers': headers}] + list(args) - - try: - return getattr(sl, method)(*params) - except xmlrpclib.Fault, e: - if e.faultCode == "SoftLayer_Account": - raise InvalidCredsError(e.faultString) - raise SoftLayerException(e) - - def _user_agent(self): - return 'libcloud/%s (%s)%s' % ( - libcloud.__version__, - self.driver.name, - "".join([" (%s)" % x for x in self.ua])) - - def user_agent_append(self, s): - self.ua.append(s) + headers.update( + self._get_object_mask(service, kwargs.get('object_mask'))) + headers.update( + self._get_object_mask(service, kwargs.get('object_mask'))) + + args = ({'headers': headers}, ) + args + endpoint = '%s/%s' % (self.endpoint, service) + + return super(SoftLayerConnection, self).request(method, *args, + **{'endpoint': + endpoint}) def _get_auth_headers(self): return { 'authenticate': { - 'username': self.user, + 'username': self.user_id, 'apiKey': self.key } } @@ -225,6 +187,7 @@ else: return {} + class SoftLayerNodeDriver(NodeDriver): """ SoftLayer node driver @@ -233,156 +196,233 @@ - password: root password - hourlyRecurringFee: hourly price (if applicable) - recurringFee : flat rate (if applicable) - - recurringMonths : The number of months in which the recurringFee will 
be incurred. + - recurringMonths : The number of months in which the recurringFee + will be incurred. """ connectionCls = SoftLayerConnection name = 'SoftLayer' + website = 'http://www.softlayer.com/' type = Provider.SOFTLAYER - features = {"create_node": ["generates_password"]} - - def __init__(self, key, secret=None, secure=False): - self.key = key - self.secret = secret - self.connection = self.connectionCls(key, secret) - self.connection.driver = self + features = {'create_node': ['generates_password']} def _to_node(self, host): try: - password = host['softwareComponents'][0]['passwords'][0]['password'] + password = \ + host['operatingSystem']['passwords'][0]['password'] except (IndexError, KeyError): password = None - hourlyRecurringFee = host.get('billingItem', {}).get('hourlyRecurringFee', 0) - recurringFee = host.get('billingItem', {}).get('recurringFee', 0) - recurringMonths = host.get('billingItem', {}).get('recurringMonths', 0) + hourlyRecurringFee = host.get('billingItem', {}).get( + 'hourlyRecurringFee', 0) + recurringFee = host.get('billingItem', {}).get('recurringFee', 0) + recurringMonths = host.get('billingItem', {}).get('recurringMonths', 0) + createDate = host.get('createDate', None) + + # When machine is launching it gets state halted + # we change this to pending + state = NODE_STATE_MAP.get(host['powerState']['keyName'], + NodeState.UNKNOWN) + + if not password and state == NodeState.UNKNOWN: + state = NODE_STATE_MAP['INITIATING'] + + public_ips = [] + private_ips = [] + + if 'primaryIpAddress' in host: + public_ips.append(host['primaryIpAddress']) + + if 'primaryBackendIpAddress' in host: + private_ips.append(host['primaryBackendIpAddress']) + + image = host.get('operatingSystem', {}).get('softwareLicense', {}) \ + .get('softwareDescription', {}) \ + .get('longDescription', None) return Node( id=host['id'], - name=host['hostname'], - state=NODE_STATE_MAP.get( - host['powerState']['keyName'], - NodeState.UNKNOWN - ), - 
public_ip=[host['primaryIpAddress']], - private_ip=[host['primaryBackendIpAddress']], + name=host['fullyQualifiedDomainName'], + state=state, + public_ips=public_ips, + private_ips=private_ips, driver=self, extra={ + 'hostname': host['hostname'], + 'fullyQualifiedDomainName': host['fullyQualifiedDomainName'], 'password': password, + 'maxCpu': host.get('maxCpu', None), + 'datacenter': host.get('datacenter', {}).get('longName', None), + 'maxMemory': host.get('maxMemory', None), + 'image': image, 'hourlyRecurringFee': hourlyRecurringFee, 'recurringFee': recurringFee, 'recurringMonths': recurringMonths, + 'created': createDate, } ) - def _to_nodes(self, hosts): - return [self._to_node(h) for h in hosts] - def destroy_node(self, node): - billing_item = self.connection.request( - "SoftLayer_Virtual_Guest", - "getBillingItem", - id=node.id + self.connection.request( + 'SoftLayer_Virtual_Guest', 'deleteObject', id=node.id ) + return True - if billing_item: - res = self.connection.request( - "SoftLayer_Billing_Item", - "cancelService", - id=billing_item['id'] - ) - return res - else: - return False + def reboot_node(self, node): + self.connection.request( + 'SoftLayer_Virtual_Guest', 'rebootSoft', id=node.id + ) + return True + + def ex_stop_node(self, node): + self.connection.request( + 'SoftLayer_Virtual_Guest', 'powerOff', id=node.id + ) + return True - def _get_order_information(self, order_id, timeout=1200, check_interval=5): + def ex_start_node(self, node): + self.connection.request( + 'SoftLayer_Virtual_Guest', 'powerOn', id=node.id + ) + return True + + def _get_order_information(self, node_id, timeout=1200, check_interval=5): mask = { - 'orderTopLevelItems': { - 'billingItem': { - 'resource': { - 'softwareComponents': { - 'passwords': '' - }, - 'powerState': '', - } - }, - } - } + 'billingItem': '', + 'powerState': '', + 'operatingSystem': {'passwords': ''}, + 'provisionDate': '', + } for i in range(0, timeout, check_interval): - try: - res = 
self.connection.request( - "SoftLayer_Billing_Order", - "getObject", - id=order_id, - object_mask=mask - ) - item = res['orderTopLevelItems'][0]['billingItem']['resource'] - if item['softwareComponents'][0]['passwords']: - return item + res = self.connection.request( + 'SoftLayer_Virtual_Guest', + 'getObject', + id=node_id, + object_mask=mask + ).object - except (KeyError, IndexError): - pass + if res.get('provisionDate', None): + return res time.sleep(check_interval) - return None + raise SoftLayerException('Timeout on getting node details') def create_node(self, **kwargs): """Create a new SoftLayer node - See L{NodeDriver.create_node} for more keyword args. - @keyword ex_domain: e.g. libcloud.org - @type ex_domain: C{string} + @inherits: :class:`NodeDriver.create_node` + + :keyword ex_domain: e.g. libcloud.org + :type ex_domain: ``str`` + :keyword ex_cpus: e.g. 2 + :type ex_cpus: ``int`` + :keyword ex_disk: e.g. 100 + :type ex_disk: ``int`` + :keyword ex_ram: e.g. 2048 + :type ex_ram: ``int`` + :keyword ex_bandwidth: e.g. 100 + :type ex_bandwidth: ``int`` + :keyword ex_local_disk: e.g. True + :type ex_local_disk: ``bool`` + :keyword ex_datacenter: e.g. Dal05 + :type ex_datacenter: ``str`` + :keyword ex_os: e.g. 
UBUNTU_LATEST + :type ex_os: ``str`` """ name = kwargs['name'] - image = kwargs['image'] - size = kwargs['size'] - domain = kwargs.get('ex_domain') - location = kwargs['location'] - if domain == None: - if name.find(".") != -1: - domain = name[name.find('.')+1:] + os = 'DEBIAN_LATEST' + if 'ex_os' in kwargs: + os = kwargs['ex_os'] + elif 'image' in kwargs: + os = kwargs['image'].id + + size = kwargs.get('size', NodeSize(id=123, name='Custom', ram=None, + disk=None, bandwidth=None, + price=None, + driver=self.connection.driver)) + ex_size_data = SL_TEMPLATES.get(int(size.id)) or {} + # plan keys are ints + cpu_count = kwargs.get('ex_cpus') or ex_size_data.get('cpus') or \ + DEFAULT_CPU_SIZE + ram = kwargs.get('ex_ram') or ex_size_data.get('ram') or \ + DEFAULT_RAM_SIZE + bandwidth = kwargs.get('ex_bandwidth') or size.bandwidth or 10 + hourly = 'true' if kwargs.get('ex_hourly', True) else 'false' + + local_disk = 'true' + if ex_size_data.get('local_disk') is False: + local_disk = 'false' + + if kwargs.get('ex_local_disk') is False: + local_disk = 'false' + + disk_size = DEFAULT_DISK_SIZE + if size.disk: + disk_size = size.disk + if kwargs.get('ex_disk'): + disk_size = kwargs.get('ex_disk') + + datacenter = '' + if 'ex_datacenter' in kwargs: + datacenter = kwargs['ex_datacenter'] + elif 'location' in kwargs: + datacenter = kwargs['location'].id - if domain == None: + domain = kwargs.get('ex_domain') + if domain is None: + if name.find('.') != -1: + domain = name[name.find('.') + 1:] + if domain is None: # TODO: domain is a required argument for the Sofylayer API, but it # it shouldn't be. 
- domain = "exmaple.com" + domain = DEFAULT_DOMAIN - res = {'prices': SL_TEMPLATES[size.id]['prices']} - res['packageId'] = DEFAULT_PACKAGE - res['prices'].append({'id': image.id}) # Add OS to order - res['location'] = location.id - res['complexType'] = 'SoftLayer_Container_Product_Order_Virtual_Guest' - res['quantity'] = 1 - res['useHourlyPricing'] = True - res['virtualGuests'] = [ - { - 'hostname': name, - 'domain': domain - } - ] + newCCI = { + 'hostname': name, + 'domain': domain, + 'startCpus': cpu_count, + 'maxMemory': ram, + 'networkComponents': [{'maxSpeed': bandwidth}], + 'hourlyBillingFlag': hourly, + 'operatingSystemReferenceCode': os, + 'localDiskFlag': local_disk, + 'blockDevices': [ + { + 'device': '0', + 'diskImage': { + 'capacity': disk_size, + } + } + ] + + } + + if datacenter: + newCCI['datacenter'] = {'name': datacenter} res = self.connection.request( - "SoftLayer_Product_Order", - "placeOrder", - res - ) + 'SoftLayer_Virtual_Guest', 'createObject', newCCI + ).object - order_id = res['orderId'] - raw_node = self._get_order_information(order_id) + node_id = res['id'] + raw_node = self._get_order_information(node_id) return self._to_node(raw_node) def _to_image(self, img): return NodeImage( - id=img['id'], - name=img['name'], + id=img['template']['operatingSystemReferenceCode'], + name=img['itemPrice']['item']['description'], driver=self.connection.driver ) def list_images(self, location=None): - return [self._to_image(i) for i in SL_IMAGES] + result = self.connection.request( + 'SoftLayer_Virtual_Guest', 'getCreateObjectOptions' + ).object + return [self._to_image(i) for i in result['operatingSystems']] def _to_size(self, id, size): return NodeSize( @@ -390,38 +430,39 @@ name=size['name'], ram=size['ram'], disk=size['disk'], - bandwidth=size['bandwidth'], + bandwidth=size.get('bandwidth'), price=None, driver=self.connection.driver, ) def list_sizes(self, location=None): - return [self._to_size(id, s['imagedata']) for id, s in 
SL_TEMPLATES.iteritems()] + return [self._to_size(id, s) for id, s in SL_TEMPLATES.items()] def _to_loc(self, loc): - return NodeLocation( - id=loc['id'], - name=loc['name'], - country=DATACENTERS[loc['name']]['country'], - driver=self - ) + country = 'UNKNOWN' + loc_id = loc['template']['datacenter']['name'] + name = loc_id + + if loc_id in DATACENTERS: + country = DATACENTERS[loc_id]['country'] + name = DATACENTERS[loc_id].get('name', loc_id) + return NodeLocation(id=loc_id, name=name, + country=country, driver=self) def list_locations(self): res = self.connection.request( - "SoftLayer_Location_Datacenter", - "getDatacenters" - ) - - # checking "in DATACENTERS", because some of the locations returned by getDatacenters are not useable. - return [self._to_loc(l) for l in res if l['name'] in DATACENTERS] + 'SoftLayer_Virtual_Guest', 'getCreateObjectOptions' + ).object + return [self._to_loc(l) for l in res['datacenters']] def list_nodes(self): mask = { 'virtualGuests': { 'powerState': '', - 'softwareComponents': { - 'passwords': '' - }, + 'hostname': '', + 'maxMemory': '', + 'datacenter': '', + 'operatingSystem': {'passwords': ''}, 'billingItem': '', }, } @@ -429,14 +470,5 @@ "SoftLayer_Account", "getVirtualGuests", object_mask=mask - ) - nodes = self._to_nodes(res) - return nodes - - def reboot_node(self, node): - res = self.connection.request( - "SoftLayer_Virtual_Guest", - "rebootHard", - id=node.id - ) - return res + ).object + return [self._to_node(h) for h in res] diff -Nru libcloud-0.5.0/libcloud/compute/drivers/vcloud.py libcloud-0.15.1/libcloud/compute/drivers/vcloud.py --- libcloud-0.5.0/libcloud/compute/drivers/vcloud.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/vcloud.py 2014-06-11 14:27:59.000000000 +0000 @@ -15,31 +15,53 @@ """ VMware vCloud driver. 
""" +import copy +import sys +import re import base64 -import httplib +import os +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlencode +from libcloud.utils.py3 import urlparse +from libcloud.utils.py3 import b +from libcloud.utils.py3 import next + +urlparse = urlparse.urlparse + import time -from urlparse import urlparse -from xml.etree import ElementTree as ET +try: + from lxml import etree as ET +except ImportError: + from xml.etree import ElementTree as ET + from xml.parsers.expat import ExpatError -from libcloud.common.base import Response, ConnectionUserAndKey -from libcloud.common.types import InvalidCredsError +from libcloud.common.base import XmlResponse, ConnectionUserAndKey +from libcloud.common.types import InvalidCredsError, LibcloudError from libcloud.compute.providers import Provider from libcloud.compute.types import NodeState from libcloud.compute.base import Node, NodeDriver, NodeLocation -from libcloud.compute.base import NodeSize, NodeImage, NodeAuthPassword +from libcloud.compute.base import NodeSize, NodeImage """ From vcloud api "The VirtualQuantity element defines the number of MB of memory. This should be either 512 or a multiple of 1024 (1 GB)." """ -VIRTUAL_MEMORY_VALS = [512] + [1024 * i for i in range(1,9)] +VIRTUAL_MEMORY_VALS = [512] + [1024 * i for i in range(1, 9)] +# Default timeout (in seconds) for long running tasks DEFAULT_TASK_COMPLETION_TIMEOUT = 600 -def get_url_path(url): - return urlparse(url.strip()).path +DEFAULT_API_VERSION = '0.8' + +""" +Valid vCloud API v1.5 input values. 
+""" +VIRTUAL_CPU_VALS_1_5 = [i for i in range(1, 9)] +FENCE_MODE_VALS_1_5 = ['bridged', 'isolated', 'natRouted'] +IP_MODE_VALS_1_5 = ['POOL', 'DHCP', 'MANUAL', 'NONE'] + def fixxpath(root, xpath): """ElementTree wants namespaces in its xpaths, so here we add them.""" @@ -48,6 +70,81 @@ for e in xpath.split("/")]) return fixed_xpath + +def get_url_path(url): + return urlparse(url.strip()).path + + +class Vdc(object): + """ + Virtual datacenter (vDC) representation + """ + def __init__(self, id, name, driver, allocation_model=None, cpu=None, + memory=None, storage=None): + self.id = id + self.name = name + self.driver = driver + self.allocation_model = allocation_model + self.cpu = cpu + self.memory = memory + self.storage = storage + + def __repr__(self): + return ('' + % (self.id, self.name, self.driver.name)) + + +class Capacity(object): + """ + Represents CPU, Memory or Storage capacity of vDC. + """ + def __init__(self, limit, used, units): + self.limit = limit + self.used = used + self.units = units + + def __repr__(self): + return ('' + % (self.limit, self.used, self.units)) + + +class ControlAccess(object): + """ + Represents control access settings of a node + """ + class AccessLevel(object): + READ_ONLY = 'ReadOnly' + CHANGE = 'Change' + FULL_CONTROL = 'FullControl' + + def __init__(self, node, everyone_access_level, subjects=None): + self.node = node + self.everyone_access_level = everyone_access_level + if not subjects: + subjects = [] + self.subjects = subjects + + def __repr__(self): + return ('' + % (self.node, self.everyone_access_level, self.subjects)) + + +class Subject(object): + """ + User or group subject + """ + def __init__(self, type, name, access_level, id=None): + self.type = type + self.name = name + self.access_level = access_level + self.id = id + + def __repr__(self): + return ('' + % (self.type, self.name, self.access_level)) + + class InstantiateVAppXML(object): def __init__(self, name, template, net_href, cpus, memory, @@ -70,14 
+167,14 @@ self.root = self._make_instantiation_root() self._add_vapp_template(self.root) - instantionation_params = ET.SubElement(self.root, - "InstantiationParams") + instantiation_params = ET.SubElement(self.root, + "InstantiationParams") # product and virtual hardware - self._make_product_section(instantionation_params) - self._make_virtual_hardware(instantionation_params) + self._make_product_section(instantiation_params) + self._make_virtual_hardware(instantiation_params) - network_config_section = ET.SubElement(instantionation_params, + network_config_section = ET.SubElement(instantiation_params, "NetworkConfigSection") network_config = ET.SubElement(network_config_section, @@ -155,7 +252,7 @@ def _add_memory(self, parent): mem_item = ET.SubElement( parent, - "Item", + 'Item', {'xmlns': "http://schemas.dmtf.org/ovf/envelope/1"} ) self._add_instance_id(mem_item, '2') @@ -167,8 +264,9 @@ def _add_instance_id(self, parent, id): elm = ET.SubElement( parent, - "InstanceID", - {'xmlns': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData'} + 'InstanceID', + {'xmlns': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/' + 'CIM_ResourceAllocationSettingData'} ) elm.text = id return elm @@ -176,46 +274,40 @@ def _add_resource_type(self, parent, type): elm = ET.SubElement( parent, - "ResourceType", - {'xmlns': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData'} + 'ResourceType', + {'xmlns': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/' + 'CIM_ResourceAllocationSettingData'} ) elm.text = type return elm def _add_virtual_quantity(self, parent, amount): elm = ET.SubElement( - parent, - "VirtualQuantity", - {'xmlns': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData'} - ) + parent, + 'VirtualQuantity', + {'xmlns': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/' + 'CIM_ResourceAllocationSettingData'} + ) elm.text = amount return elm def 
_add_network_association(self, parent): return ET.SubElement( parent, - "NetworkAssociation", + 'NetworkAssociation', {'href': self.net_href} ) -class VCloudResponse(Response): - - def parse_body(self): - if not self.body: - return None - try: - return ET.XML(self.body) - except ExpatError, e: - raise Exception("%s: %s" % (e, self.parse_error())) - def parse_error(self): - return self.error +class VCloudResponse(XmlResponse): def success(self): return self.status in (httplib.OK, httplib.CREATED, httplib.NO_CONTENT, httplib.ACCEPTED) + class VCloudConnection(ConnectionUserAndKey): + """ Connection class for the vCloud driver """ @@ -235,16 +327,16 @@ def _get_auth_headers(self): """Some providers need different headers than others""" return { - 'Authorization': - "Basic %s" - % base64.b64encode('%s:%s' % (self.user_id, self.key)), - 'Content-Length': 0 + 'Authorization': "Basic %s" % base64.b64encode( + b('%s:%s' % (self.user_id, self.key))).decode('utf-8'), + 'Content-Length': '0', + 'Accept': 'application/*+xml' } def _get_auth_token(self): if not self.token: conn = self.conn_classes[self.secure](self.host, - self.port[self.secure]) + self.port) conn.request(method='POST', url='/api/v0.8/login', headers=self._get_auth_headers()) @@ -263,15 +355,19 @@ def add_default_headers(self, headers): headers['Cookie'] = self.token + headers['Accept'] = 'application/*+xml' return headers + class VCloudNodeDriver(NodeDriver): + """ vCloud node driver """ type = Provider.VCLOUD - name = "vCloud" + name = 'vCloud' + website = 'http://www.vmware.com/products/vcloud/' connectionCls = VCloudConnection org = None _vdcs = None @@ -282,29 +378,70 @@ '3': NodeState.PENDING, '4': NodeState.RUNNING} + features = {'create_node': ['password']} + + def __new__(cls, key, secret=None, secure=True, host=None, port=None, + api_version=DEFAULT_API_VERSION, **kwargs): + if cls is VCloudNodeDriver: + if api_version == '0.8': + cls = VCloudNodeDriver + elif api_version == '1.5': + cls = 
VCloud_1_5_NodeDriver + elif api_version == '5.1': + cls = VCloud_5_1_NodeDriver + else: + raise NotImplementedError( + "No VCloudNodeDriver found for API version %s" % + (api_version)) + return super(VCloudNodeDriver, cls).__new__(cls) + @property def vdcs(self): + """ + vCloud virtual data centers (vDCs). + + :return: list of vDC objects + :rtype: ``list`` of :class:`Vdc` + """ if not self._vdcs: - self.connection.check_org() # make sure the org is set. + self.connection.check_org() # make sure the org is set. res = self.connection.request(self.org) self._vdcs = [ - get_url_path(i.get('href')) - for i - in res.object.findall(fixxpath(res.object, "Link")) + self._to_vdc( + self.connection.request(get_url_path(i.get('href'))).object + ) + for i in res.object.findall(fixxpath(res.object, "Link")) if i.get('type') == 'application/vnd.vmware.vcloud.vdc+xml' ] - return self._vdcs + def _to_vdc(self, vdc_elm): + return Vdc(vdc_elm.get('href'), vdc_elm.get('name'), self) + + def _get_vdc(self, vdc_name): + vdc = None + if not vdc_name: + # Return the first organisation VDC found + vdc = self.vdcs[0] + else: + for v in self.vdcs: + if v.name == vdc_name: + vdc = v + if vdc is None: + raise ValueError('%s virtual data centre could not be found', + vdc_name) + return vdc + @property def networks(self): networks = [] for vdc in self.vdcs: - res = self.connection.request(vdc).object + res = self.connection.request(get_url_path(vdc.id)).object networks.extend( [network for network in res.findall( - fixxpath(res, "AvailableNetworks/Network") + fixxpath(res, 'AvailableNetworks/Network') + )] ) @@ -316,13 +453,23 @@ driver=self.connection.driver) return image - def _to_node(self, name, elm): + def _to_node(self, elm): state = self.NODE_STATE_MAP[elm.get('status')] + name = elm.get('name') public_ips = [] private_ips = [] # Following code to find private IPs works for Terremark - connections = 
elm.findall('{http://schemas.dmtf.org/ovf/envelope/1}NetworkConnectionSection/{http://www.vmware.com/vcloud/v0.8}NetworkConnection') + connections = elm.findall('%s/%s' % ( + '{http://schemas.dmtf.org/ovf/envelope/1}NetworkConnectionSection', + fixxpath(elm, 'NetworkConnection')) + ) + if not connections: + connections = elm.findall( + fixxpath( + elm, + 'Children/Vm/NetworkConnectionSection/NetworkConnection')) + for connection in connections: ips = [ip.text for ip @@ -335,8 +482,8 @@ node = Node(id=elm.get('href'), name=name, state=state, - public_ip=public_ips, - private_ip=private_ips, + public_ips=public_ips, + private_ips=private_ips, driver=self.connection.driver) return node @@ -344,7 +491,7 @@ def _get_catalog_hrefs(self): res = self.connection.request(self.org) catalogs = [ - get_url_path(i.get('href')) + i.get('href') for i in res.object.findall(fixxpath(res.object, "Link")) if i.get('type') == 'application/vnd.vmware.vcloud.catalog+xml' ] @@ -354,20 +501,25 @@ def _wait_for_task_completion(self, task_href, timeout=DEFAULT_TASK_COMPLETION_TIMEOUT): start_time = time.time() - res = self.connection.request(task_href) + res = self.connection.request(get_url_path(task_href)) status = res.object.get('status') while status != 'success': if status == 'error': - raise Exception("Error status returned by task %s." - % task_href) + # Get error reason from the response body + error_elem = res.object.find(fixxpath(res.object, 'Error')) + error_msg = "Unknown error" + if error_elem is not None: + error_msg = error_elem.get('message') + raise Exception("Error status returned by task %s.: %s" + % (task_href, error_msg)) if status == 'canceled': raise Exception("Canceled status returned by task %s." % task_href) if (time.time() - start_time >= timeout): - raise Exception("Timeout while waiting for task %s." - % task_href) + raise Exception("Timeout (%s sec) while waiting for task %s." 
+ % (timeout, task_href)) time.sleep(5) - res = self.connection.request(task_href) + res = self.connection.request(get_url_path(task_href)) status = res.object.get('status') def destroy_node(self, node): @@ -395,38 +547,61 @@ pass res = self.connection.request(node_path, method='DELETE') - return res.status == 202 + return res.status == httplib.ACCEPTED def reboot_node(self, node): res = self.connection.request('%s/power/action/reset' % get_url_path(node.id), method='POST') - return res.status == 202 or res.status == 204 + return res.status in [httplib.ACCEPTED, httplib.NO_CONTENT] def list_nodes(self): + return self.ex_list_nodes() + + def ex_list_nodes(self, vdcs=None): + """ + List all nodes across all vDCs. Using 'vdcs' you can specify which vDCs + should be queried. + + :param vdcs: None, vDC or a list of vDCs to query. If None all vDCs + will be queried. + :type vdcs: :class:`Vdc` + + :rtype: ``list`` of :class:`Node` + """ + if not vdcs: + vdcs = self.vdcs + if not isinstance(vdcs, (list, tuple)): + vdcs = [vdcs] nodes = [] - for vdc in self.vdcs: - res = self.connection.request(vdc) + for vdc in vdcs: + res = self.connection.request(get_url_path(vdc.id)) elms = res.object.findall(fixxpath( res.object, "ResourceEntities/ResourceEntity") ) vapps = [ - (i.get('name'), get_url_path(i.get('href'))) + (i.get('name'), i.get('href')) for i in elms - if i.get('type') - == 'application/vnd.vmware.vcloud.vApp+xml' - and i.get('name') + if i.get('type') == 'application/vnd.vmware.vcloud.vApp+xml' + and i.get('name') ] for vapp_name, vapp_href in vapps: - res = self.connection.request( - vapp_href, - headers={ - 'Content-Type': - 'application/vnd.vmware.vcloud.vApp+xml' - } - ) - nodes.append(self._to_node(vapp_name, res.object)) + try: + res = self.connection.request( + get_url_path(vapp_href), + headers={'Content-Type': + 'application/vnd.vmware.vcloud.vApp+xml'} + ) + nodes.append(self._to_node(res.object)) + except Exception: + # The vApp was probably removed since 
the previous vDC + # query, ignore + e = sys.exc_info()[1] + if not (e.args[0].tag.endswith('Error') and + e.args[0].get('minorErrorCode') == + 'ACCESS_TO_RESOURCE_IS_FORBIDDEN'): + raise return nodes @@ -449,10 +624,9 @@ def _get_catalogitems_hrefs(self, catalog): """Given a catalog href returns contained catalog item hrefs""" res = self.connection.request( - catalog, + get_url_path(catalog), headers={ - 'Content-Type': - 'application/vnd.vmware.vcloud.catalog+xml' + 'Content-Type': 'application/vnd.vmware.vcloud.catalog+xml' } ).object @@ -460,17 +634,16 @@ cat_item_hrefs = [i.get('href') for i in cat_items if i.get('type') == - 'application/vnd.vmware.vcloud.catalogItem+xml'] + 'application/vnd.vmware.vcloud.catalogItem+xml'] return cat_item_hrefs def _get_catalogitem(self, catalog_item): """Given a catalog item href returns elementree""" res = self.connection.request( - catalog_item, + get_url_path(catalog_item), headers={ - 'Content-Type': - 'application/vnd.vmware.vcloud.catalogItem+xml' + 'Content-Type': 'application/vnd.vmware.vcloud.catalogItem+xml' } ).object @@ -479,7 +652,7 @@ def list_images(self, location=None): images = [] for vdc in self.vdcs: - res = self.connection.request(vdc).object + res = self.connection.request(get_url_path(vdc.id)).object res_ents = res.findall(fixxpath( res, "ResourceEntities/ResourceEntity") ) @@ -487,7 +660,7 @@ self._to_image(i) for i in res_ents if i.get('type') == - 'application/vnd.vmware.vcloud.vAppTemplate+xml' + 'application/vnd.vmware.vcloud.vAppTemplate+xml' ] for catalog in self._get_catalog_hrefs(): @@ -498,32 +671,46 @@ self._to_image(i) for i in res_ents if i.get('type') == - 'application/vnd.vmware.vcloud.vAppTemplate+xml' + 'application/vnd.vmware.vcloud.vAppTemplate+xml' ] - return images + def idfun(image): + return image.id - def create_node(self, **kwargs): - """Creates and returns node. 
+ return self._uniquer(images, idfun) + def _uniquer(self, seq, idfun=None): + if idfun is None: + def idfun(x): + return x + seen = {} + result = [] + for item in seq: + marker = idfun(item) + if marker in seen: + continue + seen[marker] = 1 + result.append(item) + return result - See L{NodeDriver.create_node} for more keyword args. + def create_node(self, **kwargs): + """ + Creates and returns node. - Non-standard optional keyword arguments: - @keyword ex_network: link to a "Network" e.g., "https://services.vcloudexpress.terremark.com/api/v0.8/network/7" - @type ex_network: C{string} + :keyword ex_network: link to a "Network" e.g., + ``https://services.vcloudexpress...`` + :type ex_network: ``str`` - @keyword ex_vdc: link to a "VDC" e.g., "https://services.vcloudexpress.terremark.com/api/v0.8/vdc/1" - @type ex_vdc: C{string} + :keyword ex_vdc: Name of organisation's virtual data + center where vApp VMs will be deployed. + :type ex_vdc: ``str`` - @keyword ex_cpus: number of virtual cpus (limit depends on provider) - @type ex_cpus: C{int} + :keyword ex_cpus: number of virtual cpus (limit depends on provider) + :type ex_cpus: ``int`` - @keyword row: ???? - @type row: C{????} + :type ex_row: ``str`` - @keyword group: ???? - @type group: C{????} + :type ex_group: ``str`` """ name = kwargs['name'] image = kwargs['image'] @@ -536,12 +723,8 @@ network = '' password = None - if kwargs.has_key('auth'): - auth = kwargs['auth'] - if isinstance(auth, NodeAuthPassword): - password = auth.password - else: - raise ValueError('auth must be of NodeAuthPassword type') + auth = self._get_and_check_auth(kwargs.get('auth')) + password = auth.password instantiate_xml = InstantiateVAppXML( name=name, @@ -554,38 +737,40 @@ group=kwargs.get('ex_group', None) ) + vdc = self._get_vdc(kwargs.get('ex_vdc', None)) + # Instantiate VM and get identifier. 
+ content_type = \ + 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml' res = self.connection.request( - '%s/action/instantiateVAppTemplate' - % kwargs.get('vdc', self.vdcs[0]), + '%s/action/instantiateVAppTemplate' % get_url_path(vdc.id), data=instantiate_xml.tostring(), method='POST', - headers={ - 'Content-Type': - 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml' - } + headers={'Content-Type': content_type} ) - vapp_name = res.object.get('name') - vapp_href = get_url_path(res.object.get('href')) + vapp_path = get_url_path(res.object.get('href')) # Deploy the VM from the identifier. - res = self.connection.request('%s/action/deploy' % vapp_href, + res = self.connection.request('%s/action/deploy' % vapp_path, method='POST') self._wait_for_task_completion(res.object.get('href')) # Power on the VM. - res = self.connection.request('%s/power/action/powerOn' % vapp_href, + res = self.connection.request('%s/power/action/powerOn' % vapp_path, method='POST') - res = self.connection.request(vapp_href) - node = self._to_node(vapp_name, res.object) + res = self.connection.request(vapp_path) + node = self._to_node(res.object) + + if getattr(auth, "generated", False): + node.extra['password'] = auth.password return node - features = {"create_node": ["password"]} class HostingComConnection(VCloudConnection): + """ vCloud connection subclass for Hosting.com """ @@ -595,25 +780,31 @@ def _get_auth_headers(self): """hosting.com doesn't follow the standard vCloud authentication API""" return { - 'Authentication': - base64.b64encode('%s:%s' % (self.user_id, self.key)), - 'Content-Length': 0 + 'Authentication': base64.b64encode(b('%s:%s' % (self.user_id, + self.key))), + 'Content-Length': '0' } + class HostingComDriver(VCloudNodeDriver): + """ vCloud node driver for Hosting.com """ connectionCls = HostingComConnection + class TerremarkConnection(VCloudConnection): + """ vCloud connection subclass for Terremark """ host = 
"services.vcloudexpress.terremark.com" + class TerremarkDriver(VCloudNodeDriver): + """ vCloud node driver for Terremark """ @@ -622,3 +813,1278 @@ def list_locations(self): return [NodeLocation(0, "Terremark Texas", 'US', self)] + + +class VCloud_1_5_Connection(VCloudConnection): + + def _get_auth_headers(self): + """Compatibility for using v1.5 API under vCloud Director 5.1""" + return { + 'Authorization': "Basic %s" % base64.b64encode( + b('%s:%s' % (self.user_id, self.key))).decode('utf-8'), + 'Content-Length': '0', + 'Accept': 'application/*+xml;version=1.5' + } + + def _get_auth_token(self): + if not self.token: + # Log In + conn = self.conn_classes[self.secure](self.host, + self.port) + conn.request(method='POST', url='/api/sessions', + headers=self._get_auth_headers()) + + resp = conn.getresponse() + headers = dict(resp.getheaders()) + + # Set authorization token + try: + self.token = headers['x-vcloud-authorization'] + except KeyError: + raise InvalidCredsError() + + # Get the URL of the Organization + body = ET.XML(resp.read()) + self.org_name = body.get('org') + org_list_url = get_url_path( + next((link for link in body.findall(fixxpath(body, 'Link')) + if link.get('type') == + 'application/vnd.vmware.vcloud.orgList+xml')).get('href') + ) + + conn.request(method='GET', url=org_list_url, + headers=self.add_default_headers({})) + body = ET.XML(conn.getresponse().read()) + self.driver.org = get_url_path( + next((org for org in body.findall(fixxpath(body, 'Org')) + if org.get('name') == self.org_name)).get('href') + ) + + def add_default_headers(self, headers): + headers['Accept'] = 'application/*+xml;version=1.5' + headers['x-vcloud-authorization'] = self.token + return headers + + +class Instantiate_1_5_VAppXML(object): + + def __init__(self, name, template, network, vm_network=None, + vm_fence=None): + self.name = name + self.template = template + self.network = network + self.vm_network = vm_network + self.vm_fence = vm_fence + self._build_xmltree() + + 
def tostring(self): + return ET.tostring(self.root) + + def _build_xmltree(self): + self.root = self._make_instantiation_root() + + if self.network is not None: + instantionation_params = ET.SubElement(self.root, + 'InstantiationParams') + network_config_section = ET.SubElement(instantionation_params, + 'NetworkConfigSection') + ET.SubElement( + network_config_section, + 'Info', + {'xmlns': 'http://schemas.dmtf.org/ovf/envelope/1'} + ) + network_config = ET.SubElement(network_config_section, + 'NetworkConfig') + self._add_network_association(network_config) + + self._add_vapp_template(self.root) + + def _make_instantiation_root(self): + return ET.Element( + 'InstantiateVAppTemplateParams', + {'name': self.name, + 'deploy': 'false', + 'powerOn': 'false', + 'xml:lang': 'en', + 'xmlns': 'http://www.vmware.com/vcloud/v1.5', + 'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance'} + ) + + def _add_vapp_template(self, parent): + return ET.SubElement( + parent, + 'Source', + {'href': self.template} + ) + + def _add_network_association(self, parent): + if self.vm_network is None: + # Don't set a custom vApp VM network name + parent.set('networkName', self.network.get('name')) + else: + # Set a custom vApp VM network name + parent.set('networkName', self.vm_network) + configuration = ET.SubElement(parent, 'Configuration') + ET.SubElement(configuration, 'ParentNetwork', + {'href': self.network.get('href')}) + + if self.vm_fence is None: + fencemode = self.network.find(fixxpath(self.network, + 'Configuration/FenceMode')).text + else: + fencemode = self.vm_fence + ET.SubElement(configuration, 'FenceMode').text = fencemode + + +class VCloud_1_5_NodeDriver(VCloudNodeDriver): + connectionCls = VCloud_1_5_Connection + + # Based on + # http://pubs.vmware.com/vcloud-api-1-5/api_prog/ + # GUID-843BE3AD-5EF6-4442-B864-BCAE44A51867.html + NODE_STATE_MAP = {'-1': NodeState.UNKNOWN, + '0': NodeState.PENDING, + '1': NodeState.PENDING, + '2': NodeState.PENDING, + '3': NodeState.PENDING, 
+ '4': NodeState.RUNNING, + '5': NodeState.RUNNING, + '6': NodeState.UNKNOWN, + '7': NodeState.UNKNOWN, + '8': NodeState.STOPPED, + '9': NodeState.UNKNOWN, + '10': NodeState.UNKNOWN} + + def list_locations(self): + return [NodeLocation(id=self.connection.host, + name=self.connection.host, country="N/A", driver=self)] + + def ex_find_node(self, node_name, vdcs=None): + """ + Searches for node across specified vDCs. This is more effective than + querying all nodes to get a single instance. + + :param node_name: The name of the node to search for + :type node_name: ``str`` + + :param vdcs: None, vDC or a list of vDCs to search in. If None all vDCs + will be searched. + :type vdcs: :class:`Vdc` + + :return: node instance or None if not found + :rtype: :class:`Node` or ``None`` + """ + if not vdcs: + vdcs = self.vdcs + if not getattr(vdcs, '__iter__', False): + vdcs = [vdcs] + for vdc in vdcs: + res = self.connection.request(get_url_path(vdc.id)) + xpath = fixxpath(res.object, "ResourceEntities/ResourceEntity") + entity_elems = res.object.findall(xpath) + for entity_elem in entity_elems: + if entity_elem.get('type') == \ + 'application/vnd.vmware.vcloud.vApp+xml' and \ + entity_elem.get('name') == node_name: + path = get_url_path(entity_elem.get('href')) + headers = {'Content-Type': + 'application/vnd.vmware.vcloud.vApp+xml'} + res = self.connection.request(path, + headers=headers) + return self._to_node(res.object) + return None + + def destroy_node(self, node): + try: + self.ex_undeploy_node(node) + except Exception: + # Some vendors don't implement undeploy at all yet, + # so catch this and move on. 
+ pass + + res = self.connection.request(get_url_path(node.id), method='DELETE') + return res.status == httplib.ACCEPTED + + def reboot_node(self, node): + res = self.connection.request('%s/power/action/reset' + % get_url_path(node.id), + method='POST') + if res.status in [httplib.ACCEPTED, httplib.NO_CONTENT]: + self._wait_for_task_completion(res.object.get('href')) + return True + else: + return False + + def ex_deploy_node(self, node): + """ + Deploys existing node. Equal to vApp "start" operation. + + :param node: The node to be deployed + :type node: :class:`Node` + + :rtype: :class:`Node` + """ + data = {'powerOn': 'true', + 'xmlns': 'http://www.vmware.com/vcloud/v1.5'} + deploy_xml = ET.Element('DeployVAppParams', data) + path = get_url_path(node.id) + headers = { + 'Content-Type': + 'application/vnd.vmware.vcloud.deployVAppParams+xml' + } + res = self.connection.request('%s/action/deploy' % path, + data=ET.tostring(deploy_xml), + method='POST', + headers=headers) + self._wait_for_task_completion(res.object.get('href')) + res = self.connection.request(get_url_path(node.id)) + return self._to_node(res.object) + + def ex_undeploy_node(self, node): + """ + Undeploys existing node. Equal to vApp "stop" operation. 
+ + :param node: The node to be deployed + :type node: :class:`Node` + + :rtype: :class:`Node` + """ + data = {'xmlns': 'http://www.vmware.com/vcloud/v1.5'} + undeploy_xml = ET.Element('UndeployVAppParams', data) + undeploy_power_action_xml = ET.SubElement(undeploy_xml, + 'UndeployPowerAction') + undeploy_power_action_xml.text = 'shutdown' + + headers = { + 'Content-Type': + 'application/vnd.vmware.vcloud.undeployVAppParams+xml' + } + + try: + res = self.connection.request( + '%s/action/undeploy' % get_url_path(node.id), + data=ET.tostring(undeploy_xml), + method='POST', + headers=headers) + + self._wait_for_task_completion(res.object.get('href')) + except Exception: + undeploy_power_action_xml.text = 'powerOff' + res = self.connection.request( + '%s/action/undeploy' % get_url_path(node.id), + data=ET.tostring(undeploy_xml), + method='POST', + headers=headers) + self._wait_for_task_completion(res.object.get('href')) + + res = self.connection.request(get_url_path(node.id)) + return self._to_node(res.object) + + def ex_power_off_node(self, node): + """ + Powers on all VMs under specified node. VMs need to be This operation + is allowed only when the vApp/VM is powered on. + + :param node: The node to be powered off + :type node: :class:`Node` + + :rtype: :class:`Node` + """ + return self._perform_power_operation(node, 'powerOff') + + def ex_power_on_node(self, node): + """ + Powers on all VMs under specified node. This operation is allowed + only when the vApp/VM is powered off or suspended. + + :param node: The node to be powered on + :type node: :class:`Node` + + :rtype: :class:`Node` + """ + return self._perform_power_operation(node, 'powerOn') + + def ex_shutdown_node(self, node): + """ + Shutdowns all VMs under specified node. This operation is allowed only + when the vApp/VM is powered on. 
+ + :param node: The node to be shut down + :type node: :class:`Node` + + :rtype: :class:`Node` + """ + return self._perform_power_operation(node, 'shutdown') + + def ex_suspend_node(self, node): + """ + Suspends all VMs under specified node. This operation is allowed only + when the vApp/VM is powered on. + + :param node: The node to be suspended + :type node: :class:`Node` + + :rtype: :class:`Node` + """ + return self._perform_power_operation(node, 'suspend') + + def _perform_power_operation(self, node, operation): + res = self.connection.request( + '%s/power/action/%s' % (get_url_path(node.id), operation), + method='POST') + self._wait_for_task_completion(res.object.get('href')) + res = self.connection.request(get_url_path(node.id)) + return self._to_node(res.object) + + def ex_get_control_access(self, node): + """ + Returns the control access settings for specified node. + + :param node: node to get the control access for + :type node: :class:`Node` + + :rtype: :class:`ControlAccess` + """ + res = self.connection.request( + '%s/controlAccess' % get_url_path(node.id)) + everyone_access_level = None + is_shared_elem = res.object.find( + fixxpath(res.object, "IsSharedToEveryone")) + if is_shared_elem is not None and is_shared_elem.text == 'true': + everyone_access_level = res.object.find( + fixxpath(res.object, "EveryoneAccessLevel")).text + + # Parse all subjects + subjects = [] + xpath = fixxpath(res.object, "AccessSettings/AccessSetting") + for elem in res.object.findall(xpath): + access_level = elem.find(fixxpath(res.object, "AccessLevel")).text + subject_elem = elem.find(fixxpath(res.object, "Subject")) + if subject_elem.get('type') == \ + 'application/vnd.vmware.admin.group+xml': + subj_type = 'group' + else: + subj_type = 'user' + + path = get_url_path(subject_elem.get('href')) + res = self.connection.request(path) + name = res.object.get('name') + subject = Subject(type=subj_type, + name=name, + access_level=access_level, + id=subject_elem.get('href')) + 
subjects.append(subject) + + return ControlAccess(node, everyone_access_level, subjects) + + def ex_set_control_access(self, node, control_access): + """ + Sets control access for the specified node. + + :param node: node + :type node: :class:`Node` + + :param control_access: control access settings + :type control_access: :class:`ControlAccess` + + :rtype: ``None`` + """ + xml = ET.Element('ControlAccessParams', + {'xmlns': 'http://www.vmware.com/vcloud/v1.5'}) + shared_to_everyone = ET.SubElement(xml, 'IsSharedToEveryone') + if control_access.everyone_access_level: + shared_to_everyone.text = 'true' + everyone_access_level = ET.SubElement(xml, 'EveryoneAccessLevel') + everyone_access_level.text = control_access.everyone_access_level + else: + shared_to_everyone.text = 'false' + + # Set subjects + if control_access.subjects: + access_settings_elem = ET.SubElement(xml, 'AccessSettings') + for subject in control_access.subjects: + setting = ET.SubElement(access_settings_elem, 'AccessSetting') + if subject.id: + href = subject.id + else: + res = self.ex_query(type=subject.type, filter='name==' + + subject.name) + if not res: + raise LibcloudError('Specified subject "%s %s" not found ' + % (subject.type, subject.name)) + href = res[0]['href'] + ET.SubElement(setting, 'Subject', {'href': href}) + ET.SubElement(setting, 'AccessLevel').text = subject.access_level + + headers = { + 'Content-Type': 'application/vnd.vmware.vcloud.controlAccess+xml' + } + self.connection.request( + '%s/action/controlAccess' % get_url_path(node.id), + data=ET.tostring(xml), + headers=headers, + method='POST') + + def ex_get_metadata(self, node): + """ + :param node: node + :type node: :class:`Node` + + :return: dictionary mapping metadata keys to metadata values + :rtype: dictionary mapping ``str`` to ``str`` + """ + res = self.connection.request('%s/metadata' % (get_url_path(node.id))) + xpath = fixxpath(res.object, 'MetadataEntry') + metadata_entries = res.object.findall(xpath) + res_dict = 
{} + + for entry in metadata_entries: + key = entry.findtext(fixxpath(res.object, 'Key')) + value = entry.findtext(fixxpath(res.object, 'Value')) + res_dict[key] = value + + return res_dict + + def ex_set_metadata_entry(self, node, key, value): + """ + :param node: node + :type node: :class:`Node` + + :param key: metadata key to be set + :type key: ``str`` + + :param value: metadata value to be set + :type value: ``str`` + + :rtype: ``None`` + """ + metadata_elem = ET.Element( + 'Metadata', + {'xmlns': "http://www.vmware.com/vcloud/v1.5", + 'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"} + ) + entry = ET.SubElement(metadata_elem, 'MetadataEntry') + key_elem = ET.SubElement(entry, 'Key') + key_elem.text = key + value_elem = ET.SubElement(entry, 'Value') + value_elem.text = value + + # send it back to the server + res = self.connection.request( + '%s/metadata' % get_url_path(node.id), + data=ET.tostring(metadata_elem), + headers={ + 'Content-Type': 'application/vnd.vmware.vcloud.metadata+xml' + }, + method='POST') + self._wait_for_task_completion(res.object.get('href')) + + def ex_query(self, type, filter=None, page=1, page_size=100, sort_asc=None, + sort_desc=None): + """ + Queries vCloud for specified type. See + http://www.vmware.com/pdf/vcd_15_api_guide.pdf for details. Each + element of the returned list is a dictionary with all attributes from + the record. + + :param type: type to query (r.g. user, group, vApp etc.) 
+ :type type: ``str`` + + :param filter: filter expression (see documentation for syntax) + :type filter: ``str`` + + :param page: page number + :type page: ``int`` + + :param page_size: page size + :type page_size: ``int`` + + :param sort_asc: sort in ascending order by specified field + :type sort_asc: ``str`` + + :param sort_desc: sort in descending order by specified field + :type sort_desc: ``str`` + + :rtype: ``list`` of dict + """ + # This is a workaround for filter parameter encoding + # the urllib encodes (name==Developers%20Only) into + # %28name%3D%3DDevelopers%20Only%29) which is not accepted by vCloud + params = { + 'type': type, + 'pageSize': page_size, + 'page': page, + } + if sort_asc: + params['sortAsc'] = sort_asc + if sort_desc: + params['sortDesc'] = sort_desc + + url = '/api/query?' + urlencode(params) + if filter: + if not filter.startswith('('): + filter = '(' + filter + ')' + url += '&filter=' + filter.replace(' ', '+') + + results = [] + res = self.connection.request(url) + for elem in res.object: + if not elem.tag.endswith('Link'): + result = elem.attrib + result['type'] = elem.tag.split('}')[1] + results.append(result) + return results + + def create_node(self, **kwargs): + """ + Creates and returns node. If the source image is: + - vApp template - a new vApp is instantiated from template + - existing vApp - a new vApp is cloned from the source vApp. Can + not clone more vApps is parallel otherwise + resource busy error is raised. + + + @inherits: :class:`NodeDriver.create_node` + + :keyword image: OS Image to boot on node. (required). Can be a + NodeImage or existing Node that will be cloned. + :type image: :class:`NodeImage` or :class:`Node` + + :keyword ex_network: Organisation's network name for attaching vApp + VMs to. + :type ex_network: ``str`` + + :keyword ex_vdc: Name of organisation's virtual data center where + vApp VMs will be deployed. 
+ :type ex_vdc: ``str`` + + :keyword ex_vm_names: list of names to be used as a VM and computer + name. The name must be max. 15 characters + long and follow the host name requirements. + :type ex_vm_names: ``list`` of ``str`` + + :keyword ex_vm_cpu: number of virtual CPUs/cores to allocate for + each vApp VM. + :type ex_vm_cpu: ``int`` + + :keyword ex_vm_memory: amount of memory in MB to allocate for each + vApp VM. + :type ex_vm_memory: ``int`` + + :keyword ex_vm_script: full path to file containing guest + customisation script for each vApp VM. + Useful for creating users & pushing out + public SSH keys etc. + :type ex_vm_script: ``str`` + + :keyword ex_vm_network: Override default vApp VM network name. + Useful for when you've imported an OVF + originating from outside of the vCloud. + :type ex_vm_network: ``str`` + + :keyword ex_vm_fence: Fence mode for connecting the vApp VM network + (ex_vm_network) to the parent + organisation network (ex_network). + :type ex_vm_fence: ``str`` + + :keyword ex_vm_ipmode: IP address allocation mode for all vApp VM + network connections. + :type ex_vm_ipmode: ``str`` + + :keyword ex_deploy: set to False if the node shouldn't be deployed + (started) after creation + :type ex_deploy: ``bool`` + + :keyword ex_clone_timeout: timeout in seconds for clone/instantiate + VM operation. + Cloning might be a time consuming + operation especially when linked clones + are disabled or VMs are created on + different datastores. + Overrides the default task completion + value. 
+ :type ex_clone_timeout: ``int`` + """ + name = kwargs['name'] + image = kwargs['image'] + ex_vm_names = kwargs.get('ex_vm_names') + ex_vm_cpu = kwargs.get('ex_vm_cpu') + ex_vm_memory = kwargs.get('ex_vm_memory') + ex_vm_script = kwargs.get('ex_vm_script') + ex_vm_fence = kwargs.get('ex_vm_fence', None) + ex_network = kwargs.get('ex_network', None) + ex_vm_network = kwargs.get('ex_vm_network', None) + ex_vm_ipmode = kwargs.get('ex_vm_ipmode', None) + ex_deploy = kwargs.get('ex_deploy', True) + ex_vdc = kwargs.get('ex_vdc', None) + ex_clone_timeout = kwargs.get('ex_clone_timeout', + DEFAULT_TASK_COMPLETION_TIMEOUT) + + self._validate_vm_names(ex_vm_names) + self._validate_vm_cpu(ex_vm_cpu) + self._validate_vm_memory(ex_vm_memory) + self._validate_vm_fence(ex_vm_fence) + self._validate_vm_ipmode(ex_vm_ipmode) + ex_vm_script = self._validate_vm_script(ex_vm_script) + + # Some providers don't require a network link + if ex_network: + network_href = self._get_network_href(ex_network) + network_elem = self.connection.request( + get_url_path(network_href)).object + else: + network_elem = None + + vdc = self._get_vdc(ex_vdc) + + if self._is_node(image): + vapp_name, vapp_href = self._clone_node(name, + image, + vdc, + ex_clone_timeout) + else: + vapp_name, vapp_href = self._instantiate_node(name, image, + network_elem, + vdc, ex_vm_network, + ex_vm_fence, + ex_clone_timeout) + + self._change_vm_names(vapp_href, ex_vm_names) + self._change_vm_cpu(vapp_href, ex_vm_cpu) + self._change_vm_memory(vapp_href, ex_vm_memory) + self._change_vm_script(vapp_href, ex_vm_script) + self._change_vm_ipmode(vapp_href, ex_vm_ipmode) + + # Power on the VM. 
+ if ex_deploy: + # Retry 3 times: when instantiating large number of VMs at the same + # time some may fail on resource allocation + retry = 3 + while True: + try: + res = self.connection.request( + '%s/power/action/powerOn' % get_url_path(vapp_href), + method='POST') + self._wait_for_task_completion(res.object.get('href')) + break + except Exception: + if retry <= 0: + raise + retry -= 1 + time.sleep(10) + + res = self.connection.request(get_url_path(vapp_href)) + node = self._to_node(res.object) + return node + + def _instantiate_node(self, name, image, network_elem, vdc, vm_network, + vm_fence, instantiate_timeout): + instantiate_xml = Instantiate_1_5_VAppXML( + name=name, + template=image.id, + network=network_elem, + vm_network=vm_network, + vm_fence=vm_fence + ) + + # Instantiate VM and get identifier. + headers = { + 'Content-Type': + 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml' + } + res = self.connection.request( + '%s/action/instantiateVAppTemplate' % get_url_path(vdc.id), + data=instantiate_xml.tostring(), + method='POST', + headers=headers + ) + vapp_name = res.object.get('name') + vapp_href = res.object.get('href') + + task_href = res.object.find(fixxpath(res.object, "Tasks/Task")).get( + 'href') + self._wait_for_task_completion(task_href, instantiate_timeout) + return vapp_name, vapp_href + + def _clone_node(self, name, sourceNode, vdc, clone_timeout): + clone_xml = ET.Element( + "CloneVAppParams", + {'name': name, 'deploy': 'false', 'powerOn': 'false', + 'xmlns': "http://www.vmware.com/vcloud/v1.5", + 'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"} + ) + ET.SubElement(clone_xml, + 'Description').text = 'Clone of ' + sourceNode.name + ET.SubElement(clone_xml, 'Source', {'href': sourceNode.id}) + + headers = { + 'Content-Type': 'application/vnd.vmware.vcloud.cloneVAppParams+xml' + } + res = self.connection.request( + '%s/action/cloneVApp' % get_url_path(vdc.id), + data=ET.tostring(clone_xml), + method='POST', + 
headers=headers + ) + vapp_name = res.object.get('name') + vapp_href = res.object.get('href') + + task_href = res.object.find( + fixxpath(res.object, "Tasks/Task")).get('href') + self._wait_for_task_completion(task_href, clone_timeout) + + res = self.connection.request(get_url_path(vapp_href)) + + vms = res.object.findall(fixxpath(res.object, "Children/Vm")) + + # Fix the networking for VMs + for i, vm in enumerate(vms): + # Remove network + network_xml = ET.Element("NetworkConnectionSection", { + 'ovf:required': 'false', + 'xmlns': "http://www.vmware.com/vcloud/v1.5", + 'xmlns:ovf': 'http://schemas.dmtf.org/ovf/envelope/1'}) + ET.SubElement(network_xml, "ovf:Info").text = \ + 'Specifies the available VM network connections' + + headers = { + 'Content-Type': + 'application/vnd.vmware.vcloud.networkConnectionSection+xml' + } + res = self.connection.request( + '%s/networkConnectionSection' % get_url_path(vm.get('href')), + data=ET.tostring(network_xml), + method='PUT', + headers=headers + ) + self._wait_for_task_completion(res.object.get('href')) + + # Re-add network + network_xml = vm.find(fixxpath(vm, 'NetworkConnectionSection')) + network_conn_xml = network_xml.find( + fixxpath(network_xml, 'NetworkConnection')) + network_conn_xml.set('needsCustomization', 'true') + network_conn_xml.remove( + network_conn_xml.find(fixxpath(network_xml, 'IpAddress'))) + network_conn_xml.remove( + network_conn_xml.find(fixxpath(network_xml, 'MACAddress'))) + + headers = { + 'Content-Type': + 'application/vnd.vmware.vcloud.networkConnectionSection+xml' + } + res = self.connection.request( + '%s/networkConnectionSection' % get_url_path(vm.get('href')), + data=ET.tostring(network_xml), + method='PUT', + headers=headers + ) + self._wait_for_task_completion(res.object.get('href')) + + return vapp_name, vapp_href + + def ex_set_vm_cpu(self, vapp_or_vm_id, vm_cpu): + """ + Sets the number of virtual CPUs for the specified VM or VMs under + the vApp. 
If the vapp_or_vm_id param represents a link to an vApp + all VMs that are attached to this vApp will be modified. + + Please ensure that hot-adding a virtual CPU is enabled for the + powered on virtual machines. Otherwise use this method on undeployed + vApp. + + :keyword vapp_or_vm_id: vApp or VM ID that will be modified. If + a vApp ID is used here all attached VMs + will be modified + :type vapp_or_vm_id: ``str`` + + :keyword vm_cpu: number of virtual CPUs/cores to allocate for + specified VMs + :type vm_cpu: ``int`` + + :rtype: ``None`` + """ + self._validate_vm_cpu(vm_cpu) + self._change_vm_cpu(vapp_or_vm_id, vm_cpu) + + def ex_set_vm_memory(self, vapp_or_vm_id, vm_memory): + """ + Sets the virtual memory in MB to allocate for the specified VM or + VMs under the vApp. If the vapp_or_vm_id param represents a link + to an vApp all VMs that are attached to this vApp will be modified. + + Please ensure that hot-change of virtual memory is enabled for the + powered on virtual machines. Otherwise use this method on undeployed + vApp. + + :keyword vapp_or_vm_id: vApp or VM ID that will be modified. If + a vApp ID is used here all attached VMs + will be modified + :type vapp_or_vm_id: ``str`` + + :keyword vm_memory: virtual memory in MB to allocate for the + specified VM or VMs + :type vm_memory: ``int`` + + :rtype: ``None`` + """ + self._validate_vm_memory(vm_memory) + self._change_vm_memory(vapp_or_vm_id, vm_memory) + + def ex_add_vm_disk(self, vapp_or_vm_id, vm_disk_size): + """ + Adds a virtual disk to the specified VM or VMs under the vApp. If the + vapp_or_vm_id param represents a link to an vApp all VMs that are + attached to this vApp will be modified. + + :keyword vapp_or_vm_id: vApp or VM ID that will be modified. 
If a + vApp ID is used here all attached VMs + will be modified + :type vapp_or_vm_id: ``str`` + + :keyword vm_disk_size: the disk capacity in GB that will be added + to the specified VM or VMs + :type vm_disk_size: ``int`` + + :rtype: ``None`` + """ + self._validate_vm_disk_size(vm_disk_size) + self._add_vm_disk(vapp_or_vm_id, vm_disk_size) + + @staticmethod + def _validate_vm_names(names): + if names is None: + return + hname_re = re.compile( + '^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9]*)[\-])*([A-Za-z]|[A-Za-z][A-Za-z0-9]*[A-Za-z0-9])$') # NOQA + for name in names: + if len(name) > 15: + raise ValueError( + 'The VM name "' + name + '" is too long for the computer ' + 'name (max 15 chars allowed).') + if not hname_re.match(name): + raise ValueError('The VM name "' + name + '" can not be ' + 'used. "' + name + '" is not a valid ' + 'computer name for the VM.') + + @staticmethod + def _validate_vm_memory(vm_memory): + if vm_memory is None: + return + elif vm_memory not in VIRTUAL_MEMORY_VALS: + raise ValueError( + '%s is not a valid vApp VM memory value' % vm_memory) + + @staticmethod + def _validate_vm_cpu(vm_cpu): + if vm_cpu is None: + return + elif vm_cpu not in VIRTUAL_CPU_VALS_1_5: + raise ValueError('%s is not a valid vApp VM CPU value' % vm_cpu) + + @staticmethod + def _validate_vm_disk_size(vm_disk): + if vm_disk is None: + return + elif int(vm_disk) < 0: + raise ValueError('%s is not a valid vApp VM disk space value', + vm_disk) + + @staticmethod + def _validate_vm_script(vm_script): + if vm_script is None: + return + # Try to locate the script file + if not os.path.isabs(vm_script): + vm_script = os.path.expanduser(vm_script) + vm_script = os.path.abspath(vm_script) + if not os.path.isfile(vm_script): + raise LibcloudError( + "%s the VM script file does not exist" % vm_script) + try: + open(vm_script).read() + except: + raise + return vm_script + + @staticmethod + def _validate_vm_fence(vm_fence): + if vm_fence is None: + return + elif vm_fence not in 
FENCE_MODE_VALS_1_5: + raise ValueError('%s is not a valid fencing mode value' % vm_fence) + + @staticmethod + def _validate_vm_ipmode(vm_ipmode): + if vm_ipmode is None: + return + elif vm_ipmode == 'MANUAL': + raise NotImplementedError( + 'MANUAL IP mode: The interface for supplying ' + 'IPAddress does not exist yet') + elif vm_ipmode not in IP_MODE_VALS_1_5: + raise ValueError( + '%s is not a valid IP address allocation mode value' + % vm_ipmode) + + def _change_vm_names(self, vapp_or_vm_id, vm_names): + if vm_names is None: + return + + vms = self._get_vm_elements(vapp_or_vm_id) + for i, vm in enumerate(vms): + if len(vm_names) <= i: + return + + # Get GuestCustomizationSection + res = self.connection.request( + '%s/guestCustomizationSection' % get_url_path(vm.get('href'))) + + # Update GuestCustomizationSection + res.object.find( + fixxpath(res.object, 'ComputerName')).text = vm_names[i] + # Remove AdminPassword from customization section + admin_pass = res.object.find(fixxpath(res.object, 'AdminPassword')) + if admin_pass is not None: + res.object.remove(admin_pass) + + headers = { + 'Content-Type': + 'application/vnd.vmware.vcloud.guestCustomizationSection+xml' + } + res = self.connection.request( + '%s/guestCustomizationSection' % get_url_path(vm.get('href')), + data=ET.tostring(res.object), + method='PUT', + headers=headers + ) + self._wait_for_task_completion(res.object.get('href')) + + # Update Vm name + req_xml = ET.Element("Vm", { + 'name': vm_names[i], + 'xmlns': "http://www.vmware.com/vcloud/v1.5"}) + res = self.connection.request( + get_url_path(vm.get('href')), + data=ET.tostring(req_xml), + method='PUT', + headers={ + 'Content-Type': 'application/vnd.vmware.vcloud.vm+xml'} + ) + self._wait_for_task_completion(res.object.get('href')) + + def _change_vm_cpu(self, vapp_or_vm_id, vm_cpu): + if vm_cpu is None: + return + + vms = self._get_vm_elements(vapp_or_vm_id) + for vm in vms: + # Get virtualHardwareSection/cpu section + res = 
self.connection.request( + '%s/virtualHardwareSection/cpu' % get_url_path(vm.get('href'))) + + # Update VirtualQuantity field + xpath = ('{http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/' + 'CIM_ResourceAllocationSettingData}VirtualQuantity') + res.object.find(xpath).text = str(vm_cpu) + + headers = { + 'Content-Type': 'application/vnd.vmware.vcloud.rasdItem+xml' + } + res = self.connection.request( + '%s/virtualHardwareSection/cpu' % get_url_path(vm.get('href')), + data=ET.tostring(res.object), + method='PUT', + headers=headers + ) + self._wait_for_task_completion(res.object.get('href')) + + def _change_vm_memory(self, vapp_or_vm_id, vm_memory): + if vm_memory is None: + return + + vms = self._get_vm_elements(vapp_or_vm_id) + for vm in vms: + # Get virtualHardwareSection/memory section + res = self.connection.request( + '%s/virtualHardwareSection/memory' % + get_url_path(vm.get('href'))) + + # Update VirtualQuantity field + xpath = ('{http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/' + 'CIM_ResourceAllocationSettingData}VirtualQuantity') + res.object.find(xpath).text = str(vm_memory) + + headers = { + 'Content-Type': 'application/vnd.vmware.vcloud.rasdItem+xml' + } + res = self.connection.request( + '%s/virtualHardwareSection/memory' % get_url_path( + vm.get('href')), + data=ET.tostring(res.object), + method='PUT', + headers=headers + ) + self._wait_for_task_completion(res.object.get('href')) + + def _add_vm_disk(self, vapp_or_vm_id, vm_disk): + if vm_disk is None: + return + + rasd_ns = ('{http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/' + 'CIM_ResourceAllocationSettingData}') + + vms = self._get_vm_elements(vapp_or_vm_id) + for vm in vms: + # Get virtualHardwareSection/disks section + res = self.connection.request( + '%s/virtualHardwareSection/disks' % + get_url_path(vm.get('href'))) + + existing_ids = [] + new_disk = None + for item in res.object.findall(fixxpath(res.object, 'Item')): + # Clean Items from unnecessary stuff + for elem in item: + if 
elem.tag == '%sInstanceID' % rasd_ns: + existing_ids.append(int(elem.text)) + if elem.tag in ['%sAddressOnParent' % rasd_ns, + '%sParent' % rasd_ns]: + item.remove(elem) + if item.find('%sHostResource' % rasd_ns) is not None: + new_disk = item + + new_disk = copy.deepcopy(new_disk) + disk_id = max(existing_ids) + 1 + new_disk.find('%sInstanceID' % rasd_ns).text = str(disk_id) + new_disk.find('%sElementName' % + rasd_ns).text = 'Hard Disk ' + str(disk_id) + new_disk.find('%sHostResource' % rasd_ns).set( + fixxpath(new_disk, 'capacity'), str(int(vm_disk) * 1024)) + res.object.append(new_disk) + + headers = { + 'Content-Type': + 'application/vnd.vmware.vcloud.rasditemslist+xml' + } + res = self.connection.request( + '%s/virtualHardwareSection/disks' % get_url_path( + vm.get('href')), + data=ET.tostring(res.object), + method='PUT', + headers=headers + ) + self._wait_for_task_completion(res.object.get('href')) + + def _change_vm_script(self, vapp_or_vm_id, vm_script): + if vm_script is None: + return + + vms = self._get_vm_elements(vapp_or_vm_id) + try: + script = open(vm_script).read() + except: + return + + # ElementTree escapes script characters automatically. 
Escape + # requirements: + # http://www.vmware.com/support/vcd/doc/rest-api-doc-1.5-html/types/ + # GuestCustomizationSectionType.html + for vm in vms: + # Get GuestCustomizationSection + res = self.connection.request( + '%s/guestCustomizationSection' % get_url_path(vm.get('href'))) + + # Attempt to update any existing CustomizationScript element + try: + res.object.find( + fixxpath(res.object, 'CustomizationScript')).text = script + except: + # CustomizationScript section does not exist, insert it just + # before ComputerName + for i, e in enumerate(res.object): + if e.tag == \ + '{http://www.vmware.com/vcloud/v1.5}ComputerName': + break + e = ET.Element( + '{http://www.vmware.com/vcloud/v1.5}CustomizationScript') + e.text = script + res.object.insert(i, e) + + # Remove AdminPassword from customization section due to an API + # quirk + admin_pass = res.object.find(fixxpath(res.object, 'AdminPassword')) + if admin_pass is not None: + res.object.remove(admin_pass) + + # Update VM's GuestCustomizationSection + headers = { + 'Content-Type': + 'application/vnd.vmware.vcloud.guestCustomizationSection+xml' + } + res = self.connection.request( + '%s/guestCustomizationSection' % get_url_path(vm.get('href')), + data=ET.tostring(res.object), + method='PUT', + headers=headers + ) + self._wait_for_task_completion(res.object.get('href')) + + def _change_vm_ipmode(self, vapp_or_vm_id, vm_ipmode): + if vm_ipmode is None: + return + + vms = self._get_vm_elements(vapp_or_vm_id) + + for vm in vms: + res = self.connection.request( + '%s/networkConnectionSection' % get_url_path(vm.get('href'))) + net_conns = res.object.findall( + fixxpath(res.object, 'NetworkConnection')) + for c in net_conns: + c.find(fixxpath(c, 'IpAddressAllocationMode')).text = vm_ipmode + + headers = { + 'Content-Type': + 'application/vnd.vmware.vcloud.networkConnectionSection+xml' + } + + res = self.connection.request( + '%s/networkConnectionSection' % get_url_path(vm.get('href')), + 
data=ET.tostring(res.object), + method='PUT', + headers=headers + ) + self._wait_for_task_completion(res.object.get('href')) + + def _get_network_href(self, network_name): + network_href = None + + # Find the organisation's network href + res = self.connection.request(self.org) + links = res.object.findall(fixxpath(res.object, 'Link')) + for l in links: + if l.attrib['type'] == \ + 'application/vnd.vmware.vcloud.orgNetwork+xml' \ + and l.attrib['name'] == network_name: + network_href = l.attrib['href'] + + if network_href is None: + raise ValueError( + '%s is not a valid organisation network name' % network_name) + else: + return network_href + + def _get_vm_elements(self, vapp_or_vm_id): + res = self.connection.request(get_url_path(vapp_or_vm_id)) + if res.object.tag.endswith('VApp'): + vms = res.object.findall(fixxpath(res.object, 'Children/Vm')) + elif res.object.tag.endswith('Vm'): + vms = [res.object] + else: + raise ValueError( + 'Specified ID value is not a valid VApp or Vm identifier.') + return vms + + def _is_node(self, node_or_image): + return isinstance(node_or_image, Node) + + def _to_node(self, node_elm): + # Parse VMs as extra field + vms = [] + for vm_elem in node_elm.findall(fixxpath(node_elm, 'Children/Vm')): + public_ips = [] + private_ips = [] + + xpath = fixxpath(vm_elem, + 'NetworkConnectionSection/NetworkConnection') + for connection in vm_elem.findall(xpath): + ip = connection.find(fixxpath(connection, "IpAddress")) + if ip is not None: + private_ips.append(ip.text) + external_ip = connection.find( + fixxpath(connection, "ExternalIpAddress")) + if external_ip is not None: + public_ips.append(external_ip.text) + elif ip is not None: + public_ips.append(ip.text) + + xpath = ('{http://schemas.dmtf.org/ovf/envelope/1}' + 'OperatingSystemSection') + os_type_elem = vm_elem.find(xpath) + if os_type_elem is not None: + os_type = os_type_elem.get( + '{http://www.vmware.com/schema/ovf}osType') + else: + os_type = None + vm = { + 'id': 
vm_elem.get('href'), + 'name': vm_elem.get('name'), + 'state': self.NODE_STATE_MAP[vm_elem.get('status')], + 'public_ips': public_ips, + 'private_ips': private_ips, + 'os_type': os_type + } + vms.append(vm) + + # Take the node IP addresses from all VMs + public_ips = [] + private_ips = [] + for vm in vms: + public_ips.extend(vm['public_ips']) + private_ips.extend(vm['private_ips']) + + # Find vDC + vdc_id = next(link.get('href') for link + in node_elm.findall(fixxpath(node_elm, 'Link')) + if link.get('type') == + 'application/vnd.vmware.vcloud.vdc+xml') + vdc = next(vdc for vdc in self.vdcs if vdc.id == vdc_id) + + node = Node(id=node_elm.get('href'), + name=node_elm.get('name'), + state=self.NODE_STATE_MAP[node_elm.get('status')], + public_ips=public_ips, + private_ips=private_ips, + driver=self.connection.driver, + extra={'vdc': vdc.name, 'vms': vms}) + return node + + def _to_vdc(self, vdc_elm): + + def get_capacity_values(capacity_elm): + if capacity_elm is None: + return None + limit = int(capacity_elm.findtext(fixxpath(capacity_elm, 'Limit'))) + used = int(capacity_elm.findtext(fixxpath(capacity_elm, 'Used'))) + units = capacity_elm.findtext(fixxpath(capacity_elm, 'Units')) + return Capacity(limit, used, units) + + cpu = get_capacity_values( + vdc_elm.find(fixxpath(vdc_elm, 'ComputeCapacity/Cpu'))) + memory = get_capacity_values( + vdc_elm.find(fixxpath(vdc_elm, 'ComputeCapacity/Memory'))) + storage = get_capacity_values( + vdc_elm.find(fixxpath(vdc_elm, 'StorageCapacity'))) + + return Vdc(id=vdc_elm.get('href'), + name=vdc_elm.get('name'), + driver=self, + allocation_model=vdc_elm.findtext( + fixxpath(vdc_elm, 'AllocationModel')), + cpu=cpu, + memory=memory, + storage=storage) + + +class VCloud_5_1_NodeDriver(VCloud_1_5_NodeDriver): + + @staticmethod + def _validate_vm_memory(vm_memory): + if vm_memory is None: + return None + elif (vm_memory % 4) != 0: + # The vcd 5.1 virtual machine memory size must be a multiple of 4 + # MB + raise ValueError( + '%s is 
not a valid vApp VM memory value' % (vm_memory)) diff -Nru libcloud-0.5.0/libcloud/compute/drivers/vcl.py libcloud-0.15.1/libcloud/compute/drivers/vcl.py --- libcloud-0.5.0/libcloud/compute/drivers/vcl.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/vcl.py 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,302 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +VCL driver +""" + +import time + +from libcloud.common.base import ConnectionUserAndKey +from libcloud.common.xmlrpc import XMLRPCResponse, XMLRPCConnection +from libcloud.common.types import InvalidCredsError, LibcloudError +from libcloud.compute.types import Provider, NodeState +from libcloud.compute.base import NodeDriver, Node +from libcloud.compute.base import NodeSize, NodeImage + + +class VCLResponse(XMLRPCResponse): + exceptions = { + 'VCL_Account': InvalidCredsError, + } + + +class VCLConnection(XMLRPCConnection, ConnectionUserAndKey): + endpoint = '/index.php?mode=xmlrpccall' + + def add_default_headers(self, headers): + headers['X-APIVERSION'] = '2' + headers['X-User'] = self.user_id + headers['X-Pass'] = self.key + return headers + + +class VCLNodeDriver(NodeDriver): + """ + VCL node driver + + :keyword host: The VCL host to which you make requests(required) + :type host: ``str`` + """ + + NODE_STATE_MAP = { + 'ready': NodeState.RUNNING, + 'failed': NodeState.TERMINATED, + 'timedout': NodeState.TERMINATED, + 'loading': NodeState.PENDING, + 'time': NodeState.PENDING, + 'future': NodeState.PENDING, + 'error': NodeState.UNKNOWN, + 'notready': NodeState.PENDING, + 'notavailable': NodeState.TERMINATED, + 'success': NodeState.PENDING + } + + connectionCls = VCLConnection + name = 'VCL' + website = 'http://incubator.apache.org/vcl/' + type = Provider.VCL + + def __init__(self, key, secret, secure=True, host=None, port=None, *args, + **kwargs): + """ + :param key: API key or username to used (required) + :type key: ``str`` + + :param secret: Secret password to be used (required) + :type secret: ``str`` + + :param secure: Weither to use HTTPS or HTTP. + :type secure: ``bool`` + + :param host: Override hostname used for connections. (required) + :type host: ``str`` + + :param port: Override port used for connections. 
+ :type port: ``int`` + + :rtype: ``None`` + """ + if not host: + raise Exception('When instantiating VCL driver directly ' + + 'you also need to provide host') + + super(VCLNodeDriver, self).__init__(key, secret, secure=True, + host=None, port=None, *args, + **kwargs) + + def _vcl_request(self, method, *args): + res = self.connection.request( + method, + *args + ).object + if(res['status'] == 'error'): + raise LibcloudError(res['errormsg'], driver=self) + return res + + def create_node(self, **kwargs): + """Create a new VCL reservation + size and name ignored, image is the id from list_image + + @inherits: :class:`NodeDriver.create_node` + + :keyword image: image is the id from list_image + :type image: ``str`` + + :keyword start: start time as unix timestamp + :type start: ``str`` + + :keyword length: length of time in minutes + :type length: ``str`` + """ + + image = kwargs["image"] + start = kwargs.get('start', int(time.time())) + length = kwargs.get('length', '60') + + res = self._vcl_request( + "XMLRPCaddRequest", + image.id, + start, + length + ) + + return Node( + id=res['requestid'], + name=image.name, + state=self.NODE_STATE_MAP[res['status']], + public_ips=[], + private_ips=[], + driver=self, + image=image.name + ) + + def destroy_node(self, node): + """ + End VCL reservation for the node passed in. + Throws error if request fails. 
+ + :param node: The node to be destroyed + :type node: :class:`Node` + + :rtype: ``bool`` + """ + try: + self._vcl_request( + 'XMLRPCendRequest', + node.id + ) + except LibcloudError: + return False + return True + + def _to_image(self, img): + return NodeImage( + id=img['id'], + name=img['name'], + driver=self.connection.driver + ) + + def list_images(self, location=None): + """ + List images available to the user provided credentials + + @inherits: :class:`NodeDriver.list_images` + """ + res = self.connection.request( + "XMLRPCgetImages" + ).object + return [self._to_image(i) for i in res] + + def list_sizes(self, location=None): + """ + VCL does not choosing sizes for node creation. + Size of images are statically set by administrators. + + @inherits: :class:`NodeDriver.list_sizes` + """ + return [NodeSize( + 't1.micro', + 'none', + '512', + 0, 0, 0, self) + ] + + def _to_connect_data(self, request_id, ipaddr): + res = self._vcl_request( + "XMLRPCgetRequestConnectData", + request_id, + ipaddr + ) + return res + + def _to_status(self, requestid, imagename, ipaddr): + res = self._vcl_request( + "XMLRPCgetRequestStatus", + requestid + ) + + public_ips = [] + extra = [] + if(res['status'] == 'ready'): + cdata = self._to_connect_data(requestid, ipaddr) + public_ips = [cdata['serverIP']] + extra = { + 'user': cdata['user'], + 'pass': cdata['password'] + } + return Node( + id=requestid, + name=imagename, + state=self.NODE_STATE_MAP[res['status']], + public_ips=public_ips, + private_ips=[], + driver=self, + image=imagename, + extra=extra + ) + + def _to_nodes(self, res, ipaddr): + return [self._to_status( + h['requestid'], + h['imagename'], + ipaddr + ) for h in res] + + def list_nodes(self, ipaddr): + """ + List nodes + + :param ipaddr: IP address which should be used + :type ipaddr: ``str`` + + :rtype: ``list`` of :class:`Node` + """ + res = self._vcl_request( + "XMLRPCgetRequestIds" + ) + return self._to_nodes(res['requests'], ipaddr) + + def 
ex_update_node_access(self, node, ipaddr): + """ + Update the remote ip accessing the node. + + :param node: the reservation node to update + :type node: :class:`Node` + + :param ipaddr: the ipaddr used to access the node + :type ipaddr: ``str`` + + :return: node with updated information + :rtype: :class:`Node` + """ + return self._to_status(node.id, node.image, ipaddr) + + def ex_extend_request_time(self, node, minutes): + """ + Time in minutes to extend the requested node's reservation time + + :param node: the reservation node to update + :type node: :class:`Node` + + :param minutes: the number of mintes to update + :type minutes: ``str`` + + :return: true on success, throws error on failure + :rtype: ``bool`` + """ + return self._vcl_request( + "XMLRPCextendRequest", + node.id, + minutes + ) + + def ex_get_request_end_time(self, node): + """ + Get the ending time of the node reservation. + + :param node: the reservation node to update + :type node: :class:`Node` + + :return: unix timestamp + :rtype: ``int`` + """ + res = self._vcl_request( + "XMLRPCgetRequestIds" + ) + time = 0 + for i in res['requests']: + if i['requestid'] == node.id: + time = i['end'] + return time diff -Nru libcloud-0.5.0/libcloud/compute/drivers/voxel.py libcloud-0.15.1/libcloud/compute/drivers/voxel.py --- libcloud-0.5.0/libcloud/compute/drivers/voxel.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/drivers/voxel.py 2013-11-29 12:35:04.000000000 +0000 @@ -19,9 +19,9 @@ import datetime import hashlib -from xml.etree import ElementTree as ET +from libcloud.utils.py3 import b -from libcloud.common.base import Response, ConnectionUserAndKey +from libcloud.common.base import XmlResponse, ConnectionUserAndKey from libcloud.common.types import InvalidCredsError from libcloud.compute.providers import Provider from libcloud.compute.types import NodeState @@ -30,17 +30,18 @@ VOXEL_API_HOST = "api.voxel.net" -class VoxelResponse(Response): - def __init__(self, response): 
+class VoxelResponse(XmlResponse): + def __init__(self, response, connection): self.parsed = None - super(VoxelResponse, self).__init__(response) + super(VoxelResponse, self).__init__(response=response, + connection=connection) def parse_body(self): if not self.body: return None if not self.parsed: - self.parsed = ET.XML(self.body) + self.parsed = super(VoxelResponse, self).parse_body() return self.parsed def parse_error(self): @@ -48,7 +49,7 @@ if not self.body: return None if not self.parsed: - self.parsed = ET.XML(self.body) + self.parsed = super(VoxelResponse, self).parse_body() for err in self.parsed.findall('err'): code = err.get('code') err_list.append("(%s) %s" % (code, err.get('msg'))) @@ -63,12 +64,13 @@ def success(self): if not self.parsed: - self.parsed = ET.XML(self.body) + self.parsed = super(VoxelResponse, self).parse_body() stat = self.parsed.get('stat') if stat != "ok": return False return True + class VoxelConnection(ConnectionUserAndKey): """ Connection class for the Voxel driver @@ -78,24 +80,22 @@ responseCls = VoxelResponse def add_default_params(self, params): + params = dict([(k, v) for k, v in list(params.items()) + if v is not None]) params["key"] = self.user_id - params["timestamp"] = datetime.datetime.utcnow().isoformat()+"+0000" - - for param in params.keys(): - if params[param] is None: - del params[param] + params["timestamp"] = datetime.datetime.utcnow().isoformat() + "+0000" - keys = params.keys() + keys = list(params.keys()) keys.sort() md5 = hashlib.md5() - md5.update(self.key) + md5.update(b(self.key)) for key in keys: if params[key]: if not params[key] is None: - md5.update("%s%s"% (key, params[key])) + md5.update(b("%s%s" % (key, params[key]))) else: - md5.update(key) + md5.update(b(key)) params['api_sig'] = md5.hexdigest() return params @@ -111,6 +111,7 @@ 'unknown': NodeState.UNKNOWN, } + class VoxelNodeDriver(NodeDriver): """ Voxel VoxCLOUD node driver @@ -119,9 +120,10 @@ connectionCls = VoxelConnection type = 
Provider.VOXEL name = 'Voxel VoxCLOUD' + website = 'http://www.voxel.net/' def _initialize_instance_types(): - for cpus in range(1,14): + for cpus in range(1, 14): if cpus == 1: name = "Single CPU" else: @@ -129,16 +131,16 @@ id = "%dcpu" % cpus ram = cpus * RAM_PER_CPU - VOXEL_INSTANCE_TYPES[id]= { - 'id': id, - 'name': name, - 'ram': ram, - 'disk': None, - 'bandwidth': None, - 'price': None} + VOXEL_INSTANCE_TYPES[id] = { + 'id': id, + 'name': name, + 'ram': ram, + 'disk': None, + 'bandwidth': None, + 'price': None} features = {"create_node": [], - "list_sizes": ["variable_disk"]} + "list_sizes": ["variable_disk"]} _initialize_instance_types() @@ -148,8 +150,8 @@ return self._to_nodes(result) def list_sizes(self, location=None): - return [ NodeSize(driver=self.connection.driver, **i) - for i in VOXEL_INSTANCE_TYPES.values() ] + return [NodeSize(driver=self.connection.driver, **i) + for i in list(VOXEL_INSTANCE_TYPES.values())] def list_images(self, location=None): params = {"method": "voxel.images.list"} @@ -159,45 +161,47 @@ def create_node(self, **kwargs): """Create Voxel Node - @keyword name: the name to assign the node (mandatory) - @type name: C{str} + :keyword name: the name to assign the node (mandatory) + :type name: ``str`` - @keyword image: distribution to deploy - @type image: L{NodeImage} + :keyword image: distribution to deploy + :type image: :class:`NodeImage` - @keyword size: the plan size to create (mandatory) + :keyword size: the plan size to create (mandatory) Requires size.disk (GB) to be set manually - @type size: L{NodeSize} + :type size: :class:`NodeSize` - @keyword location: which datacenter to create the node in - @type location: L{NodeLocation} + :keyword location: which datacenter to create the node in + :type location: :class:`NodeLocation` - @keyword ex_privateip: Backend IP address to assign to node; + :keyword ex_privateip: Backend IP address to assign to node; must be chosen from the customer's private VLAN assignment. 
- @type ex_privateip: C{str} + :type ex_privateip: ``str`` - @keyword ex_publicip: Public-facing IP address to assign to node; + :keyword ex_publicip: Public-facing IP address to assign to node; must be chosen from the customer's public VLAN assignment. - @type ex_publicip: C{str} + :type ex_publicip: ``str`` - @keyword ex_rootpass: Password for root access; generated if unset. - @type ex_rootpass: C{str} + :keyword ex_rootpass: Password for root access; generated if unset. + :type ex_rootpass: ``str`` - @keyword ex_consolepass: Password for remote console; + :keyword ex_consolepass: Password for remote console; generated if unset. - @type ex_consolepass: C{str} + :type ex_consolepass: ``str`` - @keyword ex_sshuser: Username for SSH access - @type ex_sshuser: C{str} + :keyword ex_sshuser: Username for SSH access + :type ex_sshuser: ``str`` - @keyword ex_sshpass: Password for SSH access; generated if unset. - @type ex_sshpass: C{str} + :keyword ex_sshpass: Password for SSH access; generated if unset. + :type ex_sshpass: ``str`` - @keyword ex_voxel_access: Allow access Voxel administrative access. + :keyword ex_voxel_access: Allow access Voxel administrative access. Defaults to False. 
- @type ex_voxel_access: C{bool} + :type ex_voxel_access: ``bool`` + + :rtype: :class:`Node` or ``None`` """ # assert that disk > 0 @@ -210,51 +214,47 @@ voxel_access = "true" if voxel_access else "false" params = { - 'method': 'voxel.voxcloud.create', - 'hostname': kwargs["name"], - 'disk_size': int(kwargs["size"].disk), - 'facility': kwargs["location"].id, - 'image_id': kwargs["image"].id, + 'method': 'voxel.voxcloud.create', + 'hostname': kwargs["name"], + 'disk_size': int(kwargs["size"].disk), + 'facility': kwargs["location"].id, + 'image_id': kwargs["image"].id, 'processing_cores': kwargs["size"].ram / RAM_PER_CPU, - 'backend_ip': kwargs.get("ex_privateip", None), - 'frontend_ip': kwargs.get("ex_publicip", None), - 'admin_password': kwargs.get("ex_rootpass", None), + 'backend_ip': kwargs.get("ex_privateip", None), + 'frontend_ip': kwargs.get("ex_publicip", None), + 'admin_password': kwargs.get("ex_rootpass", None), 'console_password': kwargs.get("ex_consolepass", None), - 'ssh_username': kwargs.get("ex_sshuser", None), - 'ssh_password': kwargs.get("ex_sshpass", None), - 'voxel_access': voxel_access, + 'ssh_username': kwargs.get("ex_sshuser", None), + 'ssh_password': kwargs.get("ex_sshpass", None), + 'voxel_access': voxel_access, } object = self.connection.request('/', params=params).object if self._getstatus(object): return Node( - id = object.findtext("device/id"), - name = kwargs["name"], - state = NODE_STATE_MAP[object.findtext("device/status")], - public_ip = kwargs.get("publicip", None), - private_ip = kwargs.get("privateip", None), - driver = self.connection.driver + id=object.findtext("device/id"), + name=kwargs["name"], + state=NODE_STATE_MAP[object.findtext("device/status")], + public_ips=kwargs.get("publicip", None), + private_ips=kwargs.get("privateip", None), + driver=self.connection.driver ) else: return None def reboot_node(self, node): - """ - Reboot the node by passing in the node object - """ params = {'method': 'voxel.devices.power', 
'device_id': node.id, 'power_action': 'reboot'} - return self._getstatus(self.connection.request('/', params=params).object) + return self._getstatus( + self.connection.request('/', params=params).object) def destroy_node(self, node): - """ - Destroy node by passing in the node object - """ params = {'method': 'voxel.voxcloud.delete', 'device_id': node.id} - return self._getstatus(self.connection.request('/', params=params).object) + return self._getstatus( + self.connection.request('/', params=params).object) def list_locations(self): params = {"method": "voxel.voxcloud.facilities.list"} @@ -266,7 +266,6 @@ status = element.attrib["stat"] return status == "ok" - def _to_locations(self, object): return [NodeLocation(element.attrib["label"], element.findtext("description"), @@ -286,23 +285,23 @@ public_ip = private_ip = None ipassignments = element.findall("ipassignments/ipassignment") for ip in ipassignments: - if ip.attrib["type"] =="frontend": + if ip.attrib["type"] == "frontend": public_ip = ip.text elif ip.attrib["type"] == "backend": private_ip = ip.text - nodes.append(Node(id= element.attrib['id'], - name=element.attrib['label'], - state=state, - public_ip= public_ip, - private_ip= private_ip, - driver=self.connection.driver)) + nodes.append(Node(id=element.attrib['id'], + name=element.attrib['label'], + state=state, + public_ips=public_ip, + private_ips=private_ip, + driver=self.connection.driver)) return nodes def _to_images(self, object): images = [] for element in object.findall("images/image"): - images.append(NodeImage(id = element.attrib["id"], - name = element.attrib["summary"], - driver = self.connection.driver)) + images.append(NodeImage(id=element.attrib["id"], + name=element.attrib["summary"], + driver=self.connection.driver)) return images diff -Nru libcloud-0.5.0/libcloud/compute/drivers/vpsnet.py libcloud-0.15.1/libcloud/compute/drivers/vpsnet.py --- libcloud-0.5.0/libcloud/compute/drivers/vpsnet.py 2011-05-21 11:07:38.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/compute/drivers/vpsnet.py 2013-11-29 12:35:04.000000000 +0000 @@ -18,12 +18,14 @@ import base64 try: - import json -except: import simplejson as json +except ImportError: + import json -from libcloud.common.base import ConnectionUserAndKey, Response -from libcloud.common.types import InvalidCredsError +from libcloud.utils.py3 import b + +from libcloud.common.base import ConnectionUserAndKey, JsonResponse +from libcloud.common.types import InvalidCredsError, MalformedResponseError from libcloud.compute.providers import Provider from libcloud.compute.types import NodeState from libcloud.compute.base import Node, NodeDriver @@ -37,13 +39,11 @@ BANDWIDTH_PER_NODE = 250 -class VPSNetResponse(Response): - +class VPSNetResponse(JsonResponse): def parse_body(self): try: - js = json.loads(self.body) - return js - except ValueError: + return super(VPSNetResponse, self).parse_body() + except MalformedResponseError: return self.body def success(self): @@ -54,12 +54,13 @@ def parse_error(self): try: - errors = json.loads(self.body)['errors'][0] - except ValueError: + errors = super(VPSNetResponse, self).parse_body()['errors'][0] + except MalformedResponseError: return self.body else: return "\n".join(errors) + class VPSNetConnection(ConnectionUserAndKey): """ Connection class for the VPS.net driver @@ -68,11 +69,14 @@ host = API_HOST responseCls = VPSNetResponse + allow_insecure = False + def add_default_headers(self, headers): - user_b64 = base64.b64encode('%s:%s' % (self.user_id, self.key)) - headers['Authorization'] = 'Basic %s' % (user_b64) + user_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key))) + headers['Authorization'] = 'Basic %s' % (user_b64.decode('utf-8')) return headers + class VPSNetNodeDriver(NodeDriver): """ VPS.net node driver @@ -81,6 +85,7 @@ type = Provider.VPSNET api_name = 'vps_net' name = "vps.net" + website = 'http://vps.net/' connectionCls = VPSNetConnection def _to_node(self, vm): @@ -92,9 +97,10 @@ n = 
Node(id=vm['id'], name=vm['label'], state=state, - public_ip=[vm.get('primary_ip_address', None)], - private_ip=[], - extra={'slices_count':vm['slices_count']}, # Number of nodes consumed by VM + public_ips=[vm.get('primary_ip_address', None)], + private_ips=[], + extra={'slices_count': vm['slices_count']}, + # Number of nodes consumed by VM driver=self.connection.driver) return n @@ -122,39 +128,42 @@ def create_node(self, name, image, size, **kwargs): """Create a new VPS.net node - See L{NodeDriver.create_node} for more keyword args. - @keyword ex_backups_enabled: Enable automatic backups - @type ex_backups_enabled: C{bool} + @inherits: :class:`NodeDriver.create_node` + + :keyword ex_backups_enabled: Enable automatic backups + :type ex_backups_enabled: ``bool`` - @keyword ex_fqdn: Fully Qualified domain of the node - @type ex_fqdn: C{string} + :keyword ex_fqdn: Fully Qualified domain of the node + :type ex_fqdn: ``str`` """ headers = {'Content-Type': 'application/json'} request = {'virtual_machine': - {'label': name, - 'fqdn': kwargs.get('ex_fqdn', ''), - 'system_template_id': image.id, - 'backups_enabled': kwargs.get('ex_backups_enabled', 0), - 'slices_required': size.id}} + {'label': name, + 'fqdn': kwargs.get('ex_fqdn', ''), + 'system_template_id': image.id, + 'backups_enabled': kwargs.get('ex_backups_enabled', 0), + 'slices_required': size.id}} res = self.connection.request('/virtual_machines.%s' % (API_VERSION,), - data=json.dumps(request), - headers=headers, - method='POST') + data=json.dumps(request), + headers=headers, + method='POST') node = self._to_node(res.object['virtual_machine']) return node def reboot_node(self, node): - res = self.connection.request('/virtual_machines/%s/%s.%s' % - (node.id, 'reboot', API_VERSION), - method="POST") + res = self.connection.request( + '/virtual_machines/%s/%s.%s' % (node.id, + 'reboot', + API_VERSION), + method="POST") node = self._to_node(res.object['virtual_machine']) return True def list_sizes(self, 
location=None): res = self.connection.request('/nodes.%s' % (API_VERSION,)) available_nodes = len([size for size in res.object - if size['slice']['virtual_machine_id']]) + if size['slice']['virtual_machine_id']]) sizes = [self._to_size(i) for i in range(1, available_nodes + 1)] return sizes diff -Nru libcloud-0.5.0/libcloud/compute/__init__.py libcloud-0.15.1/libcloud/compute/__init__.py --- libcloud-0.5.0/libcloud/compute/__init__.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/__init__.py 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,3 @@ +""" +Module for working with Cloud Servers +""" diff -Nru libcloud-0.5.0/libcloud/compute/providers.py libcloud-0.15.1/libcloud/compute/providers.py --- libcloud-0.5.0/libcloud/compute/providers.py 2011-05-08 22:38:53.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/providers.py 2014-06-11 14:27:59.000000000 +0000 @@ -16,8 +16,10 @@ Provider related utilities """ -from libcloud.utils import get_driver as _get_provider_driver -from libcloud.compute.types import Provider +from libcloud.utils.misc import get_driver as _get_provider_driver +from libcloud.utils.misc import set_driver as _set_provider_driver +from libcloud.compute.types import Provider, DEPRECATED_RACKSPACE_PROVIDERS +from libcloud.compute.types import OLD_CONSTANT_TO_NEW_MAPPING __all__ = [ "Provider", @@ -26,66 +28,148 @@ DRIVERS = { Provider.DUMMY: - ('libcloud.compute.drivers.dummy', 'DummyNodeDriver'), + ('libcloud.compute.drivers.dummy', 'DummyNodeDriver'), Provider.EC2_US_EAST: - ('libcloud.compute.drivers.ec2', 'EC2NodeDriver'), + ('libcloud.compute.drivers.ec2', 'EC2NodeDriver'), Provider.EC2_EU_WEST: - ('libcloud.compute.drivers.ec2', 'EC2EUNodeDriver'), + ('libcloud.compute.drivers.ec2', 'EC2EUNodeDriver'), Provider.EC2_US_WEST: - ('libcloud.compute.drivers.ec2', 'EC2USWestNodeDriver'), + ('libcloud.compute.drivers.ec2', 'EC2USWestNodeDriver'), + Provider.EC2_US_WEST_OREGON: + ('libcloud.compute.drivers.ec2', 
'EC2USWestOregonNodeDriver'), Provider.EC2_AP_SOUTHEAST: - ('libcloud.compute.drivers.ec2', 'EC2APSENodeDriver'), + ('libcloud.compute.drivers.ec2', 'EC2APSENodeDriver'), Provider.EC2_AP_NORTHEAST: - ('libcloud.compute.drivers.ec2', 'EC2APNENodeDriver'), + ('libcloud.compute.drivers.ec2', 'EC2APNENodeDriver'), + Provider.EC2_SA_EAST: + ('libcloud.compute.drivers.ec2', 'EC2SAEastNodeDriver'), + Provider.EC2_AP_SOUTHEAST2: + ('libcloud.compute.drivers.ec2', 'EC2APSESydneyNodeDriver'), Provider.ECP: - ('libcloud.compute.drivers.ecp', 'ECPNodeDriver'), + ('libcloud.compute.drivers.ecp', 'ECPNodeDriver'), + Provider.ELASTICHOSTS: + ('libcloud.compute.drivers.elastichosts', 'ElasticHostsNodeDriver'), Provider.ELASTICHOSTS_UK1: - ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUK1NodeDriver'), + ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUK1NodeDriver'), Provider.ELASTICHOSTS_UK2: - ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUK2NodeDriver'), + ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUK2NodeDriver'), Provider.ELASTICHOSTS_US1: - ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS1NodeDriver'), + ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS1NodeDriver'), + Provider.ELASTICHOSTS_US2: + ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS2NodeDriver'), + Provider.ELASTICHOSTS_US3: + ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS3NodeDriver'), + Provider.ELASTICHOSTS_CA1: + ('libcloud.compute.drivers.elastichosts', 'ElasticHostsCA1NodeDriver'), + Provider.ELASTICHOSTS_AU1: + ('libcloud.compute.drivers.elastichosts', 'ElasticHostsAU1NodeDriver'), + Provider.ELASTICHOSTS_CN1: + ('libcloud.compute.drivers.elastichosts', 'ElasticHostsCN1NodeDriver'), + Provider.SKALICLOUD: + ('libcloud.compute.drivers.skalicloud', 'SkaliCloudNodeDriver'), + Provider.SERVERLOVE: + ('libcloud.compute.drivers.serverlove', 'ServerLoveNodeDriver'), Provider.CLOUDSIGMA: - ('libcloud.compute.drivers.cloudsigma', 
'CloudSigmaZrhNodeDriver'), + ('libcloud.compute.drivers.cloudsigma', 'CloudSigmaNodeDriver'), + Provider.GCE: + ('libcloud.compute.drivers.gce', 'GCENodeDriver'), Provider.GOGRID: - ('libcloud.compute.drivers.gogrid', 'GoGridNodeDriver'), + ('libcloud.compute.drivers.gogrid', 'GoGridNodeDriver'), Provider.RACKSPACE: - ('libcloud.compute.drivers.rackspace', 'RackspaceNodeDriver'), - Provider.RACKSPACE_UK: - ('libcloud.compute.drivers.rackspace', 'RackspaceUKNodeDriver'), - Provider.SLICEHOST: - ('libcloud.compute.drivers.slicehost', 'SlicehostNodeDriver'), + ('libcloud.compute.drivers.rackspace', 'RackspaceNodeDriver'), + Provider.RACKSPACE_FIRST_GEN: + ('libcloud.compute.drivers.rackspace', 'RackspaceFirstGenNodeDriver'), + Provider.HPCLOUD: + ('libcloud.compute.drivers.hpcloud', 'HPCloudNodeDriver'), + Provider.KILI: + ('libcloud.compute.drivers.kili', 'KiliCloudNodeDriver'), Provider.VPSNET: - ('libcloud.compute.drivers.vpsnet', 'VPSNetNodeDriver'), + ('libcloud.compute.drivers.vpsnet', 'VPSNetNodeDriver'), Provider.LINODE: - ('libcloud.compute.drivers.linode', 'LinodeNodeDriver'), + ('libcloud.compute.drivers.linode', 'LinodeNodeDriver'), Provider.RIMUHOSTING: - ('libcloud.compute.drivers.rimuhosting', 'RimuHostingNodeDriver'), + ('libcloud.compute.drivers.rimuhosting', 'RimuHostingNodeDriver'), Provider.VOXEL: - ('libcloud.compute.drivers.voxel', 'VoxelNodeDriver'), + ('libcloud.compute.drivers.voxel', 'VoxelNodeDriver'), Provider.SOFTLAYER: - ('libcloud.compute.drivers.softlayer', 'SoftLayerNodeDriver'), + ('libcloud.compute.drivers.softlayer', 'SoftLayerNodeDriver'), Provider.EUCALYPTUS: - ('libcloud.compute.drivers.ec2', 'EucNodeDriver'), + ('libcloud.compute.drivers.ec2', 'EucNodeDriver'), Provider.IBM: - ('libcloud.compute.drivers.ibm_sbc', 'IBMNodeDriver'), + ('libcloud.compute.drivers.ibm_sce', 'IBMNodeDriver'), Provider.OPENNEBULA: - ('libcloud.compute.drivers.opennebula', 'OpenNebulaNodeDriver'), + ('libcloud.compute.drivers.opennebula', 
'OpenNebulaNodeDriver'), Provider.DREAMHOST: - ('libcloud.compute.drivers.dreamhost', 'DreamhostNodeDriver'), + ('libcloud.compute.drivers.dreamhost', 'DreamhostNodeDriver'), Provider.BRIGHTBOX: - ('libcloud.compute.drivers.brightbox', 'BrightboxNodeDriver'), + ('libcloud.compute.drivers.brightbox', 'BrightboxNodeDriver'), Provider.NIMBUS: - ('libcloud.compute.drivers.ec2', 'NimbusNodeDriver'), + ('libcloud.compute.drivers.ec2', 'NimbusNodeDriver'), Provider.BLUEBOX: - ('libcloud.compute.drivers.bluebox', 'BlueboxNodeDriver'), + ('libcloud.compute.drivers.bluebox', 'BlueboxNodeDriver'), Provider.GANDI: - ('libcloud.compute.drivers.gandi', 'GandiNodeDriver'), + ('libcloud.compute.drivers.gandi', 'GandiNodeDriver'), Provider.OPSOURCE: - ('libcloud.compute.drivers.opsource', 'OpsourceNodeDriver'), + ('libcloud.compute.drivers.opsource', 'OpsourceNodeDriver'), Provider.OPENSTACK: - ('libcloud.compute.drivers.rackspace', 'OpenStackNodeDriver'), + ('libcloud.compute.drivers.openstack', 'OpenStackNodeDriver'), + Provider.NINEFOLD: + ('libcloud.compute.drivers.ninefold', 'NinefoldNodeDriver'), + Provider.VCLOUD: + ('libcloud.compute.drivers.vcloud', 'VCloudNodeDriver'), + Provider.TERREMARK: + ('libcloud.compute.drivers.vcloud', 'TerremarkDriver'), + Provider.CLOUDSTACK: + ('libcloud.compute.drivers.cloudstack', 'CloudStackNodeDriver'), + Provider.LIBVIRT: + ('libcloud.compute.drivers.libvirt_driver', 'LibvirtNodeDriver'), + Provider.JOYENT: + ('libcloud.compute.drivers.joyent', 'JoyentNodeDriver'), + Provider.VCL: + ('libcloud.compute.drivers.vcl', 'VCLNodeDriver'), + Provider.KTUCLOUD: + ('libcloud.compute.drivers.ktucloud', 'KTUCloudNodeDriver'), + Provider.HOSTVIRTUAL: + ('libcloud.compute.drivers.hostvirtual', 'HostVirtualNodeDriver'), + Provider.ABIQUO: + ('libcloud.compute.drivers.abiquo', 'AbiquoNodeDriver'), + Provider.DIGITAL_OCEAN: + ('libcloud.compute.drivers.digitalocean', 'DigitalOceanNodeDriver'), + Provider.NEPHOSCALE: + 
('libcloud.compute.drivers.nephoscale', 'NephoscaleNodeDriver'), + Provider.CLOUDFRAMES: + ('libcloud.compute.drivers.cloudframes', 'CloudFramesNodeDriver'), + Provider.EXOSCALE: + ('libcloud.compute.drivers.exoscale', 'ExoscaleNodeDriver'), + Provider.IKOULA: + ('libcloud.compute.drivers.ikoula', 'IkoulaNodeDriver'), + Provider.OUTSCALE_SAS: + ('libcloud.compute.drivers.ec2', 'OutscaleSASNodeDriver'), + Provider.OUTSCALE_INC: + ('libcloud.compute.drivers.ec2', 'OutscaleINCNodeDriver'), + + # Deprecated + Provider.CLOUDSIGMA_US: + ('libcloud.compute.drivers.cloudsigma', 'CloudSigmaLvsNodeDriver'), } + def get_driver(provider): + if provider in DEPRECATED_RACKSPACE_PROVIDERS: + id_to_name_map = dict([(v, k) for k, v in Provider.__dict__.items()]) + old_name = id_to_name_map[provider] + new_name = id_to_name_map[OLD_CONSTANT_TO_NEW_MAPPING[provider]] + + url = 'http://s.apache.org/lc0140un' + msg = ('Provider constant %s has been removed. New constant ' + 'is now called %s.\n' + 'For more information on this change and how to modify your ' + 'code to work with it, please visit: %s' % + (old_name, new_name, url)) + raise Exception(msg) + return _get_provider_driver(DRIVERS, provider) + + +def set_driver(provider, module, klass): + return _set_provider_driver(DRIVERS, provider, module, klass) diff -Nru libcloud-0.5.0/libcloud/compute/ssh.py libcloud-0.15.1/libcloud/compute/ssh.py --- libcloud-0.5.0/libcloud/compute/ssh.py 2011-05-10 15:36:07.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/ssh.py 2014-06-11 14:27:59.000000000 +0000 @@ -14,8 +14,9 @@ # limitations under the License. """ -Wraps multiple ways to communicate over SSH +Wraps multiple ways to communicate over SSH. """ + have_paramiko = False try: @@ -28,7 +29,48 @@ # warning on Python 2.6. 
# Ref: https://bugs.launchpad.net/paramiko/+bug/392973 +import os +import time +import subprocess +import logging +import warnings + from os.path import split as psplit +from os.path import join as pjoin + +from libcloud.utils.logging import ExtraLogFormatter +from libcloud.utils.py3 import StringIO + +__all__ = [ + 'BaseSSHClient', + 'ParamikoSSHClient', + 'ShellOutSSHClient', + + 'SSHCommandTimeoutError' +] + + +# Maximum number of bytes to read at once from a socket +CHUNK_SIZE = 1024 + + +class SSHCommandTimeoutError(Exception): + """ + Exception which is raised when an SSH command times out. + """ + def __init__(self, cmd, timeout): + self.cmd = cmd + self.timeout = timeout + message = 'Command didn\'t finish in %s seconds' % (timeout) + super(SSHCommandTimeoutError, self).__init__(message) + + def __repr__(self): + return ('' % + (self.cmd, self.timeout)) + + def __str__(self): + return self.message + class BaseSSHClient(object): """ @@ -36,115 +78,204 @@ """ def __init__(self, hostname, port=22, username='root', password=None, - key=None, timeout=None): + key=None, key_files=None, timeout=None): """ - @type hostname: C{str} - @keyword hostname: Hostname or IP address to connect to. + :type hostname: ``str`` + :keyword hostname: Hostname or IP address to connect to. - @type port: C{int} - @keyword port: TCP port to communicate on, defaults to 22. + :type port: ``int`` + :keyword port: TCP port to communicate on, defaults to 22. - @type username: C{str} - @keyword username: Username to use, defaults to root. + :type username: ``str`` + :keyword username: Username to use, defaults to root. - @type password: C{str} - @keyword password: Password to authenticate with. + :type password: ``str`` + :keyword password: Password to authenticate with or a password used + to unlock a private key if a password protected key + is used. - @type key: C{list} - @keyword key: Private SSH keys to authenticate with. + :param key: Deprecated in favor of ``key_files`` argument. 
+ + :type key_files: ``str`` or ``list`` + :keyword key_files: A list of paths to the private key files to use. """ + if key is not None: + message = ('You are using deprecated "key" argument which has ' + 'been replaced with "key_files" argument') + warnings.warn(message, DeprecationWarning) + + # key_files has precedent + key_files = key if not key_files else key_files + self.hostname = hostname self.port = port self.username = username self.password = password - self.key = key + self.key_files = key_files self.timeout = timeout def connect(self): """ Connect to the remote node over SSH. - @return: C{bool} + :return: True if the connection has been successfuly established, False + otherwise. + :rtype: ``bool`` """ - raise NotImplementedError, \ - 'connect not implemented for this ssh client' + raise NotImplementedError( + 'connect not implemented for this ssh client') - def put(self, path, contents=None, chmod=None): + def put(self, path, contents=None, chmod=None, mode='w'): """ Upload a file to the remote node. - @type path: C{str} - @keyword path: File path on the remote node. + :type path: ``str`` + :keyword path: File path on the remote node. + + :type contents: ``str`` + :keyword contents: File Contents. - @type contents: C{str} - @keyword contents: File Contents. + :type chmod: ``int`` + :keyword chmod: chmod file to this after creation. - @type chmod: C{int} - @keyword chmod: chmod file to this after creation. + :type mode: ``str`` + :keyword mode: Mode in which the file is opened. + + :return: Full path to the location where a file has been saved. + :rtype: ``str`` """ - raise NotImplementedError, \ - 'put not implemented for this ssh client' + raise NotImplementedError( + 'put not implemented for this ssh client') def delete(self, path): """ Delete/Unlink a file on the remote node. - @type path: C{str} - @keyword path: File path on the remote node. + :type path: ``str`` + :keyword path: File path on the remote node. 
+ + :return: True if the file has been successfuly deleted, False + otherwise. + :rtype: ``bool`` """ - raise NotImplementedError, \ - 'delete not implemented for this ssh client' + raise NotImplementedError( + 'delete not implemented for this ssh client') def run(self, cmd): """ Run a command on a remote node. - @type cmd: C{str} - @keyword cmd: Command to run. + :type cmd: ``str`` + :keyword cmd: Command to run. - @return C{list} of [stdout, stderr, exit_status] + :return ``list`` of [stdout, stderr, exit_status] """ - raise NotImplementedError, \ - 'run not implemented for this ssh client' + raise NotImplementedError( + 'run not implemented for this ssh client') def close(self): """ Shutdown connection to the remote node. - """ - raise NotImplementedError, \ - 'close not implemented for this ssh client' + + :return: True if the connection has been successfuly closed, False + otherwise. + :rtype: ``bool`` + """ + raise NotImplementedError( + 'close not implemented for this ssh client') + + def _get_and_setup_logger(self): + logger = logging.getLogger('libcloud.compute.ssh') + path = os.getenv('LIBCLOUD_DEBUG') + + if path: + handler = logging.FileHandler(path) + handler.setFormatter(ExtraLogFormatter()) + logger.addHandler(handler) + logger.setLevel(logging.DEBUG) + + return logger + class ParamikoSSHClient(BaseSSHClient): + """ A SSH Client powered by Paramiko. 
""" def __init__(self, hostname, port=22, username='root', password=None, - key=None, timeout=None): - super(ParamikoSSHClient, self).__init__(hostname, port, username, - password, key, timeout) + key=None, key_files=None, key_material=None, timeout=None): + """ + Authentication is always attempted in the following order: + + - The key passed in (if key is provided) + - Any key we can find through an SSH agent (only if no password and + key is provided) + - Any "id_rsa" or "id_dsa" key discoverable in ~/.ssh/ (only if no + password and key is provided) + - Plain username/password auth, if a password was given (if password is + provided) + """ + if key_files and key_material: + raise ValueError(('key_files and key_material arguments are ' + 'mutually exclusive')) + + super(ParamikoSSHClient, self).__init__(hostname=hostname, port=port, + username=username, + password=password, + key=key, + key_files=key_files, + timeout=timeout) + + self.key_material = key_material + self.client = paramiko.SSHClient() self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + self.logger = self._get_and_setup_logger() def connect(self): conninfo = {'hostname': self.hostname, 'port': self.port, 'username': self.username, - 'password': self.password, 'allow_agent': False, 'look_for_keys': False} + if self.password: + conninfo['password'] = self.password + + if self.key_files: + conninfo['key_filename'] = self.key_files + + if self.key_material: + conninfo['pkey'] = self._get_pkey_object(key=self.key_material) + + if not self.password and not (self.key_files or self.key_material): + conninfo['allow_agent'] = True + conninfo['look_for_keys'] = True + if self.timeout: conninfo['timeout'] = self.timeout + extra = {'_hostname': self.hostname, '_port': self.port, + '_username': self.username, '_timeout': self.timeout} + self.logger.debug('Connecting to server', extra=extra) + self.client.connect(**conninfo) return True - def put(self, path, contents=None, chmod=None): + def 
put(self, path, contents=None, chmod=None, mode='w'): + extra = {'_path': path, '_mode': mode, '_chmod': chmod} + self.logger.debug('Uploading file', extra=extra) + sftp = self.client.open_sftp() # less than ideal, but we need to mkdir stuff otherwise file() fails head, tail = psplit(path) + if path[0] == "/": sftp.chdir("/") + else: + # Relative path - start from a home directory (~) + sftp.chdir('.') + for part in head.split("/"): if part != "": try: @@ -154,41 +285,246 @@ # catch EEXIST consistently *sigh* pass sftp.chdir(part) - ak = sftp.file(tail, mode='w') + + cwd = sftp.getcwd() + + ak = sftp.file(tail, mode=mode) ak.write(contents) if chmod is not None: ak.chmod(chmod) ak.close() sftp.close() + if path[0] == '/': + file_path = path + else: + file_path = pjoin(cwd, path) + + return file_path + def delete(self, path): + extra = {'_path': path} + self.logger.debug('Deleting file', extra=extra) + sftp = self.client.open_sftp() sftp.unlink(path) sftp.close() + return True - def run(self, cmd): - # based on exec_command() + def run(self, cmd, timeout=None): + """ + Note: This function is based on paramiko's exec_command() + method. + + :param timeout: How long to wait (in seconds) for the command to + finish (optional). + :type timeout: ``float`` + """ + extra = {'_cmd': cmd} + self.logger.debug('Executing command', extra=extra) + + # Use the system default buffer size bufsize = -1 - t = self.client.get_transport() - chan = t.open_session() + + transport = self.client.get_transport() + chan = transport.open_session() + + start_time = time.time() chan.exec_command(cmd) + + stdout = StringIO() + stderr = StringIO() + + # Create a stdin file and immediately close it to prevent any + # interactive script from hanging the process. 
stdin = chan.makefile('wb', bufsize) - stdout = chan.makefile('rb', bufsize) - stderr = chan.makefile_stderr('rb', bufsize) - #stdin, stdout, stderr = self.client.exec_command(cmd) stdin.close() + + # Receive all the output + # Note #1: This is used instead of chan.makefile approach to prevent + # buffering issues and hanging if the executed command produces a lot + # of output. + # + # Note #2: If you are going to remove "ready" checks inside the loop + # you are going to have a bad time. Trying to consume from a channel + # which is not ready will block for indefinitely. + exit_status_ready = chan.exit_status_ready() + + while not exit_status_ready: + current_time = time.time() + elapsed_time = (current_time - start_time) + + if timeout and (elapsed_time > timeout): + # TODO: Is this the right way to clean up? + chan.close() + + raise SSHCommandTimeoutError(cmd=cmd, timeout=timeout) + + if chan.recv_ready(): + data = chan.recv(CHUNK_SIZE) + + while data: + stdout.write(data) + ready = chan.recv_ready() + + if not ready: + break + + data = chan.recv(CHUNK_SIZE) + + if chan.recv_stderr_ready(): + data = chan.recv_stderr(CHUNK_SIZE) + + while data: + stderr.write(data) + ready = chan.recv_stderr_ready() + + if not ready: + break + + data = chan.recv_stderr(CHUNK_SIZE) + + # We need to check the exist status here, because the command could + # print some output and exit during this sleep bellow. + exit_status_ready = chan.exit_status_ready() + + if exit_status_ready: + break + + # Short sleep to prevent busy waiting + time.sleep(1.5) + + # Receive the exit status code of the command we ran. 
status = chan.recv_exit_status() - so = stdout.read() - se = stderr.read() - return [so, se, status] + + stdout = stdout.getvalue() + stderr = stderr.getvalue() + + extra = {'_status': status, '_stdout': stdout, '_stderr': stderr} + self.logger.debug('Command finished', extra=extra) + + return [stdout, stderr, status] def close(self): + self.logger.debug('Closing server connection') + self.client.close() + return True + + def _get_pkey_object(self, key): + """ + Try to detect private key type and return paramiko.PKey object. + """ + + for cls in [paramiko.RSAKey, paramiko.DSSKey, paramiko.ECDSAKey]: + try: + key = cls.from_private_key(StringIO(key)) + except paramiko.ssh_exception.SSHException: + # Invalid key, try other key type + pass + else: + return key + + msg = 'Invalid or unsupported key type' + raise paramiko.ssh_exception.SSHException(msg) + class ShellOutSSHClient(BaseSSHClient): - # TODO: write this one + """ + This client shells out to "ssh" binary to run commands on the remote + server. + + Note: This client should not be used in production. + """ + + def __init__(self, hostname, port=22, username='root', password=None, + key=None, key_files=None, timeout=None): + super(ShellOutSSHClient, self).__init__(hostname=hostname, + port=port, username=username, + password=password, + key=key, + key_files=key_files, + timeout=timeout) + if self.password: + raise ValueError('ShellOutSSHClient only supports key auth') + + child = subprocess.Popen(['ssh'], stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + child.communicate() + + if child.returncode == 127: + raise ValueError('ssh client is not available') + + self.logger = self._get_and_setup_logger() + + def connect(self): + """ + This client doesn't support persistent connections establish a new + connection every time "run" method is called. 
+ """ + return True + + def run(self, cmd): + return self._run_remote_shell_command([cmd]) + + def put(self, path, contents=None, chmod=None, mode='w'): + if mode == 'w': + redirect = '>' + elif mode == 'a': + redirect = '>>' + else: + raise ValueError('Invalid mode: ' + mode) + + cmd = ['echo "%s" %s %s' % (contents, redirect, path)] + self._run_remote_shell_command(cmd) + return path + + def delete(self, path): + cmd = ['rm', '-rf', path] + self._run_remote_shell_command(cmd) + return True + + def close(self): + return True + + def _get_base_ssh_command(self): + cmd = ['ssh'] + + if self.key_files: + cmd += ['-i', self.key_files] + + if self.timeout: + cmd += ['-oConnectTimeout=%s' % (self.timeout)] + + cmd += ['%s@%s' % (self.username, self.hostname)] + + return cmd + + def _run_remote_shell_command(self, cmd): + """ + Run a command on a remote server. + + :param cmd: Command to run. + :type cmd: ``list`` of ``str`` + + :return: Command stdout, stderr and status code. + :rtype: ``tuple`` + """ + base_cmd = self._get_base_ssh_command() + full_cmd = base_cmd + [' '.join(cmd)] + + self.logger.debug('Executing command: "%s"' % (' '.join(full_cmd))) + + child = subprocess.Popen(full_cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = child.communicate() + return (stdout, stderr, child.returncode) + + +class MockSSHClient(BaseSSHClient): pass + SSHClient = ParamikoSSHClient if not have_paramiko: - SSHClient = ShellOutSSHClient + SSHClient = MockSSHClient diff -Nru libcloud-0.5.0/libcloud/compute/types.py libcloud-0.15.1/libcloud/compute/types.py --- libcloud-0.5.0/libcloud/compute/types.py 2011-05-08 22:38:53.000000000 +0000 +++ libcloud-0.15.1/libcloud/compute/types.py 2014-06-11 14:27:59.000000000 +0000 @@ -18,6 +18,7 @@ from libcloud.common.types import LibcloudError, MalformedResponseError from libcloud.common.types import InvalidCredsError, InvalidCredsException + __all__ = [ "Provider", "NodeState", @@ -28,95 +29,221 @@ "LibcloudError", 
"MalformedResponseError", "InvalidCredsError", - "InvalidCredsException" - ] + "InvalidCredsException", + "DEPRECATED_RACKSPACE_PROVIDERS", + "OLD_CONSTANT_TO_NEW_MAPPING" +] + + class Provider(object): """ Defines for each of the supported providers - @cvar DUMMY: Example provider - @cvar EC2_US_EAST: Amazon AWS US N. Virgina - @cvar EC2_US_WEST: Amazon AWS US N. California - @cvar EC2_EU_WEST: Amazon AWS EU Ireland - @cvar RACKSPACE: Rackspace Cloud Servers - @cvar RACKSPACE_UK: Rackspace UK Cloud Servers - @cvar SLICEHOST: Slicehost.com - @cvar GOGRID: GoGrid - @cvar VPSNET: VPS.net - @cvar LINODE: Linode.com - @cvar VCLOUD: vmware vCloud - @cvar RIMUHOSTING: RimuHosting.com - @cvar ECP: Enomaly - @cvar IBM: IBM Developer Cloud - @cvar OPENNEBULA: OpenNebula.org - @cvar DREAMHOST: DreamHost Private Server - @cvar CLOUDSIGMA: CloudSigma - @cvar NIMBUS: Nimbus - @cvar BLUEBOX: Bluebox - @cvar OPSOURCE: Opsource Cloud - """ - DUMMY = 0 - EC2 = 1 # deprecated name - EC2_US_EAST = 1 - EC2_EU = 2 # deprecated name - EC2_EU_WEST = 2 - RACKSPACE = 3 - SLICEHOST = 4 - GOGRID = 5 - VPSNET = 6 - LINODE = 7 - VCLOUD = 8 - RIMUHOSTING = 9 - EC2_US_WEST = 10 - VOXEL = 11 - SOFTLAYER = 12 - EUCALYPTUS = 13 - ECP = 14 - IBM = 15 - OPENNEBULA = 16 - DREAMHOST = 17 - ELASTICHOSTS = 18 - ELASTICHOSTS_UK1 = 19 - ELASTICHOSTS_UK2 = 20 - ELASTICHOSTS_US1 = 21 - EC2_AP_SOUTHEAST = 22 - RACKSPACE_UK = 23 - BRIGHTBOX = 24 - CLOUDSIGMA = 25 - EC2_AP_NORTHEAST = 26 - NIMBUS = 27 - BLUEBOX = 28 - GANDI = 29 - OPSOURCE = 30 - OPENSTACK = 31 + :cvar DUMMY: Example provider + :cvar EC2_US_EAST: Amazon AWS US N. Virgina + :cvar EC2_US_WEST: Amazon AWS US N. 
California + :cvar EC2_EU_WEST: Amazon AWS EU Ireland + :cvar RACKSPACE: Rackspace next-gen OpenStack based Cloud Servers + :cvar RACKSPACE_FIRST_GEN: Rackspace First Gen Cloud Servers + :cvar GCE: Google Compute Engine + :cvar GOGRID: GoGrid + :cvar VPSNET: VPS.net + :cvar LINODE: Linode.com + :cvar VCLOUD: vmware vCloud + :cvar RIMUHOSTING: RimuHosting.com + :cvar ECP: Enomaly + :cvar IBM: IBM Developer Cloud + :cvar OPENNEBULA: OpenNebula.org + :cvar DREAMHOST: DreamHost Private Server + :cvar ELASTICHOSTS: ElasticHosts.com + :cvar CLOUDSIGMA: CloudSigma + :cvar NIMBUS: Nimbus + :cvar BLUEBOX: Bluebox + :cvar OPSOURCE: Opsource Cloud + :cvar NINEFOLD: Ninefold + :cvar TERREMARK: Terremark + :cvar EC2_US_WEST_OREGON: Amazon AWS US West 2 (Oregon) + :cvar CLOUDSTACK: CloudStack + :cvar CLOUDSIGMA_US: CloudSigma US Las Vegas + :cvar LIBVIRT: Libvirt driver + :cvar JOYENT: Joyent driver + :cvar VCL: VCL driver + :cvar KTUCLOUD: kt ucloud driver + :cvar GRIDSPOT: Gridspot driver + :cvar ABIQUO: Abiquo driver + :cvar NEPHOSCALE: NephoScale driver + :cvar EXOSCALE: Exoscale driver. + :cvar IKOULA: Ikoula driver. + :cvar OUTSCALE_SAS: Outscale SAS driver. + :cvar OUTSCALE_INC: Outscale INC driver. 
+ """ + DUMMY = 'dummy' + EC2 = 'ec2_us_east' + RACKSPACE = 'rackspace' + GCE = 'gce' + GOGRID = 'gogrid' + VPSNET = 'vpsnet' + LINODE = 'linode' + VCLOUD = 'vcloud' + RIMUHOSTING = 'rimuhosting' + VOXEL = 'voxel' + SOFTLAYER = 'softlayer' + EUCALYPTUS = 'eucalyptus' + ECP = 'ecp' + IBM = 'ibm' + OPENNEBULA = 'opennebula' + DREAMHOST = 'dreamhost' + ELASTICHOSTS = 'elastichosts' + BRIGHTBOX = 'brightbox' + CLOUDSIGMA = 'cloudsigma' + NIMBUS = 'nimbus' + BLUEBOX = 'bluebox' + GANDI = 'gandi' + OPSOURCE = 'opsource' + OPENSTACK = 'openstack' + SKALICLOUD = 'skalicloud' + SERVERLOVE = 'serverlove' + NINEFOLD = 'ninefold' + TERREMARK = 'terremark' + CLOUDSTACK = 'cloudstack' + LIBVIRT = 'libvirt' + JOYENT = 'joyent' + VCL = 'vcl' + KTUCLOUD = 'ktucloud' + GRIDSPOT = 'gridspot' + RACKSPACE_FIRST_GEN = 'rackspace_first_gen' + HOSTVIRTUAL = 'hostvirtual' + ABIQUO = 'abiquo' + DIGITAL_OCEAN = 'digitalocean' + NEPHOSCALE = 'nephoscale' + CLOUDFRAMES = 'cloudframes' + EXOSCALE = 'exoscale' + IKOULA = 'ikoula' + OUTSCALE_SAS = 'outscale_sas' + OUTSCALE_INC = 'outscale_inc' + + # OpenStack based providers + HPCLOUD = 'hpcloud' + KILI = 'kili' + + # Deprecated constants which are still supported + EC2_US_EAST = 'ec2_us_east' + EC2_EU = 'ec2_eu_west' # deprecated name + EC2_EU_WEST = 'ec2_eu_west' + EC2_US_WEST = 'ec2_us_west' + EC2_AP_SOUTHEAST = 'ec2_ap_southeast' + EC2_AP_NORTHEAST = 'ec2_ap_northeast' + EC2_US_WEST_OREGON = 'ec2_us_west_oregon' + EC2_SA_EAST = 'ec2_sa_east' + EC2_AP_SOUTHEAST2 = 'ec2_ap_southeast_2' + + ELASTICHOSTS_UK1 = 'elastichosts_uk1' + ELASTICHOSTS_UK2 = 'elastichosts_uk2' + ELASTICHOSTS_US1 = 'elastichosts_us1' + ELASTICHOSTS_US2 = 'elastichosts_us2' + ELASTICHOSTS_US3 = 'elastichosts_us3' + ELASTICHOSTS_CA1 = 'elastichosts_ca1' + ELASTICHOSTS_AU1 = 'elastichosts_au1' + ELASTICHOSTS_CN1 = 'elastichosts_cn1' + + CLOUDSIGMA_US = 'cloudsigma_us' + + # Deprecated constants which aren't supported anymore + RACKSPACE_UK = 'rackspace_uk' + 
RACKSPACE_NOVA_BETA = 'rackspace_nova_beta' + RACKSPACE_NOVA_DFW = 'rackspace_nova_dfw' + RACKSPACE_NOVA_LON = 'rackspace_nova_lon' + RACKSPACE_NOVA_ORD = 'rackspace_nova_ord' + + # Removed + # SLICEHOST = 'slicehost' + + +DEPRECATED_RACKSPACE_PROVIDERS = [Provider.RACKSPACE_UK, + Provider.RACKSPACE_NOVA_BETA, + Provider.RACKSPACE_NOVA_DFW, + Provider.RACKSPACE_NOVA_LON, + Provider.RACKSPACE_NOVA_ORD] +OLD_CONSTANT_TO_NEW_MAPPING = { + Provider.RACKSPACE: Provider.RACKSPACE_FIRST_GEN, + Provider.RACKSPACE_UK: Provider.RACKSPACE_FIRST_GEN, + + Provider.RACKSPACE_NOVA_BETA: Provider.RACKSPACE, + Provider.RACKSPACE_NOVA_DFW: Provider.RACKSPACE, + Provider.RACKSPACE_NOVA_LON: Provider.RACKSPACE, + Provider.RACKSPACE_NOVA_ORD: Provider.RACKSPACE +} + class NodeState(object): """ Standard states for a node - @cvar RUNNING: Node is running - @cvar REBOOTING: Node is rebooting - @cvar TERMINATED: Node is terminated - @cvar PENDING: Node is pending - @cvar UNKNOWN: Node state is unknown + :cvar RUNNING: Node is running. + :cvar REBOOTING: Node is rebooting. + :cvar TERMINATED: Node is terminated. This node can't be started later on. + :cvar STOPPED: Node is stopped. This node can be started later on. + :cvar PENDING: Node is pending. + :cvar UNKNOWN: Node state is unknown. """ RUNNING = 0 REBOOTING = 1 TERMINATED = 2 PENDING = 3 UNKNOWN = 4 + STOPPED = 5 + + +class Architecture(object): + """ + Image and size architectures. + + :cvar I386: i386 (32 bt) + :cvar X86_64: x86_64 (64 bit) + """ + I386 = 0 + X86_X64 = 1 + class DeploymentError(LibcloudError): """ Exception used when a Deployment Task failed. 
- @ivar node: L{Node} on which this exception happened, you might want to call L{Node.destroy} + :ivar node: :class:`Node` on which this exception happened, you might want + to call :func:`Node.destroy` """ - def __init__(self, node, original_exception=None): + def __init__(self, node, original_exception=None, driver=None): self.node = node self.value = original_exception + self.driver = driver + def __str__(self): - return repr(self.value) + return self.__repr__() + + def __repr__(self): + return (('' + % (self.node.id, str(self.value), str(self.driver)))) + + +class KeyPairError(LibcloudError): + error_type = 'KeyPairError' + + def __init__(self, name, driver): + self.name = name + self.value = 'Key pair with name %s does not exist' % (name) + super(KeyPairError, self).__init__(value=self.value, driver=driver) + + def __str__(self): + return self.__repr__() + + def __repr__(self): + return ('<%s name=%s, value=%s, driver=%s>' % + (self.error_type, self.name, self.value, self.driver.name)) + + +class KeyPairDoesNotExistError(KeyPairError): + error_type = 'KeyPairDoesNotExistError' + -"""Deprecated alias of L{DeploymentException}""" +"""Deprecated alias of :class:`DeploymentException`""" DeploymentException = DeploymentError diff -Nru libcloud-0.5.0/libcloud/data/pricing.json libcloud-0.15.1/libcloud/data/pricing.json --- libcloud-0.5.0/libcloud/data/pricing.json 2011-05-21 11:07:38.000000000 +0000 +++ libcloud-0.15.1/libcloud/data/pricing.json 2014-06-11 14:27:59.000000000 +0000 @@ -1,132 +1,566 @@ { - "bluebox": { - "1gb": 0.15, - "2gb": 0.25, - "4gb": 0.35, - "8gb": 0.45 + "compute": { + "ec2_us_west_oregon": { + "m3.medium": "0.070", + "m3.large": "0.140", + "m3.xlarge": "0.280", + "m3.2xlarge": "0.560", + "m1.small": "0.044", + "m1.medium": "0.087", + "m1.large": "0.175", + "m1.xlarge": "0.350", + "c3.large": "0.105", + "c3.xlarge": "0.210", + "c3.2xlarge": "0.420", + "c3.4xlarge": "0.840", + "c3.8xlarge": "1.680", + "c1.medium": "0.130", + "c1.xlarge": 
"0.520", + "cc2.8xlarge": "2.000", + "g2.2xlarge": "0.650", + "r3.large": "0.175", + "r3.xlarge": "0.350", + "r3.2xlarge": "0.700", + "r3.4xlarge": "1.400", + "r3.8xlarge": "2.800", + "m2.xlarge": "0.245", + "m2.2xlarge": "0.490", + "m2.4xlarge": "0.980", + "cr1.8xlarge": "3.500", + "i2.xlarge": "0.853", + "i2.2xlarge": "1.705", + "i2.4xlarge": "3.410", + "i2.8xlarge": "6.820", + "hs1.8xlarge": "4.600", + "hi1.4xlarge": "3.100", + "t1.micro": "0.020" + }, + "ec2_us_west": { + "m3.medium": "0.077", + "m3.large": "0.154", + "m3.xlarge": "0.308", + "m3.2xlarge": "0.616", + "m1.small": "0.047", + "m1.medium": "0.095", + "m1.large": "0.190", + "m1.xlarge": "0.379", + "c3.large": "0.120", + "c3.xlarge": "0.239", + "c3.2xlarge": "0.478", + "c3.4xlarge": "0.956", + "c3.8xlarge": "1.912", + "c1.medium": "0.148", + "c1.xlarge": "0.592", + "g2.2xlarge": "0.702", + "r3.large": "0.195", + "r3.xlarge": "0.390", + "r3.2xlarge": "0.780", + "r3.4xlarge": "1.560", + "r3.8xlarge": "3.120", + "m2.xlarge": "0.275", + "m2.2xlarge": "0.550", + "m2.4xlarge": "1.100", + "i2.xlarge": "0.938", + "i2.2xlarge": "1.876", + "i2.4xlarge": "3.751", + "i2.8xlarge": "7.502", + "t1.micro": "0.025" + }, + "ec2_eu_west": { + "m3.medium": "0.077", + "m3.large": "0.154", + "m3.xlarge": "0.308", + "m3.2xlarge": "0.616", + "m1.small": "0.047", + "m1.medium": "0.095", + "m1.large": "0.190", + "m1.xlarge": "0.379", + "c3.large": "0.120", + "c3.xlarge": "0.239", + "c3.2xlarge": "0.478", + "c3.4xlarge": "0.956", + "c3.8xlarge": "1.912", + "c1.medium": "0.148", + "c1.xlarge": "0.592", + "cc2.8xlarge": "2.250", + "g2.2xlarge": "0.702", + "cg1.4xlarge": "2.360", + "r3.large": "0.195", + "r3.xlarge": "0.390", + "r3.2xlarge": "0.780", + "r3.4xlarge": "1.560", + "r3.8xlarge": "3.120", + "m2.xlarge": "0.275", + "m2.2xlarge": "0.550", + "m2.4xlarge": "1.100", + "cr1.8xlarge": "3.750", + "i2.xlarge": "0.938", + "i2.2xlarge": "1.876", + "i2.4xlarge": "3.751", + "i2.8xlarge": "7.502", + "hs1.8xlarge": "4.900", + 
"hi1.4xlarge": "3.100", + "t1.micro": "0.020" + }, + "rackspacenovalon": { + "performance2-60": 2.72, + "performance2-120": 5.44, + "performance1-1": 0.04, + "performance2-15": 0.68, + "performance1-4": 0.16, + "performance2-30": 1.36, + "performance2-90": 4.08, + "3": 0.064, + "2": 0.032, + "performance1-2": 0.08, + "4": 0.129, + "7": 0.967, + "6": 0.516, + "5": 0.258, + "performance1-8": 0.32, + "8": 1.612 + }, + "ec2_ap_southeast_2": { + "m3.medium": "0.098", + "m3.large": "0.196", + "m3.xlarge": "0.392", + "m3.2xlarge": "0.784", + "m1.small": "0.058", + "m1.medium": "0.117", + "m1.large": "0.233", + "m1.xlarge": "0.467", + "c3.large": "0.132", + "c3.xlarge": "0.265", + "c3.2xlarge": "0.529", + "c3.4xlarge": "1.058", + "c3.8xlarge": "2.117", + "c1.medium": "0.164", + "c1.xlarge": "0.655", + "r3.large": "0.210", + "r3.xlarge": "0.420", + "r3.2xlarge": "0.840", + "r3.4xlarge": "1.680", + "r3.8xlarge": "3.360", + "m2.xlarge": "0.296", + "m2.2xlarge": "0.592", + "m2.4xlarge": "1.183", + "i2.xlarge": "1.018", + "i2.2xlarge": "2.035", + "i2.4xlarge": "4.070", + "i2.8xlarge": "8.140", + "hs1.8xlarge": "5.570", + "t1.micro": "0.020" + }, + "vps_net": { + "1": 0.416 + }, + "ec2_us_east": { + "m3.medium": "0.070", + "m3.large": "0.140", + "m3.xlarge": "0.280", + "m3.2xlarge": "0.560", + "m1.small": "0.044", + "m1.medium": "0.087", + "m1.large": "0.175", + "m1.xlarge": "0.350", + "c3.large": "0.105", + "c3.xlarge": "0.210", + "c3.2xlarge": "0.420", + "c3.4xlarge": "0.840", + "c3.8xlarge": "1.680", + "c1.medium": "0.130", + "c1.xlarge": "0.520", + "cc2.8xlarge": "2.000", + "g2.2xlarge": "0.650", + "cg1.4xlarge": "2.100", + "r3.large": "0.175", + "r3.xlarge": "0.350", + "r3.2xlarge": "0.700", + "r3.4xlarge": "1.400", + "r3.8xlarge": "2.800", + "m2.xlarge": "0.245", + "m2.2xlarge": "0.490", + "m2.4xlarge": "0.980", + "cr1.8xlarge": "3.500", + "i2.xlarge": "0.853", + "i2.2xlarge": "1.705", + "i2.4xlarge": "3.410", + "i2.8xlarge": "6.820", + "hs1.8xlarge": "4.600", + 
"hi1.4xlarge": "3.100", + "t1.micro": "0.020" + }, + "rackspacenovaus": { + "performance2-60": 2.72, + "performance2-120": 5.44, + "performance1-1": 0.04, + "performance2-15": 0.68, + "performance1-4": 0.16, + "performance2-30": 1.36, + "performance2-90": 4.08, + "3": 0.06, + "2": 0.022, + "performance1-2": 0.08, + "4": 0.12, + "7": 0.96, + "6": 0.48, + "5": 0.24, + "performance1-8": 0.32, + "8": 1.2 + }, + "ec2_sa_east": { + "m3.medium": "0.095", + "m3.large": "0.190", + "m3.xlarge": "0.381", + "m3.2xlarge": "0.761", + "m1.small": "0.058", + "m1.medium": "0.117", + "m1.large": "0.233", + "m1.xlarge": "0.467", + "c1.medium": "0.179", + "c1.xlarge": "0.718", + "m2.xlarge": "0.323", + "m2.2xlarge": "0.645", + "m2.4xlarge": "1.291", + "t1.micro": "0.027" + }, + "cloudsigma_zrh": { + "high-cpu-medium": 0.211, + "standard-large": 0.381, + "micro-high-cpu": 0.381, + "standard-extra-large": 0.762, + "high-memory-double-extra-large": 1.383, + "micro-regular": 0.0548, + "standard-small": 0.0796, + "high-memory-extra-large": 0.642, + "high-cpu-extra-large": 0.78 + }, + "rackspacenovasyd": { + "performance2-60": 2.72, + "performance2-120": 5.44, + "performance1-1": 0.04, + "performance2-15": 0.68, + "performance1-4": 0.16, + "performance2-30": 1.36, + "performance2-90": 4.08, + "3": 0.072, + "2": 0.026, + "performance1-2": 0.08, + "4": 0.144, + "7": 1.08, + "6": 0.576, + "5": 0.288, + "performance1-8": 0.32, + "8": 1.44 + }, + "ec2_ap_northeast": { + "m3.medium": "0.101", + "m3.large": "0.203", + "m3.xlarge": "0.405", + "m3.2xlarge": "0.810", + "m1.small": "0.061", + "m1.medium": "0.122", + "m1.large": "0.243", + "m1.xlarge": "0.486", + "c3.large": "0.128", + "c3.xlarge": "0.255", + "c3.2xlarge": "0.511", + "c3.4xlarge": "1.021", + "c3.8xlarge": "2.043", + "c1.medium": "0.158", + "c1.xlarge": "0.632", + "cc2.8xlarge": "2.349", + "g2.2xlarge": "0.898", + "r3.large": "0.210", + "r3.xlarge": "0.420", + "r3.2xlarge": "0.840", + "r3.4xlarge": "1.680", + "r3.8xlarge": "3.360", + 
"m2.xlarge": "0.287", + "m2.2xlarge": "0.575", + "m2.4xlarge": "1.150", + "cr1.8xlarge": "4.105", + "i2.xlarge": "1.001", + "i2.2xlarge": "2.001", + "i2.4xlarge": "4.002", + "i2.8xlarge": "8.004", + "hs1.8xlarge": "5.400", + "hi1.4xlarge": "3.276", + "t1.micro": "0.026" + }, + "gogrid": { + "24GB": 4.56, + "512MB": 0.095, + "8GB": 1.52, + "4GB": 0.76, + "2GB": 0.38, + "1GB": 0.19, + "16GB": 3.04 + }, + "serverlove": { + "high-cpu-medium": 0.291, + "medium": 0.404, + "large": 0.534, + "small": 0.161, + "extra-large": 0.615, + "high-cpu-extra-large": 0.776 + }, + "elastichosts": { + "high-cpu-medium": 0.18, + "medium": 0.223, + "large": 0.378, + "small": 0.1, + "extra-large": 0.579, + "high-cpu-extra-large": 0.77 + }, + "rackspace": { + "performance2-60": 2.72, + "performance2-120": 5.44, + "performance1-1": 0.04, + "performance2-15": 0.68, + "performance1-4": 0.16, + "performance2-30": 1.36, + "1": 0.015, + "performance2-90": 4.08, + "3": 0.06, + "2": 0.03, + "performance1-2": 0.08, + "4": 0.12, + "7": 0.96, + "6": 0.48, + "5": 0.24, + "performance1-8": 0.32, + "8": 1.8 + }, + "nephoscale": { + "11": 0.35, + "27": 0.0, + "48": 0.15, + "46": 0.1, + "54": 0.938, + "56": 0.75, + "50": 0.28, + "52": 0.48, + "1": 0.6, + "3": 0.063, + "5": 0.031, + "7": 0.125, + "9": 0.188 + }, + "nimbus": { + "m1.xlarge": 0.0, + "m1.small": 0.0, + "m1.large": 0.0 + }, + "gandi": { + "1": 0.02, + "small": 0.02, + "large": 0.06, + "medium": 0.03, + "x-large": 0.12 + }, + "skalicloud": { + "high-cpu-medium": 0.249, + "medium": 0.301, + "large": 0.505, + "small": 0.136, + "extra-large": 0.654, + "high-cpu-extra-large": 0.936 + }, + "bluebox": { + "4gb": 0.35, + "2gb": 0.25, + "8gb": 0.45, + "1gb": 0.15 + }, + "ec2_ap_southeast": { + "m3.medium": "0.098", + "m3.large": "0.196", + "m3.xlarge": "0.392", + "m3.2xlarge": "0.784", + "m1.small": "0.058", + "m1.medium": "0.117", + "m1.large": "0.233", + "m1.xlarge": "0.467", + "c3.large": "0.132", + "c3.xlarge": "0.265", + "c3.2xlarge": "0.529", + 
"c3.4xlarge": "1.058", + "c3.8xlarge": "2.117", + "c1.medium": "0.164", + "c1.xlarge": "0.655", + "r3.large": "0.210", + "r3.xlarge": "0.420", + "r3.2xlarge": "0.840", + "r3.4xlarge": "1.680", + "r3.8xlarge": "3.360", + "m2.xlarge": "0.296", + "m2.2xlarge": "0.592", + "m2.4xlarge": "1.183", + "i2.xlarge": "1.018", + "i2.2xlarge": "2.035", + "i2.4xlarge": "4.070", + "i2.8xlarge": "8.140", + "hs1.8xlarge": "5.570", + "t1.micro": "0.020" + }, + "cloudsigma_lvs": { + "high-cpu-medium": 0.0, + "standard-large": 0.0, + "micro-high-cpu": 0.0, + "standard-extra-large": 0.0, + "high-memory-double-extra-large": 0.0, + "micro-regular": 0.0, + "standard-small": 0.0, + "high-memory-extra-large": 0.0, + "high-cpu-extra-large": 0.0 + }, + "dreamhost": { + "default": 115, + "high": 150, + "minimum": 15, + "maximum": 200, + "low": 50 + }, + "osc_sas_eu_west_3": { + "t1.micro": "0.040", + "m1.small": "0.090", + "m1.medium": "0.130", + "m1.large": "0.360", + "m1.xlarge": "0.730", + "c1.medium": "0.230", + "c1.xlarge": "0.900", + "m2.xlarge": "0.460", + "m2.2xlarge": "0.920", + "m2.4xlarge": "1.840", + "nv1.small": "5.220", + "nv1.medium": "5.310", + "nv1.large": "5.490", + "nv1.xlarge": "5.860", + "cc1.4xlarge": "1.460", + "cc2.8xlarge": "2.700", + "m3.xlarge": "0.780", + "m3.2xlarge": "1.560", + "cr1.8xlarge": "3.750", + "os1.8xlarge": "6.400", + "os1.8xlarge": "6.400" + }, + "osc_sas_eu_west_1": { + "t1.micro": "0.040", + "m1.small": "0.090", + "m1.medium": "0.130", + "m1.large": "0.360", + "m1.xlarge": "0.730", + "c1.medium": "0.230", + "c1.xlarge": "0.900", + "m2.xlarge": "0.460", + "m2.2xlarge": "0.920", + "m2.4xlarge": "1.840", + "nv1.small": "5.220", + "nv1.medium": "5.310", + "nv1.large": "5.490", + "nv1.xlarge": "5.860", + "cc1.4xlarge": "1.460", + "cc2.8xlarge": "2.700", + "m3.xlarge": "0.780", + "m3.2xlarge": "1.560", + "cr1.8xlarge": "3.750", + "os1.8xlarge": "6.400", + "os1.8xlarge": "6.400" + }, + "osc_sas_us_east_1": { + "t1.micro": "0.020", + "m1.small": "0.070", + 
"m1.medium": "0.180", + "m1.large": "0.260", + "m1.xlarge": "0.730", + "c1.medium": "0.170", + "c1.xlarge": "0.660", + "m2.xlarge": "0.460", + "m2.2xlarge": "1.020", + "m2.4xlarge": "2.040", + "nv1.small": "5.220", + "nv1.medium": "5.310", + "nv1.large": "5.490", + "nv1.xlarge": "5.860", + "cc1.4xlarge": "1.610", + "cc2.8xlarge": "2.700", + "m3.xlarge": "0.550", + "m3.2xlarge": "1.560", + "cr1.8xlarge": "3.750", + "os1.8xlarge": "6.400", + "os1.8xlarge": "6.400" + }, + "osc_inc_eu_west_1": { + "t1.micro": "0.040", + "m1.small": "0.090", + "m1.medium": "0.120", + "m1.large": "0.360", + "m1.xlarge": "0.730", + "c1.medium": "0.230", + "c1.xlarge": "0.900", + "m2.xlarge": "0.410", + "m2.2xlarge": "0.820", + "m2.4xlarge": "1.640", + "nv1.small": "5.220", + "nv1.medium": "5.250", + "nv1.large": "5.490", + "nv1.xlarge": "5.610", + "cc1.4xlarge": "1.300", + "cc2.8xlarge": "2.400", + "m3.xlarge": "0.780", + "m3.2xlarge": "1.560", + "cr1.8xlarge": "3.500", + "os1.8xlarge": "4.310", + "os1.8xlarge": "4.310" + }, + "osc_inc_eu_west_3": { + "t1.micro": "0.040", + "m1.small": "0.090", + "m1.medium": "0.120", + "m1.large": "0.360", + "m1.xlarge": "0.730", + "c1.medium": "0.230", + "c1.xlarge": "0.900", + "m2.xlarge": "0.410", + "m2.2xlarge": "0.820", + "m2.4xlarge": "1.640", + "nv1.small": "5.220", + "nv1.medium": "5.250", + "nv1.large": "5.490", + "nv1.xlarge": "5.610", + "cc1.4xlarge": "1.300", + "cc2.8xlarge": "2.400", + "m3.xlarge": "0.780", + "m3.2xlarge": "1.560", + "cr1.8xlarge": "3.500", + "os1.8xlarge": "4.310", + "os1.8xlarge": "4.310" + }, + "osc_inc_us_east_1": { + "t1.micro": "0.020", + "m1.small": "0.060", + "m1.medium": "0.180", + "m1.large": "0.240", + "m1.xlarge": "0.730", + "c1.medium": "0.150", + "c1.xlarge": "0.580", + "m2.xlarge": "0.410", + "m2.2xlarge": "1.020", + "m2.4xlarge": "2.040", + "nv1.small": "5.190", + "nv1.medium": "5.250", + "nv1.large": "5.490", + "nv1.xlarge": "5.610", + "cc1.4xlarge": "1.610", + "cc2.8xlarge": "2.400", + "m3.xlarge": "0.500", 
+ "m3.2xlarge": "1.560", + "cr1.8xlarge": "3.500", + "os1.8xlarge": "6.400", + "os1.8xlarge": "6.400" + } }, - - "rackspace": { - "1": 0.015, - "2": 0.030, - "3": 0.060, - "4": 0.120, - "5": 0.240, - "6": 0.480, - "7": 0.960 - }, - - "dreamhost": { - "minimum": 15, - "maximum": 200, - "default": 115, - "low": 50, - "high": 150 - }, - - "ec2_us_east": { - "t1.micro": 0.02, - "m1.small": 0.085, - "m1.large": 0.34, - "m1.xlarge": 0.68, - "c1.medium": 0.17, - "c1.xlarge": 0.68, - "m2.xlarge": 0.50, - "m2.2xlarge": 1.0, - "m2.4xlarge": 2.0, - "cg1.4xlarge": 2.1, - "cc1.4xlarge": 1.6 - }, - - "ec2_us_west": { - "t1.micro": 0.025, - "m1.small": 0.095, - "m1.large": 0.38, - "m1.xlarge": 0.76, - "c1.medium": 0.19, - "c1.xlarge": 0.76, - "m2.xlarge": 0.57, - "m2.2xlarge": 1.14, - "m2.4xlarge": 2.28 - }, - - "ec2_eu_west": { - "t1.micro": 0.025, - "m1.small": 0.095, - "m1.large": 0.38, - "m1.xlarge": 0.76, - "c1.medium": 0.19, - "c1.xlarge": 0.76, - "m2.xlarge": 0.57, - "m2.2xlarge": 1.14, - "m2.4xlarge": 2.28 - }, - - "ec2_ap_southeast": { - "t1.micro": 0.025, - "m1.small": 0.095, - "m1.large": 0.38, - "m1.xlarge": 0.76, - "c1.medium": 0.19, - "c1.xlarge": 0.76, - "m2.xlarge": 0.57, - "m2.2xlarge": 1.14, - "m2.4xlarge": 2.28 - }, - - "ec2_ap_northeast": { - "t1.micro": 0.027, - "m1.small": 0.10, - "m1.large": 0.40, - "m1.xlarge": 0.80, - "c1.medium": 0.20, - "c1.xlarge": 0.80, - "m2.xlarge": 0.60, - "m2.2xlarge": 1.20, - "m2.4xlarge": 2.39 - }, - - "nimbus" : { - "m1.small": 0.0, - "m1.large": 0.0, - "m1.xlarge": 0.0 - }, - - "cloudsigma_zrh": { - "micro-regular": 0.0548, - "micro-high-cpu": 0.381, - "standard-small": 0.0796, - "standard-large": 0.381, - "standard-extra-large": 0.762, - "high-memory-extra-large": 0.642, - "high-memory-double-extra-large": 1.383, - "high-cpu-medium": 0.211, - "high-cpu-extra-large": 0.780 - }, - - "elastichosts": { - "small": 0.100, - "medium": 0.223, - "large": 0.378, - "extra-large": 0.579, - "high-cpu-medium": 0.180, - 
"high-cpu-extra-large": 0.770 - }, - - "gogrid": { - "512MB": 0.095, - "1GB": 0.19, - "2GB": 0.38, - "4GB": 0.76, - "8GB": 1.52, - "16GB": 3.04 - }, - - "gandi": { - "1": 0.02 - }, - - "vps_net": { - "1": 0.416 - } + "storage": {}, + "updated": 1397154837 } diff -Nru libcloud-0.5.0/libcloud/deployment.py libcloud-0.15.1/libcloud/deployment.py --- libcloud-0.5.0/libcloud/deployment.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/deployment.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from libcloud.utils import deprecated_warning -from libcloud.compute.deployment import ( # pylint: disable-msg=W0611 - Deployment, - SSHKeyDeployment, - ScriptDeployment, - MultiStepDeployment - ) - -__all__ = [ - "Deployment", - "SSHKeyDeployment", - "ScriptDeployment", - "MultiStepDeployment" - ] - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/dns/base.py libcloud-0.15.1/libcloud/dns/base.py --- libcloud-0.5.0/libcloud/dns/base.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/dns/base.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,486 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import with_statement + +import datetime + +from libcloud import __version__ +from libcloud.common.base import ConnectionUserAndKey, BaseDriver +from libcloud.dns.types import RecordType + +__all__ = [ + 'Zone', + 'Record', + 'DNSDriver' +] + + +class Zone(object): + """ + DNS zone. + """ + + def __init__(self, id, domain, type, ttl, driver, extra=None): + """ + :param id: Zone id. + :type id: ``str`` + + :param domain: The name of the domain. + :type domain: ``str`` + + :param type: Zone type (master, slave). 
+ :type type: ``str`` + + :param ttl: Default TTL for records in this zone (in seconds). + :type ttl: ``int`` + + :param driver: DNSDriver instance. + :type driver: :class:`DNSDriver` + + :param extra: (optional) Extra attributes (driver specific). + :type extra: ``dict`` + """ + self.id = str(id) if id else None + self.domain = domain + self.type = type + self.ttl = ttl or None + self.driver = driver + self.extra = extra or {} + + def list_records(self): + return self.driver.list_records(zone=self) + + def create_record(self, name, type, data, extra=None): + return self.driver.create_record(name=name, zone=self, type=type, + data=data, extra=extra) + + def update(self, domain=None, type=None, ttl=None, extra=None): + return self.driver.update_zone(zone=self, domain=domain, type=type, + ttl=ttl, extra=extra) + + def delete(self): + return self.driver.delete_zone(zone=self) + + def export_to_bind_format(self): + return self.driver.export_zone_to_bind_format(zone=self) + + def export_to_bind_zone_file(self, file_path): + self.driver.export_zone_to_bind_zone_file(zone=self, + file_path=file_path) + + def __repr__(self): + return ('' % + (self.domain, self.ttl, self.driver.name)) + + +class Record(object): + """ + Zone record / resource. + """ + + def __init__(self, id, name, type, data, zone, driver, extra=None): + """ + :param id: Record id + :type id: ``str`` + + :param name: Hostname or FQDN. + :type name: ``str`` + + :param type: DNS record type (A, AAAA, ...). + :type type: :class:`RecordType` + + :param data: Data for the record (depends on the record type). + :type data: ``str`` + + :param zone: Zone instance. + :type zone: :class:`Zone` + + :param driver: DNSDriver instance. + :type driver: :class:`DNSDriver` + + :param extra: (optional) Extra attributes (driver specific). 
+ :type extra: ``dict`` + """ + self.id = str(id) if id else None + self.name = name + self.type = type + self.data = data + self.zone = zone + self.driver = driver + self.extra = extra or {} + + def update(self, name=None, type=None, data=None, extra=None): + return self.driver.update_record(record=self, name=name, type=type, + data=data, extra=extra) + + def delete(self): + return self.driver.delete_record(record=self) + + def _get_numeric_id(self): + record_id = self.id + + if record_id.isdigit(): + record_id = int(record_id) + + return record_id + + def __repr__(self): + return ('' % + (self.zone.id, self.name, self.type, self.data, + self.driver.name)) + + +class DNSDriver(BaseDriver): + """ + A base DNSDriver class to derive from + + This class is always subclassed by a specific driver. + """ + connectionCls = ConnectionUserAndKey + name = None + website = None + + def __init__(self, key, secret=None, secure=True, host=None, port=None, + **kwargs): + """ + :param key: API key or username to used (required) + :type key: ``str`` + + :param secret: Secret password to be used (required) + :type secret: ``str`` + + :param secure: Weither to use HTTPS or HTTP. Note: Some providers + only support HTTPS, and it is on by default. + :type secure: ``bool`` + + :param host: Override hostname used for connections. + :type host: ``str`` + + :param port: Override port used for connections. + :type port: ``int`` + + :return: ``None`` + """ + super(DNSDriver, self).__init__(key=key, secret=secret, secure=secure, + host=host, port=port, **kwargs) + + def list_record_types(self): + """ + Return a list of RecordType objects supported by the provider. + + :return: ``list`` of :class:`RecordType` + """ + return list(self.RECORD_TYPE_MAP.keys()) + + def iterate_zones(self): + """ + Return a generator to iterate over available zones. 
+ + :rtype: ``generator`` of :class:`Zone` + """ + raise NotImplementedError( + 'iterate_zones not implemented for this driver') + + def list_zones(self): + """ + Return a list of zones. + + :return: ``list`` of :class:`Zone` + """ + return list(self.iterate_zones()) + + def iterate_records(self, zone): + """ + Return a generator to iterate over records for the provided zone. + + :param zone: Zone to list records for. + :type zone: :class:`Zone` + + :rtype: ``generator`` of :class:`Record` + """ + raise NotImplementedError( + 'iterate_records not implemented for this driver') + + def list_records(self, zone): + """ + Return a list of records for the provided zone. + + :param zone: Zone to list records for. + :type zone: :class:`Zone` + + :return: ``list`` of :class:`Record` + """ + return list(self.iterate_records(zone)) + + def get_zone(self, zone_id): + """ + Return a Zone instance. + + :param zone_id: ID of the required zone + :type zone_id: ``str`` + + :rtype: :class:`Zone` + """ + raise NotImplementedError( + 'get_zone not implemented for this driver') + + def get_record(self, zone_id, record_id): + """ + Return a Record instance. + + :param zone_id: ID of the required zone + :type zone_id: ``str`` + + :param record_id: ID of the required record + :type record_id: ``str`` + + :rtype: :class:`Record` + """ + raise NotImplementedError( + 'get_record not implemented for this driver') + + def create_zone(self, domain, type='master', ttl=None, extra=None): + """ + Create a new zone. + + :param domain: Zone domain name (e.g. example.com) + :type domain: ``str`` + + :param type: Zone type (master / slave). + :type type: ``str`` + + :param ttl: TTL for new records. (optional) + :type ttl: ``int`` + + :param extra: Extra attributes (driver specific). 
(optional) + :type extra: ``dict`` + + :rtype: :class:`Zone` + """ + raise NotImplementedError( + 'create_zone not implemented for this driver') + + def update_zone(self, zone, domain, type='master', ttl=None, extra=None): + """ + Update en existing zone. + + :param zone: Zone to update. + :type zone: :class:`Zone` + + :param domain: Zone domain name (e.g. example.com) + :type domain: ``str`` + + :param type: Zone type (master / slave). + :type type: ``str`` + + :param ttl: TTL for new records. (optional) + :type ttl: ``int`` + + :param extra: Extra attributes (driver specific). (optional) + :type extra: ``dict`` + + :rtype: :class:`Zone` + """ + raise NotImplementedError( + 'update_zone not implemented for this driver') + + def create_record(self, name, zone, type, data, extra=None): + """ + Create a new record. + + :param name: Record name without the domain name (e.g. www). + Note: If you want to create a record for a base domain + name, you should specify empty string ('') for this + argument. + :type name: ``str`` + + :param zone: Zone where the requested record is created. + :type zone: :class:`Zone` + + :param type: DNS record type (A, AAAA, ...). + :type type: :class:`RecordType` + + :param data: Data for the record (depends on the record type). + :type data: ``str`` + + :param extra: Extra attributes (driver specific). (optional) + :type extra: ``dict`` + + :rtype: :class:`Record` + """ + raise NotImplementedError( + 'create_record not implemented for this driver') + + def update_record(self, record, name, type, data, extra): + """ + Update an existing record. + + :param record: Record to update. + :type record: :class:`Record` + + :param name: Record name without the domain name (e.g. www). + Note: If you want to create a record for a base domain + name, you should specify empty string ('') for this + argument. + :type name: ``str`` + + :param type: DNS record type (A, AAAA, ...). 
+ :type type: :class:`RecordType` + + :param data: Data for the record (depends on the record type). + :type data: ``str`` + + :param extra: (optional) Extra attributes (driver specific). + :type extra: ``dict`` + + :rtype: :class:`Record` + """ + raise NotImplementedError( + 'update_record not implemented for this driver') + + def delete_zone(self, zone): + """ + Delete a zone. + + Note: This will delete all the records belonging to this zone. + + :param zone: Zone to delete. + :type zone: :class:`Zone` + + :rtype: ``bool`` + """ + raise NotImplementedError( + 'delete_zone not implemented for this driver') + + def delete_record(self, record): + """ + Delete a record. + + :param record: Record to delete. + :type record: :class:`Record` + + :rtype: ``bool`` + """ + raise NotImplementedError( + 'delete_record not implemented for this driver') + + def export_zone_to_bind_format(self, zone): + """ + Export Zone object to the BIND compatible format. + + :param zone: Zone to export. + :type zone: :class:`Zone` + + :return: Zone data in BIND compatible format. + :rtype: ``str`` + """ + if zone.type != 'master': + raise ValueError('You can only generate BIND out for master zones') + + lines = [] + + # For consistent output, records are sorted based on the id + records = zone.list_records() + records = sorted(records, key=Record._get_numeric_id) + + date = datetime.datetime.now().strftime('%Y-%m-%d %H:%m:%S') + values = {'version': __version__, 'date': date} + + lines.append('; Generated by Libcloud v%(version)s on %(date)s' % + values) + lines.append('$ORIGIN %(domain)s.' % {'domain': zone.domain}) + lines.append('$TTL %(domain_ttl)s\n' % {'domain_ttl': zone.ttl}) + + for record in records: + line = self._get_bind_record_line(record=record) + lines.append(line) + + output = '\n'.join(lines) + return output + + def export_zone_to_bind_zone_file(self, zone, file_path): + """ + Export Zone object to the BIND compatible format and write result to a + file. 
+ + :param zone: Zone to export. + :type zone: :class:`Zone` + + :param file_path: File path where the output will be saved. + :type file_path: ``str`` + """ + result = self.export_zone_to_bind_format(zone=zone) + + with open(file_path, 'w') as fp: + fp.write(result) + + def _get_bind_record_line(self, record): + """ + Generate BIND record line for the provided record. + + :param record: Record to generate the line for. + :type record: :class:`Record` + + :return: Bind compatible record line. + :rtype: ``str`` + """ + parts = [] + + if record.name: + name = '%(name)s.%(domain)s' % {'name': record.name, + 'domain': record.zone.domain} + else: + name = record.zone.domain + + name += '.' + + ttl = record.extra['ttl'] if 'ttl' in record.extra else record.zone.ttl + ttl = str(ttl) + data = record.data + + if record.type in [RecordType.CNAME, RecordType.DNAME, RecordType.MX, + RecordType.PTR, RecordType.SRV]: + # Make sure trailing dot is present + if data[len(data) - 1] != '.': + data += '.' + + if record.type in [RecordType.TXT, RecordType.SPF] and ' ' in data: + # Escape the quotes + data = data.replace('"', '\\"') + + # Quote the string + data = '"%s"' % (data) + + if record.type in [RecordType.MX, RecordType.SRV]: + priority = str(record.extra['priority']) + parts = [name, ttl, 'IN', record.type, priority, data] + else: + parts = [name, ttl, 'IN', record.type, data] + + line = '\t'.join(parts) + return line + + def _string_to_record_type(self, string): + """ + Return a string representation of a DNS record type to a + libcloud RecordType ENUM. 
+ + :rtype: ``str`` + """ + string = string.upper() + record_type = getattr(RecordType, string) + return record_type diff -Nru libcloud-0.5.0/libcloud/dns/drivers/dummy.py libcloud-0.15.1/libcloud/dns/drivers/dummy.py --- libcloud-0.5.0/libcloud/dns/drivers/dummy.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/dns/drivers/dummy.py 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,218 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.dns.base import DNSDriver, Zone, Record +from libcloud.dns.types import RecordType +from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError +from libcloud.dns.types import RecordDoesNotExistError +from libcloud.dns.types import RecordAlreadyExistsError + + +class DummyDNSDriver(DNSDriver): + """ + Dummy DNS driver. 
+ + >>> from libcloud.dns.drivers.dummy import DummyDNSDriver + >>> driver = DummyDNSDriver('key', 'secret') + >>> driver.name + 'Dummy DNS Provider' + """ + + name = 'Dummy DNS Provider' + website = 'http://example.com' + + def __init__(self, api_key, api_secret): + """ + :param api_key: API key or username to used (required) + :type api_key: ``str`` + + :param api_secret: Secret password to be used (required) + :type api_secret: ``str`` + + :rtype: ``None`` + """ + self._zones = {} + + def list_record_types(self): + """ + >>> driver = DummyDNSDriver('key', 'secret') + >>> driver.list_record_types() + ['A'] + + @inherits: :class:`DNSDriver.list_record_types` + """ + return [RecordType.A] + + def list_zones(self): + """ + >>> driver = DummyDNSDriver('key', 'secret') + >>> driver.list_zones() + [] + + @inherits: :class:`DNSDriver.list_zones` + """ + + return [zone['zone'] for zone in list(self._zones.values())] + + def list_records(self, zone): + """ + >>> driver = DummyDNSDriver('key', 'secret') + >>> zone = driver.create_zone(domain='apache.org', type='master', + ... ttl=100) + >>> list(zone.list_records()) + [] + >>> record = driver.create_record(name='libcloud', zone=zone, + ... type=RecordType.A, data='127.0.0.1') + >>> list(zone.list_records()) #doctest: +ELLIPSIS + [] + """ + return self._zones[zone.id]['records'].values() + + def get_zone(self, zone_id): + """ + >>> driver = DummyDNSDriver('key', 'secret') + >>> driver.get_zone(zone_id='foobar') + ... #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ZoneDoesNotExistError: + + @inherits: :class:`DNSDriver.get_zone` + """ + + if zone_id not in self._zones: + raise ZoneDoesNotExistError(driver=self, value=None, + zone_id=zone_id) + + return self._zones[zone_id]['zone'] + + def get_record(self, zone_id, record_id): + """ + >>> driver = DummyDNSDriver('key', 'secret') + >>> driver.get_record(zone_id='doesnotexist', record_id='exists') + ... 
#doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ZoneDoesNotExistError: + + @inherits: :class:`DNSDriver.get_record` + """ + + self.get_zone(zone_id=zone_id) + zone_records = self._zones[zone_id]['records'] + + if record_id not in zone_records: + raise RecordDoesNotExistError(record_id=record_id, value=None, + driver=self) + + return zone_records[record_id] + + def create_zone(self, domain, type='master', ttl=None, extra=None): + """ + >>> driver = DummyDNSDriver('key', 'secret') + >>> zone = driver.create_zone(domain='apache.org', type='master', + ... ttl=100) + >>> zone + + >>> zone = driver.create_zone(domain='apache.org', type='master', + ... ttl=100) + ... #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ZoneAlreadyExistsError: + + @inherits: :class:`DNSDriver.create_zone` + """ + + id = 'id-%s' % (domain) + + if id in self._zones: + raise ZoneAlreadyExistsError(zone_id=id, value=None, driver=self) + + zone = Zone(id=id, domain=domain, type=type, ttl=ttl, extra={}, + driver=self) + self._zones[id] = {'zone': zone, + 'records': {}} + return zone + + def create_record(self, name, zone, type, data, extra=None): + """ + >>> driver = DummyDNSDriver('key', 'secret') + >>> zone = driver.create_zone(domain='apache.org', type='master', + ... ttl=100) + >>> record = driver.create_record(name='libcloud', zone=zone, + ... type=RecordType.A, data='127.0.0.1') + >>> record #doctest: +ELLIPSIS + + >>> record = driver.create_record(name='libcloud', zone=zone, + ... type=RecordType.A, data='127.0.0.1') + ... 
#doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + RecordAlreadyExistsError: + + @inherits: :class:`DNSDriver.create_record` + """ + id = 'id-%s' % (name) + + zone = self.get_zone(zone_id=zone.id) + + if id in self._zones[zone.id]['records']: + raise RecordAlreadyExistsError(record_id=id, value=None, + driver=self) + + record = Record(id=id, name=name, type=type, data=data, extra=extra, + zone=zone, driver=self) + self._zones[zone.id]['records'][id] = record + return record + + def delete_zone(self, zone): + """ + >>> driver = DummyDNSDriver('key', 'secret') + >>> zone = driver.create_zone(domain='apache.org', type='master', + ... ttl=100) + >>> driver.delete_zone(zone) + True + >>> driver.delete_zone(zone) #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ZoneDoesNotExistError: + + @inherits: :class:`DNSDriver.delete_zone` + """ + self.get_zone(zone_id=zone.id) + + del self._zones[zone.id] + return True + + def delete_record(self, record): + """ + >>> driver = DummyDNSDriver('key', 'secret') + >>> zone = driver.create_zone(domain='apache.org', type='master', + ... ttl=100) + >>> record = driver.create_record(name='libcloud', zone=zone, + ... 
type=RecordType.A, data='127.0.0.1') + >>> driver.delete_record(record) + True + >>> driver.delete_record(record) #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + RecordDoesNotExistError: + + @inherits: :class:`DNSDriver.delete_record` + """ + self.get_record(zone_id=record.zone.id, record_id=record.id) + + del self._zones[record.zone.id]['records'][record.id] + return True + + +if __name__ == "__main__": + import doctest + doctest.testmod() diff -Nru libcloud-0.5.0/libcloud/dns/drivers/gandi.py libcloud-0.15.1/libcloud/dns/drivers/gandi.py --- libcloud-0.5.0/libcloud/dns/drivers/gandi.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/dns/drivers/gandi.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,270 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import with_statement + +__all__ = [ + 'GandiDNSDriver' +] + +from libcloud.common.gandi import BaseGandiDriver, GandiConnection +from libcloud.common.gandi import GandiResponse +from libcloud.dns.types import Provider, RecordType +from libcloud.dns.types import RecordError +from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError +from libcloud.dns.base import DNSDriver, Zone, Record + + +TTL_MIN = 30 +TTL_MAX = 2592000 # 30 days + + +class NewZoneVersion(object): + """ + Changes to a zone in the Gandi DNS service need to be wrapped in a new + version object. The changes are made to the new version, then that + version is made active. + + In effect, this is a transaction. + + Any calls made inside this context manager will be applied to a new version + id. If your changes are succesful (and only if they are successful) they + are activated. + """ + + def __init__(self, driver, zone): + self.driver = driver + self.connection = driver.connection + self.zone = zone + + def __enter__(self): + zid = int(self.zone.id) + self.connection.set_context({'zone_id': self.zone.id}) + vid = self.connection.request('domain.zone.version.new', zid).object + self.vid = vid + return vid + + def __exit__(self, type, value, traceback): + if not traceback: + zid = int(self.zone.id) + con = self.connection + con.set_context({'zone_id': self.zone.id}) + con.request('domain.zone.version.set', zid, self.vid).object + + +class GandiDNSResponse(GandiResponse): + exceptions = { + 581042: ZoneDoesNotExistError, + } + + +class GandiDNSConnection(GandiConnection): + responseCls = GandiDNSResponse + + +class GandiDNSDriver(BaseGandiDriver, DNSDriver): + """ + API reference can be found at: + + http://doc.rpc.gandi.net/domain/reference.html + """ + + type = Provider.GANDI + name = 'Gandi DNS' + website = 'http://www.gandi.net/domain' + + connectionCls = GandiDNSConnection + + RECORD_TYPE_MAP = { + RecordType.A: 'A', + RecordType.AAAA: 'AAAA', + 
RecordType.CNAME: 'CNAME', + RecordType.LOC: 'LOC', + RecordType.MX: 'MX', + RecordType.NS: 'NS', + RecordType.SPF: 'SPF', + RecordType.SRV: 'SRV', + RecordType.TXT: 'TXT', + RecordType.WKS: 'WKS', + } + + def _to_zone(self, zone): + return Zone( + id=str(zone['id']), + domain=zone['name'], + type='master', + ttl=0, + driver=self, + extra={} + ) + + def _to_zones(self, zones): + ret = [] + for z in zones: + ret.append(self._to_zone(z)) + return ret + + def list_zones(self): + zones = self.connection.request('domain.zone.list') + return self._to_zones(zones.object) + + def get_zone(self, zone_id): + zid = int(zone_id) + self.connection.set_context({'zone_id': zone_id}) + zone = self.connection.request('domain.zone.info', zid) + return self._to_zone(zone.object) + + def create_zone(self, domain, type='master', ttl=None, extra=None): + params = { + 'name': domain, + } + info = self.connection.request('domain.zone.create', params) + return self._to_zone(info.object) + + def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None): + zid = int(zone.id) + params = {'name': domain} + self.connection.set_context({'zone_id': zone.id}) + zone = self.connection.request('domain.zone.update', zid, params) + return self._to_zone(zone.object) + + def delete_zone(self, zone): + zid = int(zone.id) + self.connection.set_context({'zone_id': zone.id}) + res = self.connection.request('domain.zone.delete', zid) + return res.object + + def _to_record(self, record, zone): + return Record( + id='%s:%s' % (record['type'], record['name']), + name=record['name'], + type=self._string_to_record_type(record['type']), + data=record['value'], + zone=zone, + driver=self, + extra={'ttl': record['ttl']} + ) + + def _to_records(self, records, zone): + retval = [] + for r in records: + retval.append(self._to_record(r, zone)) + return retval + + def list_records(self, zone): + zid = int(zone.id) + self.connection.set_context({'zone_id': zone.id}) + records = 
self.connection.request('domain.zone.record.list', zid, 0) + return self._to_records(records.object, zone) + + def get_record(self, zone_id, record_id): + zid = int(zone_id) + record_type, name = record_id.split(':', 1) + filter_opts = { + 'name': name, + 'type': record_type + } + self.connection.set_context({'zone_id': zone_id}) + records = self.connection.request('domain.zone.record.list', + zid, 0, filter_opts).object + + if len(records) == 0: + raise RecordDoesNotExistError(value='', driver=self, + record_id=record_id) + + return self._to_record(records[0], self.get_zone(zone_id)) + + def _validate_record(self, record_id, name, record_type, data, extra): + if len(data) > 1024: + raise RecordError('Record data must be <= 1024 characters', + driver=self, record_id=record_id) + if extra and 'ttl' in extra: + if extra['ttl'] < TTL_MIN: + raise RecordError('TTL must be at least 30 seconds', + driver=self, record_id=record_id) + if extra['ttl'] > TTL_MAX: + raise RecordError('TTL must not excdeed 30 days', + driver=self, record_id=record_id) + + def create_record(self, name, zone, type, data, extra=None): + self._validate_record(None, name, type, data, extra) + + zid = int(zone.id) + + create = { + 'name': name, + 'type': self.RECORD_TYPE_MAP[type], + 'value': data + } + + if 'ttl' in extra: + create['ttl'] = extra['ttl'] + + with NewZoneVersion(self, zone) as vid: + con = self.connection + con.set_context({'zone_id': zone.id}) + rec = con.request('domain.zone.record.add', + zid, vid, create).object + + return self._to_record(rec, zone) + + def update_record(self, record, name, type, data, extra): + self._validate_record(record.id, name, type, data, extra) + + filter_opts = { + 'name': record.name, + 'type': self.RECORD_TYPE_MAP[record.type] + } + + update = { + 'name': name, + 'type': self.RECORD_TYPE_MAP[type], + 'value': data + } + + if 'ttl' in extra: + update['ttl'] = extra['ttl'] + + zid = int(record.zone.id) + + with NewZoneVersion(self, record.zone) as vid: + 
con = self.connection + con.set_context({'zone_id': record.zone.id}) + con.request('domain.zone.record.delete', + zid, vid, filter_opts) + res = con.request('domain.zone.record.add', + zid, vid, update).object + + return self._to_record(res, record.zone) + + def delete_record(self, record): + zid = int(record.zone.id) + + filter_opts = { + 'name': record.name, + 'type': self.RECORD_TYPE_MAP[record.type] + } + + with NewZoneVersion(self, record.zone) as vid: + con = self.connection + con.set_context({'zone_id': record.zone.id}) + count = con.request('domain.zone.record.delete', + zid, vid, filter_opts).object + + if count == 1: + return True + + raise RecordDoesNotExistError(value='No such record', driver=self, + record_id=record.id) diff -Nru libcloud-0.5.0/libcloud/dns/drivers/google.py libcloud-0.15.1/libcloud/dns/drivers/google.py --- libcloud-0.5.0/libcloud/dns/drivers/google.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/dns/drivers/google.py 2014-06-11 14:28:05.000000000 +0000 @@ -0,0 +1,345 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__all__ = [ + 'GoogleDNSDriver' +] + +API_VERSION = 'v1beta1' + +import re +from libcloud.common.google import GoogleResponse, GoogleBaseConnection +from libcloud.common.google import ResourceNotFoundError +from libcloud.dns.types import Provider, RecordType +from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError +from libcloud.dns.base import DNSDriver, Zone, Record + + +class GoogleDNSResponse(GoogleResponse): + pass + + +class GoogleDNSConnection(GoogleBaseConnection): + host = "www.googleapis.com" + responseCls = GoogleDNSResponse + + def __init__(self, user_id, key, secure, auth_type=None, + credential_file=None, project=None, **kwargs): + super(GoogleDNSConnection, self).\ + __init__(user_id, key, secure=secure, auth_type=auth_type, + credential_file=credential_file, **kwargs) + self.request_path = '/dns/%s/projects/%s' % (API_VERSION, project) + + +class GoogleDNSDriver(DNSDriver): + type = Provider.GOOGLE + name = 'Google DNS' + connectionCls = GoogleDNSConnection + website = 'https://cloud.google.com/' + + RECORD_TYPE_MAP = { + RecordType.A: 'A', + RecordType.AAAA: 'AAAA', + RecordType.CNAME: 'CNAME', + RecordType.MX: 'MX', + RecordType.NS: 'NS', + RecordType.PTR: 'PTR', + RecordType.SOA: 'SOA', + RecordType.SPF: 'SPF', + RecordType.SRV: 'SRV', + RecordType.TXT: 'TXT', + } + + def __init__(self, user_id, key, project=None, auth_type=None, scopes=None, + **kwargs): + self.auth_type = auth_type + self.project = project + self.scopes = scopes + if not self.project: + raise ValueError('Project name must be specified using ' + '"project" keyword.') + super(GoogleDNSDriver, self).__init__(user_id, key, scopes, **kwargs) + + def iterate_zones(self): + """ + Return a generator to iterate over available zones. + + :rtype: ``generator`` of :class:`Zone` + """ + return self._get_more('zones') + + def iterate_records(self, zone): + """ + Return a generator to iterate over records for the provided zone. 
+ + :param zone: Zone to list records for. + :type zone: :class:`Zone` + + :rtype: ``generator`` of :class:`Record` + """ + return self._get_more('records', zone=zone) + + def get_zone(self, zone_id): + """ + Return a Zone instance. + + :param zone_id: ID of the required zone + :type zone_id: ``str`` + + :rtype: :class:`Zone` + """ + request = '/managedZones/%s' % (zone_id) + + try: + response = self.connection.request(request, method='GET').object + except ResourceNotFoundError: + raise ZoneDoesNotExistError(value='', + driver=self.connection.driver, + zone_id=zone_id) + + return self._to_zone(response) + + def get_record(self, zone_id, record_id): + """ + Return a Record instance. + + :param zone_id: ID of the required zone + :type zone_id: ``str`` + + :param record_id: ID of the required record + :type record_id: ``str`` + + :rtype: :class:`Record` + """ + (record_type, record_name) = record_id.split(':', 1) + + params = { + 'name': record_name, + 'type': record_type, + } + + request = '/managedZones/%s/rrsets' % (zone_id) + + try: + response = self.connection.request(request, method='GET', + params=params).object + except ResourceNotFoundError: + raise ZoneDoesNotExistError(value='', + driver=self.connection.driver, + zone_id=zone_id) + + if len(response['rrsets']) > 0: + zone = self.get_zone(zone_id) + return self._to_record(response['rrsets'][0], zone) + + raise RecordDoesNotExistError(value='', driver=self.connection.driver, + record_id=record_id) + + def create_zone(self, domain, type='master', ttl=None, extra=None): + """ + Create a new zone. + + :param domain: Zone domain name (e.g. example.com.) with a \'.\' + at the end. + :type domain: ``str`` + + :param type: Zone type (master is the only one supported). + :type type: ``str`` + + :param ttl: TTL for new records. (unused) + :type ttl: ``int`` + + :param extra: Extra attributes (driver specific). 
(optional) + :type extra: ``dict`` + + :rtype: :class:`Zone` + """ + name = None + description = '' + + if extra: + description = extra.get('description') + name = extra.get('name') + + if name is None: + name = self._cleanup_domain(domain) + + data = { + 'dnsName': domain, + 'name': name, + 'description': description, + } + + request = '/managedZones' + response = self.connection.request(request, method='POST', + data=data).object + return self._to_zone(response) + + def create_record(self, name, zone, type, data, extra=None): + """ + Create a new record. + + :param name: Record name fully qualified, with a \'.\' at the end. + :type name: ``str`` + + :param zone: Zone where the requested record is created. + :type zone: :class:`Zone` + + :param type: DNS record type (A, AAAA, ...). + :type type: :class:`RecordType` + + :param data: Data for the record (depends on the record type). + :type data: ``str`` + + :param extra: Extra attributes. (optional) + :type extra: ``dict`` + + :rtype: :class:`Record` + """ + ttl = data.get('ttl', None) + rrdatas = data.get('rrdatas', []) + + data = { + 'additions': [ + { + 'name': name, + 'type': type, + 'ttl': int(ttl), + 'rrdatas': rrdatas, + } + ] + } + request = '/managedZones/%s/changes' % (zone.id) + response = self.connection.request(request, method='POST', + data=data).object + return self._to_record(response['additions'][0], zone) + + def delete_zone(self, zone): + """ + Delete a zone. + + Note: This will delete all the records belonging to this zone. + + :param zone: Zone to delete. + :type zone: :class:`Zone` + + :rtype: ``bool`` + """ + request = '/managedZones/%s' % (zone.id) + response = self.connection.request(request, method='DELETE') + return response.success() + + def delete_record(self, record): + """ + Delete a record. + + :param record: Record to delete. 
+ :type record: :class:`Record` + + :rtype: ``bool`` + """ + data = { + 'deletions': [ + { + 'name': record.name, + 'type': record.type, + 'rrdatas': record.data['rrdatas'], + 'ttl': record.data['ttl'] + } + ] + } + request = '/managedZones/%s/changes' % (record.zone.id) + response = self.connection.request(request, method='POST', + data=data) + return response.success() + + def _get_more(self, rtype, **kwargs): + last_key = None + exhausted = False + while not exhausted: + items, last_key, exhausted = self._get_data(rtype, last_key, + **kwargs) + for item in items: + yield item + + def _get_data(self, rtype, last_key, **kwargs): + params = {} + + if last_key: + params['pageToken'] = last_key + + if rtype == 'zones': + request = '/managedZones' + transform_func = self._to_zones + r_key = 'managedZones' + elif rtype == 'records': + zone = kwargs['zone'] + request = '/managedZones/%s/rrsets' % (zone.id) + transform_func = self._to_records + r_key = 'rrsets' + + response = self.connection.request(request, method='GET', + params=params,) + + if response.success(): + nextpage = response.object.get('nextPageToken', None) + items = transform_func(response.object.get(r_key), **kwargs) + exhausted = False if nextpage is not None else True + return items, nextpage, exhausted + else: + return [], None, True + + def _ex_connection_class_kwargs(self): + return {'auth_type': self.auth_type, + 'project': self.project, + 'scopes': self.scopes} + + def _to_zones(self, response): + zones = [] + for r in response: + zones.append(self._to_zone(r)) + return zones + + def _to_zone(self, r): + extra = {} + + if 'description' in r: + extra['description'] = r.get('description') + + extra['creationTime'] = r.get('creationTime') + extra['nameServers'] = r.get('nameServers') + extra['id'] = r.get('id') + + return Zone(id=r['name'], domain=r['dnsName'], + type='master', ttl=0, driver=self, extra=extra) + + def _to_records(self, response, zone): + records = [] + for r in response: + 
records.append(self._to_record(r, zone)) + return records + + def _to_record(self, r, zone): + record_id = '%s:%s' % (r['type'], r['name']) + return Record(id=record_id, name=r['name'], + type=r['type'], data=r, zone=zone, + driver=self, extra={}) + + def _cleanup_domain(self, domain): + # name can only contain lower case alphanumeric characters and hyphens + domain = re.sub(r'[^a-zA-Z0-9-]', '-', domain) + if domain[-1] == '-': + domain = domain[:-1] + return domain diff -Nru libcloud-0.5.0/libcloud/dns/drivers/hostvirtual.py libcloud-0.15.1/libcloud/dns/drivers/hostvirtual.py --- libcloud-0.5.0/libcloud/dns/drivers/hostvirtual.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/dns/drivers/hostvirtual.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,243 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License.You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__all__ = [ + 'HostVirtualDNSDriver' +] + +from libcloud.utils.py3 import httplib +from libcloud.utils.misc import merge_valid_keys, get_new_obj +from libcloud.common.hostvirtual import HostVirtualResponse +from libcloud.common.hostvirtual import HostVirtualConnection +from libcloud.compute.drivers.hostvirtual import API_ROOT +from libcloud.dns.types import Provider, RecordType +from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError +from libcloud.dns.base import DNSDriver, Zone, Record + +try: + import simplejson as json +except: + import json + +VALID_RECORD_EXTRA_PARAMS = ['prio', 'ttl'] + + +class HostVirtualDNSResponse(HostVirtualResponse): + def parse_error(self): + context = self.connection.context + status = int(self.status) + + if status == httplib.NOT_FOUND: + if context['resource'] == 'zone': + raise ZoneDoesNotExistError(value='', driver=self, + zone_id=context['id']) + elif context['resource'] == 'record': + raise RecordDoesNotExistError(value='', driver=self, + record_id=context['id']) + + super(HostVirtualDNSResponse, self).parse_error() + return self.body + + +class HostVirtualDNSConnection(HostVirtualConnection): + responseCls = HostVirtualDNSResponse + + +class HostVirtualDNSDriver(DNSDriver): + type = Provider.HOSTVIRTUAL + name = 'Host Virtual DNS' + website = 'http://www.vr.org/' + connectionCls = HostVirtualDNSConnection + + RECORD_TYPE_MAP = { + RecordType.A: 'A', + RecordType.AAAA: 'AAAA', + RecordType.CNAME: 'CNAME', + RecordType.MX: 'MX', + RecordType.NS: 'SPF', + RecordType.SRV: 'SRV', + RecordType.TXT: 'TXT', + } + + def __init__(self, key, secure=True, host=None, port=None): + super(HostVirtualDNSDriver, self).__init__(key=key, secure=secure, + host=host, port=port) + + def _to_zones(self, items): + zones = [] + for item in items: + zones.append(self._to_zone(item)) + return zones + + def _to_zone(self, item): + extra = {} + if 'records' in item: + extra['records'] = item['records'] + if item['type'] == 
'NATIVE': + item['type'] = 'master' + zone = Zone(id=item['id'], domain=item['name'], + type=item['type'], ttl=item['ttl'], + driver=self, extra=extra) + return zone + + def _to_records(self, items, zone=None): + records = [] + + for item in items: + records.append(self._to_record(item=item, zone=zone)) + return records + + def _to_record(self, item, zone=None): + extra = {'ttl': item['ttl']} + type = self._string_to_record_type(item['type']) + record = Record(id=item['id'], name=item['name'], + type=type, data=item['content'], + zone=zone, driver=self, extra=extra) + return record + + def list_zones(self): + result = self.connection.request( + API_ROOT + '/dns/zones/').object + zones = self._to_zones(result) + return zones + + def list_records(self, zone): + params = {'id': zone.id} + self.connection.set_context({'resource': 'zone', 'id': zone.id}) + result = self.connection.request( + API_ROOT + '/dns/records/', params=params).object + records = self._to_records(items=result, zone=zone) + return records + + def get_zone(self, zone_id): + params = {'id': zone_id} + self.connection.set_context({'resource': 'zone', 'id': zone_id}) + result = self.connection.request( + API_ROOT + '/dns/zone/', params=params).object + if 'id' not in result: + raise ZoneDoesNotExistError(value='', driver=self, zone_id=zone_id) + zone = self._to_zone(result) + return zone + + def get_record(self, zone_id, record_id): + zone = self.get_zone(zone_id=zone_id) + params = {'id': record_id} + self.connection.set_context({'resource': 'record', 'id': record_id}) + result = self.connection.request( + API_ROOT + '/dns/record/', params=params).object + if 'id' not in result: + raise RecordDoesNotExistError(value='', + driver=self, record_id=record_id) + record = self._to_record(item=result, zone=zone) + return record + + def delete_zone(self, zone): + params = {'zone_id': zone.id} + self.connection.set_context({'resource': 'zone', 'id': zone.id}) + result = self.connection.request( + API_ROOT + 
'/dns/zone/', params=params, method='DELETE').object + return bool(result) + + def delete_record(self, record): + params = {'id': record.id} + self.connection.set_context({'resource': 'record', 'id': record.id}) + result = self.connection.request( + API_ROOT + '/dns/record/', params=params, method='DELETE').object + + return bool(result) + + def create_zone(self, domain, type='NATIVE', ttl=None, extra=None): + if type == 'master': + type = 'NATIVE' + elif type == 'slave': + type = 'SLAVE' + params = {'name': domain, 'type': type, 'ttl': ttl} + result = self.connection.request( + API_ROOT + '/dns/zone/', + data=json.dumps(params), method='POST').object + extra = { + 'soa': result['soa'], + 'ns': result['ns'] + } + zone = Zone(id=result['id'], domain=domain, + type=type, ttl=ttl, extra=extra, driver=self) + return zone + + def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None): + params = {'id': zone.id} + if domain: + params['name'] = domain + if type: + params['type'] = type + self.connection.set_context({'resource': 'zone', 'id': zone.id}) + self.connection.request(API_ROOT + '/dns/zone/', + data=json.dumps(params), method='PUT').object + updated_zone = get_new_obj( + obj=zone, klass=Zone, + attributes={ + 'domain': domain, + 'type': type, + 'ttl': ttl, + 'extra': extra + }) + return updated_zone + + def create_record(self, name, zone, type, data, extra=None): + params = { + 'name': name, + 'type': self.RECORD_TYPE_MAP[type], + 'domain_id': zone.id, + 'content': data + } + merged = merge_valid_keys( + params=params, + valid_keys=VALID_RECORD_EXTRA_PARAMS, + extra=extra + ) + self.connection.set_context({'resource': 'zone', 'id': zone.id}) + result = self.connection.request( + API_ROOT + '/dns/record/', + data=json.dumps(params), method='POST').object + record = Record(id=result['id'], name=name, + type=type, data=data, + extra=merged, zone=zone, driver=self) + return record + + def update_record(self, record, name=None, type=None, + data=None, 
extra=None): + params = { + 'domain_id': record.zone.id, + 'record_id': record.id + } + if name: + params['name'] = name + if data: + params['content'] = data + if type is not None: + params['type'] = self.RECORD_TYPE_MAP[type] + merged = merge_valid_keys( + params=params, + valid_keys=VALID_RECORD_EXTRA_PARAMS, + extra=extra + ) + self.connection.set_context({'resource': 'record', 'id': record.id}) + self.connection.request(API_ROOT + '/dns/record/', + data=json.dumps(params), method='PUT').object + updated_record = get_new_obj( + obj=record, klass=Record, attributes={ + 'name': name, 'data': data, + 'type': type, + 'extra': merged + }) + return updated_record diff -Nru libcloud-0.5.0/libcloud/dns/drivers/linode.py libcloud-0.15.1/libcloud/dns/drivers/linode.py --- libcloud-0.5.0/libcloud/dns/drivers/linode.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/dns/drivers/linode.py 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,272 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__all__ = [ + 'LinodeDNSDriver' +] + +from libcloud.utils.misc import merge_valid_keys, get_new_obj +from libcloud.common.linode import (API_ROOT, LinodeException, + LinodeConnection, LinodeResponse) +from libcloud.dns.types import Provider, RecordType +from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError +from libcloud.dns.base import DNSDriver, Zone, Record + + +VALID_ZONE_EXTRA_PARAMS = ['SOA_Email', 'Refresh_sec', 'Retry_sec', + 'Expire_sec', 'status', 'master_ips'] + +VALID_RECORD_EXTRA_PARAMS = ['Priority', 'Weight', 'Port', 'Protocol', + 'TTL_sec'] + + +class LinodeDNSResponse(LinodeResponse): + def _make_excp(self, error): + result = super(LinodeDNSResponse, self)._make_excp(error) + if isinstance(result, LinodeException) and result.code == 5: + context = self.connection.context + + if context['resource'] == 'zone': + result = ZoneDoesNotExistError(value='', + driver=self.connection.driver, + zone_id=context['id']) + + elif context['resource'] == 'record': + result = RecordDoesNotExistError(value='', + driver=self.connection.driver, + record_id=context['id']) + return result + + +class LinodeDNSConnection(LinodeConnection): + responseCls = LinodeDNSResponse + + +class LinodeDNSDriver(DNSDriver): + type = Provider.LINODE + name = 'Linode DNS' + website = 'http://www.linode.com/' + connectionCls = LinodeDNSConnection + + RECORD_TYPE_MAP = { + RecordType.NS: 'NS', + RecordType.MX: 'MX', + RecordType.A: 'A', + RecordType.AAAA: 'AAAA', + RecordType.CNAME: 'CNAME', + RecordType.TXT: 'TXT', + RecordType.SRV: 'SRV', + } + + def list_zones(self): + params = {'api_action': 'domain.list'} + data = self.connection.request(API_ROOT, params=params).objects[0] + zones = self._to_zones(data) + return zones + + def list_records(self, zone): + params = {'api_action': 'domain.resource.list', 'DOMAINID': zone.id} + + self.connection.set_context(context={'resource': 'zone', + 'id': zone.id}) + data = self.connection.request(API_ROOT, 
params=params).objects[0] + records = self._to_records(items=data, zone=zone) + return records + + def get_zone(self, zone_id): + params = {'api_action': 'domain.list', 'DomainID': zone_id} + self.connection.set_context(context={'resource': 'zone', + 'id': zone_id}) + data = self.connection.request(API_ROOT, params=params).objects[0] + zones = self._to_zones(data) + + if len(zones) != 1: + raise ZoneDoesNotExistError(value='', driver=self, zone_id=zone_id) + + return zones[0] + + def get_record(self, zone_id, record_id): + zone = self.get_zone(zone_id=zone_id) + params = {'api_action': 'domain.resource.list', 'DomainID': zone_id, + 'ResourceID': record_id} + self.connection.set_context(context={'resource': 'record', + 'id': record_id}) + data = self.connection.request(API_ROOT, params=params).objects[0] + records = self._to_records(items=data, zone=zone) + + if len(records) != 1: + raise RecordDoesNotExistError(value='', driver=self, + record_id=record_id) + + return records[0] + + def create_zone(self, domain, type='master', ttl=None, extra=None): + """ + Create a new zone. + + API docs: http://www.linode.com/api/dns/domain.create + """ + params = {'api_action': 'domain.create', 'Type': type, + 'Domain': domain} + + if ttl: + params['TTL_sec'] = ttl + + merged = merge_valid_keys(params=params, + valid_keys=VALID_ZONE_EXTRA_PARAMS, + extra=extra) + data = self.connection.request(API_ROOT, params=params).objects[0] + zone = Zone(id=data['DomainID'], domain=domain, type=type, ttl=ttl, + extra=merged, driver=self) + return zone + + def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None): + """ + Update an existing zone. 
+ + API docs: http://www.linode.com/api/dns/domain.update + """ + params = {'api_action': 'domain.update', 'DomainID': zone.id} + + if type: + params['Type'] = type + + if domain: + params['Domain'] = domain + + if ttl: + params['TTL_sec'] = ttl + + merged = merge_valid_keys(params=params, + valid_keys=VALID_ZONE_EXTRA_PARAMS, + extra=extra) + self.connection.request(API_ROOT, params=params).objects[0] + updated_zone = get_new_obj(obj=zone, klass=Zone, + attributes={'domain': domain, + 'type': type, 'ttl': ttl, + 'extra': merged}) + return updated_zone + + def create_record(self, name, zone, type, data, extra=None): + """ + Create a new record. + + API docs: http://www.linode.com/api/dns/domain.resource.create + """ + params = {'api_action': 'domain.resource.create', 'DomainID': zone.id, + 'Name': name, 'Target': data, + 'Type': self.RECORD_TYPE_MAP[type]} + merged = merge_valid_keys(params=params, + valid_keys=VALID_RECORD_EXTRA_PARAMS, + extra=extra) + + result = self.connection.request(API_ROOT, params=params).objects[0] + record = Record(id=result['ResourceID'], name=name, type=type, + data=data, extra=merged, zone=zone, driver=self) + return record + + def update_record(self, record, name=None, type=None, data=None, + extra=None): + """ + Update an existing record. 
+ + API docs: http://www.linode.com/api/dns/domain.resource.update + """ + params = {'api_action': 'domain.resource.update', + 'ResourceID': record.id, 'DomainID': record.zone.id} + + if name: + params['Name'] = name + + if data: + params['Target'] = data + + if type is not None: + params['Type'] = self.RECORD_TYPE_MAP[type] + + merged = merge_valid_keys(params=params, + valid_keys=VALID_RECORD_EXTRA_PARAMS, + extra=extra) + + self.connection.request(API_ROOT, params=params).objects[0] + updated_record = get_new_obj(obj=record, klass=Record, + attributes={'name': name, 'data': data, + 'type': type, + 'extra': merged}) + return updated_record + + def delete_zone(self, zone): + params = {'api_action': 'domain.delete', 'DomainID': zone.id} + + self.connection.set_context(context={'resource': 'zone', + 'id': zone.id}) + data = self.connection.request(API_ROOT, params=params).objects[0] + + return 'DomainID' in data + + def delete_record(self, record): + params = {'api_action': 'domain.resource.delete', + 'DomainID': record.zone.id, 'ResourceID': record.id} + + self.connection.set_context(context={'resource': 'record', + 'id': record.id}) + data = self.connection.request(API_ROOT, params=params).objects[0] + + return 'ResourceID' in data + + def _to_zones(self, items): + """ + Convert a list of items to the Zone objects. + """ + zones = [] + + for item in items: + zones.append(self._to_zone(item)) + + return zones + + def _to_zone(self, item): + """ + Build an Zone object from the item dictionary. + """ + extra = {'SOA_Email': item['SOA_EMAIL'], 'status': item['STATUS'], + 'description': item['DESCRIPTION']} + zone = Zone(id=item['DOMAINID'], domain=item['DOMAIN'], + type=item['TYPE'], ttl=item['TTL_SEC'], driver=self, + extra=extra) + return zone + + def _to_records(self, items, zone=None): + """ + Convert a list of items to the Record objects. 
+ """ + records = [] + + for item in items: + records.append(self._to_record(item=item, zone=zone)) + + return records + + def _to_record(self, item, zone=None): + """ + Build a Record object from the item dictionary. + """ + extra = {'protocol': item['PROTOCOL'], 'ttl_sec': item['TTL_SEC'], + 'port': item['PORT'], 'weight': item['WEIGHT']} + type = self._string_to_record_type(item['TYPE']) + record = Record(id=item['RESOURCEID'], name=item['NAME'], type=type, + data=item['TARGET'], zone=zone, driver=self, + extra=extra) + return record diff -Nru libcloud-0.5.0/libcloud/dns/drivers/rackspace.py libcloud-0.15.1/libcloud/dns/drivers/rackspace.py --- libcloud-0.5.0/libcloud/dns/drivers/rackspace.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/dns/drivers/rackspace.py 2014-07-02 18:47:55.000000000 +0000 @@ -0,0 +1,450 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from libcloud.common.openstack import OpenStackDriverMixin + +__all__ = [ + 'RackspaceUSDNSDriver', + 'RackspaceUKDNSDriver' +] + +from libcloud.utils.py3 import httplib +import copy + +from libcloud.common.base import PollingConnection +from libcloud.common.types import LibcloudError +from libcloud.utils.misc import merge_valid_keys, get_new_obj +from libcloud.common.rackspace import AUTH_URL +from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection +from libcloud.compute.drivers.openstack import OpenStack_1_1_Response + +from libcloud.dns.types import Provider, RecordType +from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError +from libcloud.dns.base import DNSDriver, Zone, Record + +VALID_ZONE_EXTRA_PARAMS = ['email', 'comment', 'ns1'] +VALID_RECORD_EXTRA_PARAMS = ['ttl', 'comment', 'priority'] + + +class RackspaceDNSResponse(OpenStack_1_1_Response): + """ + Rackspace DNS Response class. + """ + + def parse_error(self): + status = int(self.status) + context = self.connection.context + body = self.parse_body() + + if status == httplib.NOT_FOUND: + if context['resource'] == 'zone': + raise ZoneDoesNotExistError(value='', driver=self, + zone_id=context['id']) + elif context['resource'] == 'record': + raise RecordDoesNotExistError(value='', driver=self, + record_id=context['id']) + if body: + if 'code' and 'message' in body: + err = '%s - %s (%s)' % (body['code'], body['message'], + body['details']) + return err + elif 'validationErrors' in body: + errors = [m for m in body['validationErrors']['messages']] + err = 'Validation errors: %s' % ', '.join(errors) + return err + + raise LibcloudError('Unexpected status code: %s' % (status)) + + +class RackspaceDNSConnection(OpenStack_1_1_Connection, PollingConnection): + """ + Rackspace DNS Connection class. 
+ """ + + responseCls = RackspaceDNSResponse + XML_NAMESPACE = None + poll_interval = 2.5 + timeout = 30 + + auth_url = AUTH_URL + _auth_version = '2.0' + + def __init__(self, *args, **kwargs): + self.region = kwargs.pop('region', None) + super(RackspaceDNSConnection, self).__init__(*args, **kwargs) + + def get_poll_request_kwargs(self, response, context, request_kwargs): + job_id = response.object['jobId'] + kwargs = {'action': '/status/%s' % (job_id), + 'params': {'showDetails': True}} + return kwargs + + def has_completed(self, response): + status = response.object['status'] + if status == 'ERROR': + data = response.object['error'] + + if 'code' and 'message' in data: + message = '%s - %s (%s)' % (data['code'], data['message'], + data['details']) + else: + message = data['message'] + + raise LibcloudError(message, + driver=self.driver) + + return status == 'COMPLETED' + + def get_endpoint(self): + if '2.0' in self._auth_version: + ep = self.service_catalog.get_endpoint(name='cloudDNS', + service_type='rax:dns', + region=None) + else: + raise LibcloudError("Auth version %s not supported" % + (self._auth_version)) + + public_url = ep.get('publicURL', None) + + # This is a nasty hack, but because of how global auth and old accounts + # work, there is no way around it. 
+ if self.region == 'us': + # Old UK account, which only has us endpoint in the catalog + public_url = public_url.replace('https://lon.dns.api', + 'https://dns.api') + if self.region == 'uk': + # Old US account, which only has uk endpoint in the catalog + public_url = public_url.replace('https://dns.api', + 'https://lon.dns.api') + + return public_url + + +class RackspaceDNSDriver(DNSDriver, OpenStackDriverMixin): + name = 'Rackspace DNS' + website = 'http://www.rackspace.com/' + type = Provider.RACKSPACE + connectionCls = RackspaceDNSConnection + + def __init__(self, key, secret=None, secure=True, host=None, port=None, + region='us', **kwargs): + if region not in ['us', 'uk']: + raise ValueError('Invalid region: %s' % (region)) + + OpenStackDriverMixin.__init__(self, **kwargs) + super(RackspaceDNSDriver, self).__init__(key=key, secret=secret, + host=host, port=port, + region=region) + + RECORD_TYPE_MAP = { + RecordType.A: 'A', + RecordType.AAAA: 'AAAA', + RecordType.CNAME: 'CNAME', + RecordType.MX: 'MX', + RecordType.NS: 'NS', + RecordType.PTR: 'PTR', + RecordType.SRV: 'SRV', + RecordType.TXT: 'TXT', + } + + def iterate_zones(self): + offset = 0 + limit = 100 + while True: + params = { + 'limit': limit, + 'offset': offset, + } + response = self.connection.request( + action='/domains', params=params).object + zones_list = response['domains'] + for item in zones_list: + yield self._to_zone(item) + + if _rackspace_result_has_more(response, len(zones_list), limit): + offset += limit + else: + break + + def iterate_records(self, zone): + self.connection.set_context({'resource': 'zone', 'id': zone.id}) + offset = 0 + limit = 100 + while True: + params = { + 'showRecord': True, + 'limit': limit, + 'offset': offset, + } + response = self.connection.request( + action='/domains/%s' % (zone.id), params=params).object + records_list = response['recordsList'] + records = records_list['records'] + for item in records: + record = self._to_record(data=item, zone=zone) + yield 
record + + if _rackspace_result_has_more(records_list, len(records), limit): + offset += limit + else: + break + + def get_zone(self, zone_id): + self.connection.set_context({'resource': 'zone', 'id': zone_id}) + response = self.connection.request(action='/domains/%s' % (zone_id)) + zone = self._to_zone(data=response.object) + return zone + + def get_record(self, zone_id, record_id): + zone = self.get_zone(zone_id=zone_id) + self.connection.set_context({'resource': 'record', 'id': record_id}) + response = self.connection.request(action='/domains/%s/records/%s' % + (zone_id, record_id)).object + record = self._to_record(data=response, zone=zone) + return record + + def create_zone(self, domain, type='master', ttl=None, extra=None): + extra = extra if extra else {} + + # Email address is required + if 'email' not in extra: + raise ValueError('"email" key must be present in extra dictionary') + + payload = {'name': domain, 'emailAddress': extra['email'], + 'recordsList': {'records': []}} + + if ttl: + payload['ttl'] = ttl + + if 'comment' in extra: + payload['comment'] = extra['comment'] + + data = {'domains': [payload]} + response = self.connection.async_request(action='/domains', + method='POST', data=data) + zone = self._to_zone(data=response.object['response']['domains'][0]) + return zone + + def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None): + # Only ttl, comment and email address can be changed + extra = extra if extra else {} + + if domain: + raise LibcloudError('Domain cannot be changed', driver=self) + + data = {} + + if ttl: + data['ttl'] = int(ttl) + + if 'email' in extra: + data['emailAddress'] = extra['email'] + + if 'comment' in extra: + data['comment'] = extra['comment'] + + type = type if type else zone.type + ttl = ttl if ttl else zone.ttl + + self.connection.set_context({'resource': 'zone', 'id': zone.id}) + self.connection.async_request(action='/domains/%s' % (zone.id), + method='PUT', data=data) + merged = 
merge_valid_keys(params=copy.deepcopy(zone.extra), + valid_keys=VALID_ZONE_EXTRA_PARAMS, + extra=extra) + updated_zone = get_new_obj(obj=zone, klass=Zone, + attributes={'type': type, + 'ttl': ttl, + 'extra': merged}) + return updated_zone + + def create_record(self, name, zone, type, data, extra=None): + # Name must be a FQDN - e.g. if domain is "foo.com" then a record + # name is "bar.foo.com" + extra = extra if extra else {} + + name = self._to_full_record_name(domain=zone.domain, name=name) + data = {'name': name, 'type': self.RECORD_TYPE_MAP[type], + 'data': data} + + if 'ttl' in extra: + data['ttl'] = int(extra['ttl']) + + if 'priority' in extra: + data['priority'] = int(extra['priority']) + + payload = {'records': [data]} + self.connection.set_context({'resource': 'zone', 'id': zone.id}) + response = self.connection.async_request(action='/domains/%s/records' + % (zone.id), data=payload, + method='POST').object + record = self._to_record(data=response['response']['records'][0], + zone=zone) + return record + + def update_record(self, record, name=None, type=None, data=None, + extra=None): + # Only data, ttl, and comment attributes can be modified, but name + # attribute must always be present. 
+ extra = extra if extra else {} + + name = self._to_full_record_name(domain=record.zone.domain, + name=record.name) + payload = {'name': name} + + if data: + payload['data'] = data + + if 'ttl' in extra: + payload['ttl'] = extra['ttl'] + + if 'comment' in extra: + payload['comment'] = extra['comment'] + + type = type if type is not None else record.type + data = data if data else record.data + + self.connection.set_context({'resource': 'record', 'id': record.id}) + self.connection.async_request(action='/domains/%s/records/%s' % + (record.zone.id, record.id), + method='PUT', data=payload) + + merged = merge_valid_keys(params=copy.deepcopy(record.extra), + valid_keys=VALID_RECORD_EXTRA_PARAMS, + extra=extra) + updated_record = get_new_obj(obj=record, klass=Record, + attributes={'type': type, + 'data': data, + 'driver': self, + 'extra': merged}) + return updated_record + + def delete_zone(self, zone): + self.connection.set_context({'resource': 'zone', 'id': zone.id}) + self.connection.async_request(action='/domains/%s' % (zone.id), + method='DELETE') + return True + + def delete_record(self, record): + self.connection.set_context({'resource': 'record', 'id': record.id}) + self.connection.async_request(action='/domains/%s/records/%s' % + (record.zone.id, record.id), + method='DELETE') + return True + + def _to_zone(self, data): + id = data['id'] + domain = data['name'] + type = 'master' + ttl = data.get('ttl', 0) + extra = {} + + if 'emailAddress' in data: + extra['email'] = data['emailAddress'] + + if 'comment' in data: + extra['comment'] = data['comment'] + + zone = Zone(id=str(id), domain=domain, type=type, ttl=int(ttl), + driver=self, extra=extra) + return zone + + def _to_record(self, data, zone): + id = data['id'] + fqdn = data['name'] + name = self._to_partial_record_name(domain=zone.domain, name=fqdn) + type = self._string_to_record_type(data['type']) + record_data = data['data'] + extra = {'fqdn': fqdn} + + for key in VALID_RECORD_EXTRA_PARAMS: + if key in 
data: + extra[key] = data[key] + + record = Record(id=str(id), name=name, type=type, data=record_data, + zone=zone, driver=self, extra=extra) + return record + + def _to_full_record_name(self, domain, name): + """ + Build a FQDN from a domain and record name. + + :param domain: Domain name. + :type domain: ``str`` + + :param name: Record name. + :type name: ``str`` + """ + if name: + name = '%s.%s' % (name, domain) + else: + name = domain + + return name + + def _to_partial_record_name(self, domain, name): + """ + Remove domain portion from the record name. + + :param domain: Domain name. + :type domain: ``str`` + + :param name: Full record name (fqdn). + :type name: ``str`` + """ + if name == domain: + # Map "root" record names to None to be consistent with other + # drivers + return None + + # Strip domain portion + name = name.replace('.%s' % (domain), '') + return name + + def _ex_connection_class_kwargs(self): + kwargs = self.openstack_connection_kwargs() + kwargs['region'] = self.region + return kwargs + + +class RackspaceUSDNSDriver(RackspaceDNSDriver): + name = 'Rackspace DNS (US)' + type = Provider.RACKSPACE_US + + def __init__(self, *args, **kwargs): + kwargs['region'] = 'us' + super(RackspaceUSDNSDriver, self).__init__(*args, **kwargs) + + +class RackspaceUKDNSDriver(RackspaceDNSDriver): + name = 'Rackspace DNS (UK)' + type = Provider.RACKSPACE_UK + + def __init__(self, *args, **kwargs): + kwargs['region'] = 'uk' + super(RackspaceUKDNSDriver, self).__init__(*args, **kwargs) + + +def _rackspace_result_has_more(response, result_length, limit): + # If rackspace returns less than the limit, then we've reached the end of + # the result set. + if result_length < limit: + return False + + # Paginated results return links to the previous and next sets of data, but + # 'next' only exists when there is more to get. 
+ for item in response.get('links', ()): + if item['rel'] == 'next': + return True + return False diff -Nru libcloud-0.5.0/libcloud/dns/drivers/route53.py libcloud-0.15.1/libcloud/dns/drivers/route53.py --- libcloud-0.5.0/libcloud/dns/drivers/route53.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/dns/drivers/route53.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,527 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__all__ = [ + 'Route53DNSDriver' +] + +import base64 +import hmac +import datetime +import uuid +import copy +from libcloud.utils.py3 import httplib + +from hashlib import sha1 + +try: + from lxml import etree as ET +except ImportError: + from xml.etree import ElementTree as ET + +from libcloud.utils.py3 import b, urlencode + +from libcloud.utils.xml import findtext, findall, fixxpath +from libcloud.dns.types import Provider, RecordType +from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError +from libcloud.dns.base import DNSDriver, Zone, Record +from libcloud.common.types import LibcloudError +from libcloud.common.aws import AWSGenericResponse +from libcloud.common.base import ConnectionUserAndKey + + +API_VERSION = '2012-02-29' +API_HOST = 'route53.amazonaws.com' +API_ROOT = '/%s/' % (API_VERSION) + +NAMESPACE = 'https://%s/doc%s' % (API_HOST, API_ROOT) + + +class InvalidChangeBatch(LibcloudError): + pass + + +class Route53DNSResponse(AWSGenericResponse): + """ + Amazon Route53 response class. 
+ """ + + namespace = NAMESPACE + xpath = 'Error' + + exceptions = { + 'NoSuchHostedZone': ZoneDoesNotExistError, + 'InvalidChangeBatch': InvalidChangeBatch, + } + + +class Route53Connection(ConnectionUserAndKey): + host = API_HOST + responseCls = Route53DNSResponse + + def pre_connect_hook(self, params, headers): + time_string = datetime.datetime.utcnow() \ + .strftime('%a, %d %b %Y %H:%M:%S GMT') + headers['Date'] = time_string + tmp = [] + + signature = self._get_aws_auth_b64(self.key, time_string) + auth = {'AWSAccessKeyId': self.user_id, 'Signature': signature, + 'Algorithm': 'HmacSHA1'} + + for k, v in auth.items(): + tmp.append('%s=%s' % (k, v)) + + headers['X-Amzn-Authorization'] = 'AWS3-HTTPS ' + ','.join(tmp) + + return params, headers + + def _get_aws_auth_b64(self, secret_key, time_string): + b64_hmac = base64.b64encode( + hmac.new(b(secret_key), b(time_string), digestmod=sha1).digest() + ) + + return b64_hmac.decode('utf-8') + + +class Route53DNSDriver(DNSDriver): + type = Provider.ROUTE53 + name = 'Route53 DNS' + website = 'http://aws.amazon.com/route53/' + connectionCls = Route53Connection + + RECORD_TYPE_MAP = { + RecordType.A: 'A', + RecordType.AAAA: 'AAAA', + RecordType.CNAME: 'CNAME', + RecordType.MX: 'MX', + RecordType.NS: 'NS', + RecordType.PTR: 'PTR', + RecordType.SOA: 'SOA', + RecordType.SPF: 'SPF', + RecordType.SRV: 'SRV', + RecordType.TXT: 'TXT', + } + + def iterate_zones(self): + return self._get_more('zones') + + def iterate_records(self, zone): + return self._get_more('records', zone=zone) + + def get_zone(self, zone_id): + self.connection.set_context({'zone_id': zone_id}) + uri = API_ROOT + 'hostedzone/' + zone_id + data = self.connection.request(uri).object + elem = findall(element=data, xpath='HostedZone', + namespace=NAMESPACE)[0] + return self._to_zone(elem) + + def get_record(self, zone_id, record_id): + zone = self.get_zone(zone_id=zone_id) + record_type, name = record_id.split(':', 1) + if name: + full_name = ".".join((name, 
zone.domain)) + else: + full_name = zone.domain + self.connection.set_context({'zone_id': zone_id}) + params = urlencode({ + 'name': full_name, + 'type': record_type, + 'maxitems': '1' + }) + uri = API_ROOT + 'hostedzone/' + zone_id + '/rrset?' + params + data = self.connection.request(uri).object + + record = self._to_records(data=data, zone=zone)[0] + + # A cute aspect of the /rrset filters is that they are more pagination + # hints than filters!! + # So will return a result even if its not what you asked for. + record_type_num = self._string_to_record_type(record_type) + if record.name != name or record.type != record_type_num: + raise RecordDoesNotExistError(value='', driver=self, + record_id=record_id) + + return record + + def create_zone(self, domain, type='master', ttl=None, extra=None): + zone = ET.Element('CreateHostedZoneRequest', {'xmlns': NAMESPACE}) + ET.SubElement(zone, 'Name').text = domain + ET.SubElement(zone, 'CallerReference').text = str(uuid.uuid4()) + + if extra and 'Comment' in extra: + hzg = ET.SubElement(zone, 'HostedZoneConfig') + ET.SubElement(hzg, 'Comment').text = extra['Comment'] + + uri = API_ROOT + 'hostedzone' + data = ET.tostring(zone) + rsp = self.connection.request(uri, method='POST', data=data).object + + elem = findall(element=rsp, xpath='HostedZone', namespace=NAMESPACE)[0] + return self._to_zone(elem=elem) + + def delete_zone(self, zone, ex_delete_records=False): + self.connection.set_context({'zone_id': zone.id}) + + if ex_delete_records: + self.ex_delete_all_records(zone=zone) + + uri = API_ROOT + 'hostedzone/%s' % (zone.id) + response = self.connection.request(uri, method='DELETE') + return response.status in [httplib.OK] + + def create_record(self, name, zone, type, data, extra=None): + extra = extra or {} + batch = [('CREATE', name, type, data, extra)] + self._post_changeset(zone, batch) + id = ':'.join((self.RECORD_TYPE_MAP[type], name)) + return Record(id=id, name=name, type=type, data=data, zone=zone, + driver=self, 
extra=extra) + + def update_record(self, record, name=None, type=None, data=None, + extra=None): + name = name or record.name + type = type or record.type + extra = extra or record.extra + + if not extra: + extra = record.extra + + # Multiple value records need to be handled specially - we need to + # pass values for other records as well + multiple_value_record = record.extra.get('_multi_value', False) + other_records = record.extra.get('_other_records', []) + + if multiple_value_record and other_records: + self._update_multi_value_record(record=record, name=name, + type=type, data=data, + extra=extra) + else: + self._update_single_value_record(record=record, name=name, + type=type, data=data, + extra=extra) + + id = ':'.join((self.RECORD_TYPE_MAP[type], name)) + return Record(id=id, name=name, type=type, data=data, zone=record.zone, + driver=self, extra=extra) + + def delete_record(self, record): + try: + r = record + batch = [('DELETE', r.name, r.type, r.data, r.extra)] + self._post_changeset(record.zone, batch) + except InvalidChangeBatch: + raise RecordDoesNotExistError(value='', driver=self, + record_id=r.id) + return True + + def ex_create_multi_value_record(self, name, zone, type, data, extra=None): + """ + Create a record with multiple values with a single call. + + :return: A list of created records. + :rtype: ``list`` of :class:`libcloud.dns.base.Record` + """ + extra = extra or {} + + attrs = {'xmlns': NAMESPACE} + changeset = ET.Element('ChangeResourceRecordSetsRequest', attrs) + batch = ET.SubElement(changeset, 'ChangeBatch') + changes = ET.SubElement(batch, 'Changes') + + change = ET.SubElement(changes, 'Change') + ET.SubElement(change, 'Action').text = 'CREATE' + + rrs = ET.SubElement(change, 'ResourceRecordSet') + ET.SubElement(rrs, 'Name').text = name + '.' 
+ zone.domain + ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[type] + ET.SubElement(rrs, 'TTL').text = str(extra.get('ttl', '0')) + + rrecs = ET.SubElement(rrs, 'ResourceRecords') + + # Value is provided as a multi line string + values = [value.strip() for value in data.split('\n') if + value.strip()] + + for value in values: + rrec = ET.SubElement(rrecs, 'ResourceRecord') + ET.SubElement(rrec, 'Value').text = value + + uri = API_ROOT + 'hostedzone/' + zone.id + '/rrset' + data = ET.tostring(changeset) + self.connection.set_context({'zone_id': zone.id}) + self.connection.request(uri, method='POST', data=data) + + id = ':'.join((self.RECORD_TYPE_MAP[type], name)) + + records = [] + for value in values: + record = Record(id=id, name=name, type=type, data=value, zone=zone, + driver=self, extra=extra) + records.append(record) + + return record + + def ex_delete_all_records(self, zone): + """ + Remove all the records for the provided zone. + + :param zone: Zone to delete records for. 
+ :type zone: :class:`Zone` + """ + deletions = [] + for r in zone.list_records(): + if r.type in (RecordType.NS, RecordType.SOA): + continue + deletions.append(('DELETE', r.name, r.type, r.data, r.extra)) + + if deletions: + self._post_changeset(zone, deletions) + + def _update_single_value_record(self, record, name=None, type=None, + data=None, extra=None): + batch = [ + ('DELETE', record.name, record.type, record.data, record.extra), + ('CREATE', name, type, data, extra) + ] + + return self._post_changeset(record.zone, batch) + + def _update_multi_value_record(self, record, name=None, type=None, + data=None, extra=None): + other_records = record.extra.get('_other_records', []) + + attrs = {'xmlns': NAMESPACE} + changeset = ET.Element('ChangeResourceRecordSetsRequest', attrs) + batch = ET.SubElement(changeset, 'ChangeBatch') + changes = ET.SubElement(batch, 'Changes') + + # Delete existing records + change = ET.SubElement(changes, 'Change') + ET.SubElement(change, 'Action').text = 'DELETE' + + rrs = ET.SubElement(change, 'ResourceRecordSet') + ET.SubElement(rrs, 'Name').text = record.name + '.' + \ + record.zone.domain + ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[record.type] + ET.SubElement(rrs, 'TTL').text = str(record.extra.get('ttl', '0')) + + rrecs = ET.SubElement(rrs, 'ResourceRecords') + + rrec = ET.SubElement(rrecs, 'ResourceRecord') + ET.SubElement(rrec, 'Value').text = record.data + + for other_record in other_records: + rrec = ET.SubElement(rrecs, 'ResourceRecord') + ET.SubElement(rrec, 'Value').text = other_record['data'] + + # Re-create new (updated) records. Since we are updating a multi value + # record, only a single record is updated and others are left as is. + change = ET.SubElement(changes, 'Change') + ET.SubElement(change, 'Action').text = 'CREATE' + + rrs = ET.SubElement(change, 'ResourceRecordSet') + ET.SubElement(rrs, 'Name').text = name + '.' 
+ record.zone.domain + ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[type] + ET.SubElement(rrs, 'TTL').text = str(extra.get('ttl', '0')) + + rrecs = ET.SubElement(rrs, 'ResourceRecords') + + rrec = ET.SubElement(rrecs, 'ResourceRecord') + ET.SubElement(rrec, 'Value').text = data + + for other_record in other_records: + rrec = ET.SubElement(rrecs, 'ResourceRecord') + ET.SubElement(rrec, 'Value').text = other_record['data'] + + uri = API_ROOT + 'hostedzone/' + record.zone.id + '/rrset' + data = ET.tostring(changeset) + self.connection.set_context({'zone_id': record.zone.id}) + response = self.connection.request(uri, method='POST', data=data) + + return response.status == httplib.OK + + def _post_changeset(self, zone, changes_list): + attrs = {'xmlns': NAMESPACE} + changeset = ET.Element('ChangeResourceRecordSetsRequest', attrs) + batch = ET.SubElement(changeset, 'ChangeBatch') + changes = ET.SubElement(batch, 'Changes') + + for action, name, type_, data, extra in changes_list: + change = ET.SubElement(changes, 'Change') + ET.SubElement(change, 'Action').text = action + + rrs = ET.SubElement(change, 'ResourceRecordSet') + ET.SubElement(rrs, 'Name').text = name + '.' 
+ zone.domain + ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[type_] + ET.SubElement(rrs, 'TTL').text = str(extra.get('ttl', '0')) + + rrecs = ET.SubElement(rrs, 'ResourceRecords') + rrec = ET.SubElement(rrecs, 'ResourceRecord') + ET.SubElement(rrec, 'Value').text = data + + uri = API_ROOT + 'hostedzone/' + zone.id + '/rrset' + data = ET.tostring(changeset) + self.connection.set_context({'zone_id': zone.id}) + response = self.connection.request(uri, method='POST', data=data) + + return response.status == httplib.OK + + def _to_zones(self, data): + zones = [] + for element in data.findall(fixxpath(xpath='HostedZones/HostedZone', + namespace=NAMESPACE)): + zones.append(self._to_zone(element)) + + return zones + + def _to_zone(self, elem): + name = findtext(element=elem, xpath='Name', namespace=NAMESPACE) + id = findtext(element=elem, xpath='Id', + namespace=NAMESPACE).replace('/hostedzone/', '') + comment = findtext(element=elem, xpath='Config/Comment', + namespace=NAMESPACE) + resource_record_count = int(findtext(element=elem, + xpath='ResourceRecordSetCount', + namespace=NAMESPACE)) + + extra = {'Comment': comment, 'ResourceRecordSetCount': + resource_record_count} + + zone = Zone(id=id, domain=name, type='master', ttl=0, driver=self, + extra=extra) + return zone + + def _to_records(self, data, zone): + records = [] + elems = data.findall( + fixxpath(xpath='ResourceRecordSets/ResourceRecordSet', + namespace=NAMESPACE)) + for elem in elems: + record_set = elem.findall(fixxpath( + xpath='ResourceRecords/ResourceRecord', + namespace=NAMESPACE)) + record_count = len(record_set) + multiple_value_record = (record_count > 1) + + record_set_records = [] + + for index, record in enumerate(record_set): + # Need to special handling for records with multiple values for + # update to work correctly + record = self._to_record(elem=elem, zone=zone, index=index) + record.extra['_multi_value'] = multiple_value_record + + if multiple_value_record: + 
record.extra['_other_records'] = [] + + record_set_records.append(record) + + # Store reference to other records so update works correctly + if multiple_value_record: + for index in range(0, len(record_set_records)): + record = record_set_records[index] + + for other_index, other_record in \ + enumerate(record_set_records): + if index == other_index: + # Skip current record + continue + + extra = copy.deepcopy(other_record.extra) + extra.pop('_multi_value') + extra.pop('_other_records') + + item = {'name': other_record.name, + 'data': other_record.data, + 'type': other_record.type, + 'extra': extra} + record.extra['_other_records'].append(item) + + records.extend(record_set_records) + + return records + + def _to_record(self, elem, zone, index=0): + name = findtext(element=elem, xpath='Name', + namespace=NAMESPACE) + name = name[:-len(zone.domain) - 1] + + type = self._string_to_record_type(findtext(element=elem, xpath='Type', + namespace=NAMESPACE)) + ttl = int(findtext(element=elem, xpath='TTL', namespace=NAMESPACE)) + + value_elem = elem.findall( + fixxpath(xpath='ResourceRecords/ResourceRecord', + namespace=NAMESPACE))[index] + data = findtext(element=(value_elem), xpath='Value', + namespace=NAMESPACE) + + extra = {'ttl': ttl} + + if type == 'MX': + split = data.split() + priority, data = split + extra['priority'] = int(priority) + elif type == 'SRV': + split = data.split() + priority, weight, port, data = split + extra['priority'] = int(priority) + extra['weight'] = int(weight) + extra['port'] = int(port) + + id = ':'.join((self.RECORD_TYPE_MAP[type], name)) + record = Record(id=id, name=name, type=type, data=data, zone=zone, + driver=self, extra=extra) + return record + + def _get_more(self, rtype, **kwargs): + exhausted = False + last_key = None + while not exhausted: + items, last_key, exhausted = self._get_data(rtype, last_key, + **kwargs) + for item in items: + yield item + + def _get_data(self, rtype, last_key, **kwargs): + params = {} + if last_key: + 
params['name'] = last_key + path = API_ROOT + 'hostedzone' + + if rtype == 'zones': + response = self.connection.request(path, params=params) + transform_func = self._to_zones + elif rtype == 'records': + zone = kwargs['zone'] + path += '/%s/rrset' % (zone.id) + self.connection.set_context({'zone_id': zone.id}) + response = self.connection.request(path, params=params) + transform_func = self._to_records + + if response.status == httplib.OK: + is_truncated = findtext(element=response.object, + xpath='IsTruncated', + namespace=NAMESPACE) + exhausted = is_truncated != 'true' + last_key = findtext(element=response.object, + xpath='NextRecordName', + namespace=NAMESPACE) + items = transform_func(data=response.object, **kwargs) + return items, last_key, exhausted + else: + return [], None, True diff -Nru libcloud-0.5.0/libcloud/dns/drivers/zerigo.py libcloud-0.15.1/libcloud/dns/drivers/zerigo.py --- libcloud-0.5.0/libcloud/dns/drivers/zerigo.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/dns/drivers/zerigo.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,484 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__all__ = [ + 'ZerigoDNSDriver' +] + + +import copy +import base64 + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import b + +try: + from lxml import etree as ET +except ImportError: + from xml.etree import ElementTree as ET + +from libcloud.utils.misc import merge_valid_keys, get_new_obj +from libcloud.utils.xml import findtext, findall +from libcloud.common.base import XmlResponse, ConnectionUserAndKey +from libcloud.common.types import InvalidCredsError, LibcloudError +from libcloud.common.types import MalformedResponseError +from libcloud.dns.types import Provider, RecordType +from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError +from libcloud.dns.base import DNSDriver, Zone, Record + +API_HOST = 'ns.zerigo.com' +API_VERSION = '1.1' +API_ROOT = '/api/%s/' % (API_VERSION) + +VALID_ZONE_EXTRA_PARAMS = ['notes', 'tag-list', 'ns1', 'slave-nameservers'] +VALID_RECORD_EXTRA_PARAMS = ['notes', 'ttl', 'priority'] + +# Number of items per page (maximum limit is 1000) +ITEMS_PER_PAGE = 100 + + +class ZerigoError(LibcloudError): + def __init__(self, code, errors): + self.code = code + self.errors = errors or [] + + def __str__(self): + return 'Errors: %s' % (', '.join(self.errors)) + + def __repr__(self): + return ('' % ( + self.code, len(self.errors))) + + +class ZerigoDNSResponse(XmlResponse): + def success(self): + return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED] + + def parse_error(self): + status = int(self.status) + + if status == 401: + if not self.body: + raise InvalidCredsError(str(self.status) + ': ' + self.error) + else: + raise InvalidCredsError(self.body) + elif status == 404: + context = self.connection.context + if context['resource'] == 'zone': + raise ZoneDoesNotExistError(value='', driver=self, + zone_id=context['id']) + elif context['resource'] == 'record': + raise RecordDoesNotExistError(value='', driver=self, + record_id=context['id']) + elif status != 503: + try: + body = 
ET.XML(self.body) + except: + raise MalformedResponseError('Failed to parse XML', + body=self.body) + + errors = [] + for error in findall(element=body, xpath='error'): + errors.append(error.text) + + raise ZerigoError(code=status, errors=errors) + + return self.body + + +class ZerigoDNSConnection(ConnectionUserAndKey): + host = API_HOST + secure = True + responseCls = ZerigoDNSResponse + + def add_default_headers(self, headers): + auth_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key))) + headers['Authorization'] = 'Basic %s' % (auth_b64.decode('utf-8')) + return headers + + def request(self, action, params=None, data='', headers=None, + method='GET'): + if not headers: + headers = {} + if not params: + params = {} + + if method in ("POST", "PUT"): + headers = {'Content-Type': 'application/xml; charset=UTF-8'} + return super(ZerigoDNSConnection, self).request(action=action, + params=params, + data=data, + method=method, + headers=headers) + + +class ZerigoDNSDriver(DNSDriver): + type = Provider.ZERIGO + name = 'Zerigo DNS' + website = 'http://www.zerigo.com/' + connectionCls = ZerigoDNSConnection + + RECORD_TYPE_MAP = { + RecordType.A: 'A', + RecordType.AAAA: 'AAAA', + RecordType.CNAME: 'CNAME', + RecordType.GEO: 'GEO', + RecordType.MX: 'MX', + RecordType.NAPTR: 'NAPTR', + RecordType.NS: 'NS', + RecordType.PTR: 'PTR', + RecordType.REDIRECT: 'REDIRECT', + RecordType.SPF: 'SPF', + RecordType.SRV: 'SRV', + RecordType.TXT: 'TXT', + RecordType.URL: 'URL', + } + + def iterate_zones(self): + return self._get_more('zones') + + def iterate_records(self, zone): + return self._get_more('records', zone=zone) + + def get_zone(self, zone_id): + path = API_ROOT + 'zones/%s.xml' % (zone_id) + self.connection.set_context({'resource': 'zone', 'id': zone_id}) + data = self.connection.request(path).object + zone = self._to_zone(elem=data) + return zone + + def get_record(self, zone_id, record_id): + zone = self.get_zone(zone_id=zone_id) + 
self.connection.set_context({'resource': 'record', 'id': record_id}) + path = API_ROOT + 'hosts/%s.xml' % (record_id) + data = self.connection.request(path).object + record = self._to_record(elem=data, zone=zone) + return record + + def create_zone(self, domain, type='master', ttl=None, extra=None): + """ + Create a new zone. + + Provider API docs: + https://www.zerigo.com/docs/apis/dns/1.1/zones/create + + @inherits: :class:`DNSDriver.create_zone` + """ + path = API_ROOT + 'zones.xml' + zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl, + extra=extra) + data = self.connection.request(action=path, + data=ET.tostring(zone_elem), + method='POST').object + zone = self._to_zone(elem=data) + return zone + + def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None): + """ + Update an existing zone. + + Provider API docs: + https://www.zerigo.com/docs/apis/dns/1.1/zones/update + + @inherits: :class:`DNSDriver.update_zone` + """ + if domain: + raise LibcloudError('Domain cannot be changed', driver=self) + + path = API_ROOT + 'zones/%s.xml' % (zone.id) + zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl, + extra=extra) + response = self.connection.request(action=path, + data=ET.tostring(zone_elem), + method='PUT') + assert response.status == httplib.OK + + merged = merge_valid_keys(params=copy.deepcopy(zone.extra), + valid_keys=VALID_ZONE_EXTRA_PARAMS, + extra=extra) + updated_zone = get_new_obj(obj=zone, klass=Zone, + attributes={'type': type, + 'ttl': ttl, + 'extra': merged}) + return updated_zone + + def create_record(self, name, zone, type, data, extra=None): + """ + Create a new record. 
+ + Provider API docs: + https://www.zerigo.com/docs/apis/dns/1.1/hosts/create + + @inherits: :class:`DNSDriver.create_record` + """ + path = API_ROOT + 'zones/%s/hosts.xml' % (zone.id) + record_elem = self._to_record_elem(name=name, type=type, data=data, + extra=extra) + response = self.connection.request(action=path, + data=ET.tostring(record_elem), + method='POST') + assert response.status == httplib.CREATED + record = self._to_record(elem=response.object, zone=zone) + return record + + def update_record(self, record, name=None, type=None, data=None, + extra=None): + path = API_ROOT + 'hosts/%s.xml' % (record.id) + record_elem = self._to_record_elem(name=name, type=type, data=data, + extra=extra) + response = self.connection.request(action=path, + data=ET.tostring(record_elem), + method='PUT') + assert response.status == httplib.OK + + merged = merge_valid_keys(params=copy.deepcopy(record.extra), + valid_keys=VALID_RECORD_EXTRA_PARAMS, + extra=extra) + updated_record = get_new_obj(obj=record, klass=Record, + attributes={'type': type, + 'data': data, + 'extra': merged}) + return updated_record + + def delete_zone(self, zone): + path = API_ROOT + 'zones/%s.xml' % (zone.id) + self.connection.set_context({'resource': 'zone', 'id': zone.id}) + response = self.connection.request(action=path, method='DELETE') + return response.status == httplib.OK + + def delete_record(self, record): + path = API_ROOT + 'hosts/%s.xml' % (record.id) + self.connection.set_context({'resource': 'record', 'id': record.id}) + response = self.connection.request(action=path, method='DELETE') + return response.status == httplib.OK + + def ex_get_zone_by_domain(self, domain): + """ + Retrieve a zone object by the domain name. 
+ + :param domain: The domain which should be used + :type domain: ``str`` + + :rtype: :class:`Zone` + """ + path = API_ROOT + 'zones/%s.xml' % (domain) + self.connection.set_context({'resource': 'zone', 'id': domain}) + data = self.connection.request(path).object + zone = self._to_zone(elem=data) + return zone + + def ex_force_slave_axfr(self, zone): + """ + Force a zone transfer. + + :param zone: Zone which should be used. + :type zone: :class:`Zone` + + :rtype: :class:`Zone` + """ + path = API_ROOT + 'zones/%s/force_slave_axfr.xml' % (zone.id) + self.connection.set_context({'resource': 'zone', 'id': zone.id}) + response = self.connection.request(path, method='POST') + assert response.status == httplib.ACCEPTED + return zone + + def _to_zone_elem(self, domain=None, type=None, ttl=None, extra=None): + zone_elem = ET.Element('zone', {}) + + if domain: + domain_elem = ET.SubElement(zone_elem, 'domain') + domain_elem.text = domain + + if type: + ns_type_elem = ET.SubElement(zone_elem, 'ns-type') + + if type == 'master': + ns_type_elem.text = 'pri_sec' + elif type == 'slave': + if not extra or 'ns1' not in extra: + raise LibcloudError('ns1 extra attribute is required ' + + 'when zone type is slave', driver=self) + + ns_type_elem.text = 'sec' + ns1_elem = ET.SubElement(zone_elem, 'ns1') + ns1_elem.text = extra['ns1'] + elif type == 'std_master': + # TODO: Each driver should provide supported zone types + # Slave name servers are elsewhere + if not extra or 'slave-nameservers' not in extra: + raise LibcloudError('slave-nameservers extra ' + + 'attribute is required whenzone ' + + 'type is std_master', driver=self) + + ns_type_elem.text = 'pri' + slave_nameservers_elem = ET.SubElement(zone_elem, + 'slave-nameservers') + slave_nameservers_elem.text = extra['slave-nameservers'] + + if ttl: + default_ttl_elem = ET.SubElement(zone_elem, 'default-ttl') + default_ttl_elem.text = str(ttl) + + if extra and 'tag-list' in extra: + tags = extra['tag-list'] + + tags_elem = 
ET.SubElement(zone_elem, 'tag-list') + tags_elem.text = ' '.join(tags) + + return zone_elem + + def _to_record_elem(self, name=None, type=None, data=None, extra=None): + record_elem = ET.Element('host', {}) + + if name: + name_elem = ET.SubElement(record_elem, 'hostname') + name_elem.text = name + + if type is not None: + type_elem = ET.SubElement(record_elem, 'host-type') + type_elem.text = self.RECORD_TYPE_MAP[type] + + if data: + data_elem = ET.SubElement(record_elem, 'data') + data_elem.text = data + + if extra: + if 'ttl' in extra: + ttl_elem = ET.SubElement(record_elem, 'ttl', + {'type': 'integer'}) + ttl_elem.text = str(extra['ttl']) + + if 'priority' in extra: + # Only MX and SRV records support priority + priority_elem = ET.SubElement(record_elem, 'priority', + {'type': 'integer'}) + + priority_elem.text = str(extra['priority']) + + if 'notes' in extra: + notes_elem = ET.SubElement(record_elem, 'notes') + notes_elem.text = extra['notes'] + + return record_elem + + def _to_zones(self, elem): + zones = [] + + for item in findall(element=elem, xpath='zone'): + zone = self._to_zone(elem=item) + zones.append(zone) + + return zones + + def _to_zone(self, elem): + id = findtext(element=elem, xpath='id') + domain = findtext(element=elem, xpath='domain') + type = findtext(element=elem, xpath='ns-type') + type = 'master' if type.find('pri') == 0 else 'slave' + ttl = findtext(element=elem, xpath='default-ttl') + + hostmaster = findtext(element=elem, xpath='hostmaster') + custom_ns = findtext(element=elem, xpath='custom-ns') + custom_nameservers = findtext(element=elem, xpath='custom-nameservers') + notes = findtext(element=elem, xpath='notes') + nx_ttl = findtext(element=elem, xpath='nx-ttl') + slave_nameservers = findtext(element=elem, xpath='slave-nameservers') + tags = findtext(element=elem, xpath='tag-list') + tags = tags.split(' ') if tags else [] + + extra = {'hostmaster': hostmaster, 'custom-ns': custom_ns, + 'custom-nameservers': custom_nameservers, 'notes': 
notes, + 'nx-ttl': nx_ttl, 'slave-nameservers': slave_nameservers, + 'tags': tags} + zone = Zone(id=str(id), domain=domain, type=type, ttl=int(ttl), + driver=self, extra=extra) + return zone + + def _to_records(self, elem, zone): + records = [] + + for item in findall(element=elem, xpath='host'): + record = self._to_record(elem=item, zone=zone) + records.append(record) + + return records + + def _to_record(self, elem, zone): + id = findtext(element=elem, xpath='id') + name = findtext(element=elem, xpath='hostname') + type = findtext(element=elem, xpath='host-type') + type = self._string_to_record_type(type) + data = findtext(element=elem, xpath='data') + + notes = findtext(element=elem, xpath='notes', no_text_value=None) + state = findtext(element=elem, xpath='state', no_text_value=None) + fqdn = findtext(element=elem, xpath='fqdn', no_text_value=None) + priority = findtext(element=elem, xpath='priority', no_text_value=None) + ttl = findtext(element=elem, xpath='ttl', no_text_value=None) + + if not name: + name = None + + if ttl: + ttl = int(ttl) + + extra = {'notes': notes, 'state': state, 'fqdn': fqdn, + 'priority': priority, 'ttl': ttl} + + record = Record(id=id, name=name, type=type, data=data, + zone=zone, driver=self, extra=extra) + return record + + def _get_more(self, rtype, **kwargs): + exhausted = False + last_key = None + + while not exhausted: + items, last_key, exhausted = self._get_data(rtype, last_key, + **kwargs) + + for item in items: + yield item + + def _get_data(self, rtype, last_key, **kwargs): + # Note: last_key in this case really is a "last_page". + # TODO: Update base driver and change last_key to something more + # generic - e.g. 
marker + params = {} + params['per_page'] = ITEMS_PER_PAGE + params['page'] = last_key + 1 if last_key else 1 + + if rtype == 'zones': + path = API_ROOT + 'zones.xml' + response = self.connection.request(path) + transform_func = self._to_zones + elif rtype == 'records': + zone = kwargs['zone'] + path = API_ROOT + 'zones/%s/hosts.xml' % (zone.id) + self.connection.set_context({'resource': 'zone', 'id': zone.id}) + response = self.connection.request(path, params=params) + transform_func = self._to_records + + exhausted = False + result_count = int(response.headers.get('x-query-count', 0)) + + if (params['page'] * ITEMS_PER_PAGE) >= result_count: + exhausted = True + + if response.status == httplib.OK: + items = transform_func(elem=response.object, **kwargs) + return items, params['page'], exhausted + else: + return [], None, True diff -Nru libcloud-0.5.0/libcloud/dns/providers.py libcloud-0.15.1/libcloud/dns/providers.py --- libcloud-0.5.0/libcloud/dns/providers.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/dns/providers.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,49 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from libcloud.utils.misc import get_driver as get_provider_driver +from libcloud.utils.misc import set_driver as set_provider_driver +from libcloud.dns.types import Provider + +DRIVERS = { + Provider.DUMMY: + ('libcloud.dns.drivers.dummy', 'DummyDNSDriver'), + Provider.LINODE: + ('libcloud.dns.drivers.linode', 'LinodeDNSDriver'), + Provider.ZERIGO: + ('libcloud.dns.drivers.zerigo', 'ZerigoDNSDriver'), + Provider.RACKSPACE: + ('libcloud.dns.drivers.rackspace', 'RackspaceDNSDriver'), + Provider.HOSTVIRTUAL: + ('libcloud.dns.drivers.hostvirtual', 'HostVirtualDNSDriver'), + Provider.ROUTE53: + ('libcloud.dns.drivers.route53', 'Route53DNSDriver'), + Provider.GANDI: + ('libcloud.dns.drivers.gandi', 'GandiDNSDriver'), + Provider.GOOGLE: ('libcloud.dns.drivers.google', 'GoogleDNSDriver'), + # Deprecated + Provider.RACKSPACE_US: + ('libcloud.dns.drivers.rackspace', 'RackspaceUSDNSDriver'), + Provider.RACKSPACE_UK: + ('libcloud.dns.drivers.rackspace', 'RackspaceUKDNSDriver') +} + + +def get_driver(provider): + return get_provider_driver(DRIVERS, provider) + + +def set_driver(provider, module, klass): + return set_provider_driver(DRIVERS, provider, module, klass) diff -Nru libcloud-0.5.0/libcloud/dns/types.py libcloud-0.15.1/libcloud/dns/types.py --- libcloud-0.5.0/libcloud/dns/types.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/dns/types.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,115 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.common.types import LibcloudError + +__all__ = [ + 'Provider', + 'RecordType', + 'ZoneError', + 'ZoneDoesNotExistError', + 'ZoneAlreadyExistsError', + 'RecordError', + 'RecordDoesNotExistError', + 'RecordAlreadyExistsError' +] + + +class Provider(object): + DUMMY = 'dummy' + LINODE = 'linode' + RACKSPACE = 'rackspace' + ZERIGO = 'zerigo' + ROUTE53 = 'route53' + HOSTVIRTUAL = 'hostvirtual' + GANDI = 'gandi' + GOOGLE = 'google' + + # Deprecated + RACKSPACE_US = 'rackspace_us' + RACKSPACE_UK = 'rackspace_uk' + + +class RecordType(object): + """ + DNS record type. + """ + A = 'A' + AAAA = 'AAAA' + MX = 'MX' + NS = 'NS' + CNAME = 'CNAME' + DNAME = 'DNAME' + TXT = 'TXT' + PTR = 'PTR' + SOA = 'SOA' + SPF = 'SPF' + SRV = 'SRV' + PTR = 'PTR' + NAPTR = 'NAPTR' + REDIRECT = 'REDIRECT' + GEO = 'GEO' + URL = 'URL' + WKS = 'WKS' + LOC = 'LOC' + + +class ZoneError(LibcloudError): + error_type = 'ZoneError' + kwargs = ('zone_id', ) + + def __init__(self, value, driver, zone_id): + self.zone_id = zone_id + super(ZoneError, self).__init__(value=value, driver=driver) + + def __str__(self): + return self.__repr__() + + def __repr__(self): + return ('<%s in %s, zone_id=%s, value=%s>' % + (self.error_type, repr(self.driver), + self.zone_id, self.value)) + + +class ZoneDoesNotExistError(ZoneError): + error_type = 'ZoneDoesNotExistError' + + +class ZoneAlreadyExistsError(ZoneError): + error_type = 'ZoneAlreadyExistsError' + + +class RecordError(LibcloudError): + error_type = 'RecordError' + + def __init__(self, value, driver, record_id): + self.record_id = record_id 
+ super(RecordError, self).__init__(value=value, driver=driver) + + def __str__(self): + return self.__repr__() + + def __repr__(self): + return ('<%s in %s, record_id=%s, value=%s>' % + (self.error_type, repr(self.driver), + self.record_id, self.value)) + + +class RecordDoesNotExistError(RecordError): + error_type = 'RecordDoesNotExistError' + + +class RecordAlreadyExistsError(RecordError): + error_type = 'RecordAlreadyExistsError' diff -Nru libcloud-0.5.0/libcloud/drivers/brightbox.py libcloud-0.15.1/libcloud/drivers/brightbox.py --- libcloud-0.5.0/libcloud/drivers/brightbox.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/drivers/brightbox.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from libcloud.utils import deprecated_warning -from libcloud.compute.drivers.brightbox import * - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/drivers/cloudsigma.py libcloud-0.15.1/libcloud/drivers/cloudsigma.py --- libcloud-0.5.0/libcloud/drivers/cloudsigma.py 2011-05-21 11:07:38.000000000 +0000 +++ libcloud-0.15.1/libcloud/drivers/cloudsigma.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from libcloud.utils import deprecated_warning - -from libcloud.compute.drivers.cloudsigma import * - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/drivers/dreamhost.py libcloud-0.15.1/libcloud/drivers/dreamhost.py --- libcloud-0.5.0/libcloud/drivers/dreamhost.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/drivers/dreamhost.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from libcloud.utils import deprecated_warning -from libcloud.compute.drivers.dreamhost import * - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/drivers/dummy.py libcloud-0.15.1/libcloud/drivers/dummy.py --- libcloud-0.5.0/libcloud/drivers/dummy.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/drivers/dummy.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from libcloud.utils import deprecated_warning -from libcloud.compute.drivers.dummy import * - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/drivers/ec2.py libcloud-0.15.1/libcloud/drivers/ec2.py --- libcloud-0.5.0/libcloud/drivers/ec2.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/drivers/ec2.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from libcloud.utils import deprecated_warning -from libcloud.compute.drivers.ec2 import * - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/drivers/ecp.py libcloud-0.15.1/libcloud/drivers/ecp.py --- libcloud-0.5.0/libcloud/drivers/ecp.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/drivers/ecp.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from libcloud.utils import deprecated_warning -from libcloud.compute.drivers.ecp import * - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/drivers/elastichosts.py libcloud-0.15.1/libcloud/drivers/elastichosts.py --- libcloud-0.5.0/libcloud/drivers/elastichosts.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/drivers/elastichosts.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from libcloud.utils import deprecated_warning -from libcloud.compute.drivers.elastichosts import * - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/drivers/gogrid.py libcloud-0.15.1/libcloud/drivers/gogrid.py --- libcloud-0.5.0/libcloud/drivers/gogrid.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/drivers/gogrid.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from libcloud.utils import deprecated_warning -from libcloud.compute.drivers.gogrid import * - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/drivers/ibm_sbc.py libcloud-0.15.1/libcloud/drivers/ibm_sbc.py --- libcloud-0.5.0/libcloud/drivers/ibm_sbc.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/drivers/ibm_sbc.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from libcloud.utils import deprecated_warning -from libcloud.compute.drivers.ibm_sbc import * - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/drivers/__init__.py libcloud-0.15.1/libcloud/drivers/__init__.py --- libcloud-0.5.0/libcloud/drivers/__init__.py 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/libcloud/drivers/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -""" -Drivers for working with different providers -""" - -__all__ = [ - 'brightbox', - 'dummy', - 'ec2', - 'ecp', - 'elastichosts', - 'cloudsigma', - 'gogrid', - 'ibm_sbc', - 'linode', - 'opennebula', - 'rackspace', - 'rimuhosting', - 'slicehost', - 'softlayer', - 'vcloud', - 'voxel', - 'vpsnet' -] diff -Nru libcloud-0.5.0/libcloud/drivers/linode.py libcloud-0.15.1/libcloud/drivers/linode.py --- libcloud-0.5.0/libcloud/drivers/linode.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/drivers/linode.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from libcloud.utils import deprecated_warning -from libcloud.compute.drivers.linode import * - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/drivers/opennebula.py libcloud-0.15.1/libcloud/drivers/opennebula.py --- libcloud-0.5.0/libcloud/drivers/opennebula.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/drivers/opennebula.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -# Copyright 2002-2009, Distributed Systems Architecture Group, Universidad -# Complutense de Madrid (dsa-research.org) -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from libcloud.utils import deprecated_warning -from libcloud.compute.drivers.opennebula import * - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/drivers/rackspace.py libcloud-0.15.1/libcloud/drivers/rackspace.py --- libcloud-0.5.0/libcloud/drivers/rackspace.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/drivers/rackspace.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from libcloud.utils import deprecated_warning -from libcloud.compute.drivers.rackspace import * - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/drivers/rimuhosting.py libcloud-0.15.1/libcloud/drivers/rimuhosting.py --- libcloud-0.5.0/libcloud/drivers/rimuhosting.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/drivers/rimuhosting.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from libcloud.utils import deprecated_warning -from libcloud.compute.drivers.rimuhosting import * - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/drivers/slicehost.py libcloud-0.15.1/libcloud/drivers/slicehost.py --- libcloud-0.5.0/libcloud/drivers/slicehost.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/drivers/slicehost.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from libcloud.utils import deprecated_warning -from libcloud.compute.drivers.slicehost import * - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/drivers/softlayer.py libcloud-0.15.1/libcloud/drivers/softlayer.py --- libcloud-0.5.0/libcloud/drivers/softlayer.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/drivers/softlayer.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from libcloud.utils import deprecated_warning -from libcloud.compute.drivers.softlayer import * - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/drivers/vcloud.py libcloud-0.15.1/libcloud/drivers/vcloud.py --- libcloud-0.5.0/libcloud/drivers/vcloud.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/drivers/vcloud.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from libcloud.utils import deprecated_warning -from libcloud.compute.drivers.vcloud import * - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/drivers/voxel.py libcloud-0.15.1/libcloud/drivers/voxel.py --- libcloud-0.5.0/libcloud/drivers/voxel.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/drivers/voxel.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from libcloud.utils import deprecated_warning -from libcloud.compute.drivers.voxel import * - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/drivers/vpsnet.py libcloud-0.15.1/libcloud/drivers/vpsnet.py --- libcloud-0.5.0/libcloud/drivers/vpsnet.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/drivers/vpsnet.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from libcloud.utils import deprecated_warning -from libcloud.compute.drivers.vpsnet import * - -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/httplib_ssl.py libcloud-0.15.1/libcloud/httplib_ssl.py --- libcloud-0.5.0/libcloud/httplib_ssl.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/httplib_ssl.py 2013-12-12 09:01:49.000000000 +0000 @@ -16,7 +16,6 @@ Subclass for httplib.HTTPSConnection with optional certificate name verification, depending on libcloud.security settings. """ -import httplib import os import re import socket @@ -24,24 +23,29 @@ import warnings import libcloud.security +from libcloud.utils.py3 import httplib + class LibcloudHTTPSConnection(httplib.HTTPSConnection): - """LibcloudHTTPSConnection + """ + LibcloudHTTPSConnection Subclass of HTTPSConnection which verifies certificate names if and only if CA certificates are available. 
""" - verify = False # does not verify + verify = True # verify by default ca_cert = None # no default CA Certificate def __init__(self, *args, **kwargs): - """Constructor + """ + Constructor """ self._setup_verify() httplib.HTTPSConnection.__init__(self, *args, **kwargs) def _setup_verify(self): - """Setup Verify SSL or not + """ + Setup Verify SSL or not Reads security module's VERIFY_SSL_CERT and toggles whether the class overrides the connect() class method or runs the @@ -55,7 +59,8 @@ warnings.warn(libcloud.security.VERIFY_SSL_DISABLED_MSG) def _setup_ca_cert(self): - """Setup CA Certs + """ + Setup CA Certs Search in CA_CERTS_PATH for valid candidates and return first match. Otherwise, complain about certs @@ -66,18 +71,17 @@ ca_certs_available = [cert for cert in libcloud.security.CA_CERTS_PATH - if os.path.exists(cert)] + if os.path.exists(cert) and os.path.isfile(cert)] if ca_certs_available: # use first available certificate self.ca_cert = ca_certs_available[0] else: - # no certificates found; toggle verify to False - warnings.warn(libcloud.security.CA_CERTS_UNAVAILABLE_MSG) - self.ca_cert = None - self.verify = False + raise RuntimeError( + libcloud.security.CA_CERTS_UNAVAILABLE_ERROR_MSG) def connect(self): - """Connect + """ + Connect Checks if verification is toggled; if not, just call httplib.HTTPSConnection's connect @@ -104,7 +108,8 @@ raise ssl.SSLError('Failed to verify hostname') def _verify_hostname(self, hostname, cert): - """Verify hostname against peer cert + """ + Verify hostname against peer cert Check both commonName and entries in subjectAltName, using a rudimentary glob to dns regex check to find matches @@ -114,17 +119,11 @@ # replace * with alphanumeric and dash # replace . with literal . + # http://www.dns.net/dnsrd/trick.html#legal-hostnames valid_patterns = [ - re.compile( - pattern.replace( - r".", r"\." 
- ).replace( - r"*", r"[0-9A-Za-z]+" - ) - ) - for pattern - in (set(common_name) | set(alt_names)) - ] + re.compile('^' + pattern.replace(r".", r"\.") + .replace(r"*", r"[0-9A-Za-z\-]+") + '$') + for pattern in (set(common_name) | set(alt_names))] return any( pattern.search(hostname) @@ -132,7 +131,8 @@ ) def _get_subject_alt_names(self, cert): - """Get SubjectAltNames + """ + Get SubjectAltNames Retrieve 'subjectAltName' attributes from cert data structure """ @@ -145,7 +145,8 @@ return values def _get_common_name(self, cert): - """Get Common Name + """ + Get Common Name Retrieve 'commonName' attribute from cert data structure """ diff -Nru libcloud-0.5.0/libcloud/__init__.py libcloud-0.15.1/libcloud/__init__.py --- libcloud-0.5.0/libcloud/__init__.py 2011-05-21 20:21:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/__init__.py 2014-07-02 20:50:11.000000000 +0000 @@ -16,43 +16,50 @@ """ libcloud provides a unified interface to the cloud computing resources. -@var __version__: Current version of libcloud +:var __version__: Current version of libcloud """ -__all__ = ["__version__", "enable_debug"] +__all__ = ['__version__', 'enable_debug'] +__version__ = '0.15.1' + +import os + +try: + import paramiko + have_paramiko = True +except ImportError: + have_paramiko = False -__version__ = "0.5.0" def enable_debug(fo): """ Enable library wide debugging to a file-like object. - @param fo: Where to append debugging information - @type fo: File like object, only write operations are used. + :param fo: Where to append debugging information + :type fo: File like object, only write operations are used. 
""" - from libcloud.base import (ConnectionKey, - LoggingHTTPConnection, - LoggingHTTPSConnection) + from libcloud.common.base import (Connection, + LoggingHTTPConnection, + LoggingHTTPSConnection) LoggingHTTPSConnection.log = fo LoggingHTTPConnection.log = fo - ConnectionKey.conn_classes = (LoggingHTTPConnection, LoggingHTTPSConnection) + Connection.conn_classes = (LoggingHTTPConnection, + LoggingHTTPSConnection) + def _init_once(): """ Utility function that is ran once on Library import. - This checks for the LIBCLOUD_DEBUG enviroment variable, which if it exists + This checks for the LIBCLOUD_DEBUG environment variable, which if it exists is where we will log debug information about the provider transports. - - If LIBCLOUD_DEBUG is not a path, C{/tmp/libcloud_debug.log} is used by - default. """ - import os - d = os.getenv("LIBCLOUD_DEBUG") - if d: - if d.isdigit(): - d = "/tmp/libcloud_debug.log" - fo = open(d, "a") + path = os.getenv('LIBCLOUD_DEBUG') + if path: + fo = open(path, 'a') enable_debug(fo) + if have_paramiko: + paramiko.common.logging.basicConfig(level=paramiko.common.DEBUG) + _init_once() diff -Nru libcloud-0.5.0/libcloud/loadbalancer/base.py libcloud-0.15.1/libcloud/loadbalancer/base.py --- libcloud-0.5.0/libcloud/loadbalancer/base.py 2011-05-21 11:07:38.000000000 +0000 +++ libcloud-0.15.1/libcloud/loadbalancer/base.py 2013-11-29 12:35:04.000000000 +0000 @@ -13,201 +13,316 @@ # See the License for the specific language governing permissions and # limitations under the License. -from libcloud.common.base import ConnectionKey +from libcloud.common.base import ConnectionKey, BaseDriver from libcloud.common.types import LibcloudError __all__ = [ - "Member", - "LoadBalancer", - "Driver", - "Algorithm" - ] + 'Member', + 'LoadBalancer', + 'Algorithm', + 'Driver', + 'DEFAULT_ALGORITHM' +] + class Member(object): + """ + Represents a load balancer member. + """ + + def __init__(self, id, ip, port, balancer=None, extra=None): + """ + :param id: Member ID. 
+ :type id: ``str`` - def __init__(self, id, ip, port): + :param ip: IP address of this member. + :param ip: ``str`` + + :param port: Port of this member + :param port: ``str`` + + :param balancer: Balancer this member is attached to. (optional) + :param balancer: :class:`.LoadBalancer` + + :param extra: Provider specific attributes. + :type extra: ``dict`` + """ self.id = str(id) if id else None self.ip = ip self.port = port + self.balancer = balancer + self.extra = extra or {} def __repr__(self): return ('' % (self.id, - self.ip, self.port)) + self.ip, self.port)) -class Algorithm(object): - RANDOM = 0 - ROUND_ROBIN = 1 - LEAST_CONNECTIONS = 2 - -DEFAULT_ALGORITHM = Algorithm.ROUND_ROBIN class LoadBalancer(object): """ Provide a common interface for handling Load Balancers. """ - def __init__(self, id, name, state, ip, port, driver): + def __init__(self, id, name, state, ip, port, driver, extra=None): + """ + :param id: Load balancer ID. + :type id: ``str`` + + :param name: Load balancer name. + :type name: ``str`` + + :param state: State this loadbalancer is in. + :type state: :class:`libcloud.loadbalancer.types.State` + + :param ip: IP address of this loadbalancer. + :type ip: ``str`` + + :param port: Port of this loadbalancer. + :type port: ``int`` + + :param driver: Driver this loadbalancer belongs to. + :type driver: :class:`.Driver` + + :param extra: Provier specific attributes. 
(optional) + :type extra: ``dict`` + """ self.id = str(id) if id else None self.name = name self.state = state self.ip = ip self.port = port self.driver = driver + self.extra = extra or {} def attach_compute_node(self, node): - return self.driver.balancer_attach_compute_node(node) + return self.driver.balancer_attach_compute_node(balancer=self, + node=node) def attach_member(self, member): - return self.driver.balancer_attach_member(self, member) + return self.driver.balancer_attach_member(balancer=self, + member=member) def detach_member(self, member): - return self.driver.balancer_detach_member(self, member) + return self.driver.balancer_detach_member(balancer=self, + member=member) def list_members(self): - return self.driver.balancer_list_members(self) + return self.driver.balancer_list_members(balancer=self) + + def destroy(self): + return self.driver.destroy_balancer(balancer=self) def __repr__(self): return ('' % (self.id, self.name, self.state)) -class Driver(object): +class Algorithm(object): + """ + Represents a load balancing algorithm. """ - A base LBDriver class to derive from - This class is always subclassed by a specific driver. + RANDOM = 0 + ROUND_ROBIN = 1 + LEAST_CONNECTIONS = 2 + WEIGHTED_ROUND_ROBIN = 3 + WEIGHTED_LEAST_CONNECTIONS = 4 + +DEFAULT_ALGORITHM = Algorithm.ROUND_ROBIN + +class Driver(BaseDriver): + """ + A base Driver class to derive from + + This class is always subclassed by a specific driver. 
""" + name = None + website = None + connectionCls = ConnectionKey _ALGORITHM_TO_VALUE_MAP = {} _VALUE_TO_ALGORITHM_MAP = {} - def __init__(self, key, secret=None, secure=True): - self.key = key - self.secret = secret - args = [self.key] - - if self.secret is not None: - args.append(self.secret) - - args.append(secure) - - self.connection = self.connectionCls(*args) - self.connection.driver = self - self.connection.connect() + def __init__(self, key, secret=None, secure=True, host=None, + port=None, **kwargs): + super(Driver, self).__init__(key=key, secret=secret, secure=secure, + host=host, port=port, **kwargs) def list_protocols(self): """ Return a list of supported protocols. - """ - raise NotImplementedError, \ - 'list_protocols not implemented for this driver' + :rtype: ``list`` of ``str`` + """ + raise NotImplementedError( + 'list_protocols not implemented for this driver') def list_balancers(self): """ List all loadbalancers - @return: C{list} of L{LoadBalancer} objects - + :rtype: ``list`` of :class:`LoadBalancer` """ - - raise NotImplementedError, \ - 'list_balancers not implemented for this driver' + raise NotImplementedError( + 'list_balancers not implemented for this driver') def create_balancer(self, name, port, protocol, algorithm, members): """ Create a new load balancer instance - @keyword name: Name of the new load balancer (required) - @type name: C{str} - @keyword members: C{list} ofL{Member}s to attach to balancer - @type: C{list} of L{Member}s - @keyword protocol: Loadbalancer protocol, defaults to http. 
- @type: C{str} - @keyword port: Port the load balancer should listen on, defaults to 80 - @type port: C{str} - @keyword algorithm: Load balancing algorithm, defaults to - LBAlgorithm.ROUND_ROBIN - @type algorithm: C{LBAlgorithm} + :param name: Name of the new load balancer (required) + :type name: ``str`` - """ + :param port: Port the load balancer should listen on, defaults to 80 + :type port: ``str`` + + :param protocol: Loadbalancer protocol, defaults to http. + :type protocol: ``str`` + + :param members: list of Members to attach to balancer + :type members: ``list`` of :class:`Member` + + :param algorithm: Load balancing algorithm, defaults to ROUND_ROBIN. + :type algorithm: :class:`Algorithm` - raise NotImplementedError, \ - 'create_balancer not implemented for this driver' + :rtype: :class:`LoadBalancer` + """ + raise NotImplementedError( + 'create_balancer not implemented for this driver') def destroy_balancer(self, balancer): - """Destroy a load balancer + """ + Destroy a load balancer - @return: C{bool} True if the destroy was successful, otherwise False + :param balancer: LoadBalancer which should be used + :type balancer: :class:`LoadBalancer` + :return: ``True`` if the destroy was successful, otherwise ``False``. + :rtype: ``bool`` """ - raise NotImplementedError, \ - 'destroy_balancer not implemented for this driver' + raise NotImplementedError( + 'destroy_balancer not implemented for this driver') def get_balancer(self, balancer_id): """ - Return a C{LoadBalancer} object. + Return a :class:`LoadBalancer` object. 
+ + :param balancer_id: id of a load balancer you want to fetch + :type balancer_id: ``str`` + + :rtype: :class:`LoadBalancer` + """ - @keyword balancer_id: id of a load balancer you want to fetch - @type balancer_id: C{str} + raise NotImplementedError( + 'get_balancer not implemented for this driver') - @return: C{LoadBalancer} + def update_balancer(self, balancer, **kwargs): """ + Sets the name, algorithm, protocol, or port on a load balancer. + + :param balancer: LoadBalancer which should be used + :type balancer: :class:`LoadBalancer` + + :param name: New load balancer name + :type name: ``str`` + + :param algorithm: New load balancer algorithm + :type algorithm: :class:`Algorithm` + + :param protocol: New load balancer protocol + :type protocol: ``str`` - raise NotImplementedError, \ - 'get_balancer not implemented for this driver' + :param port: New load balancer port + :type port: ``int`` + + :rtype: :class:`LoadBalancer` + """ + raise NotImplementedError( + 'update_balancer not implemented for this driver') def balancer_attach_compute_node(self, balancer, node): - """ - Attach a compute node as a member to the load balancer. + """ + Attach a compute node as a member to the load balancer. - @keyword node: Member to join to the balancer - @type member: C{libcloud.compute.base.Node} - @return {Member} Member after joining the balancer. - """ + :param balancer: LoadBalancer which should be used + :type balancer: :class:`LoadBalancer` - return self.attach_member(Member(None, node.public_ip[0], balancer.port)) + :param node: Node to join to the balancer + :type node: :class:`Node` + + :return: Member after joining the balancer. 
+ :rtype: :class:`Member` + """ + + member = Member(id=None, ip=node.public_ips[0], port=balancer.port) + return self.balancer_attach_member(balancer, member) def balancer_attach_member(self, balancer, member): """ Attach a member to balancer - @keyword member: Member to join to the balancer - @type member: C{Member} - @return {Member} Member after joining the balancer. + :param balancer: LoadBalancer which should be used + :type balancer: :class:`LoadBalancer` + + :param member: Member to join to the balancer + :type member: :class:`Member` + + :return: Member after joining the balancer. + :rtype: :class:`Member` """ - raise NotImplementedError, \ - 'balancer_attach_member not implemented for this driver' + raise NotImplementedError( + 'balancer_attach_member not implemented for this driver') def balancer_detach_member(self, balancer, member): """ Detach member from balancer - @return: C{bool} True if member detach was successful, otherwise False + :param balancer: LoadBalancer which should be used + :type balancer: :class:`LoadBalancer` + + :param member: Member which should be used + :type member: :class:`Member` + :return: ``True`` if member detach was successful, otherwise ``False``. + :rtype: ``bool`` """ - raise NotImplementedError, \ - 'balancer_detach_member not implemented for this driver' + raise NotImplementedError( + 'balancer_detach_member not implemented for this driver') def balancer_list_members(self, balancer): """ Return list of members attached to balancer - @return: C{list} of L{Member}s + :param balancer: LoadBalancer which should be used + :type balancer: :class:`LoadBalancer` + :rtype: ``list`` of :class:`Member` """ - raise NotImplementedError, \ - 'balancer_list_members not implemented for this driver' + raise NotImplementedError( + 'balancer_list_members not implemented for this driver') + + def list_supported_algorithms(self): + """ + Return algorithms supported by this driver. 
+ + :rtype: ``list`` of ``str`` + """ + return list(self._ALGORITHM_TO_VALUE_MAP.keys()) def _value_to_algorithm(self, value): """ - Return C{LBAlgorithm} based on the value. + Return :class`Algorithm` based on the value. + + :param value: Algorithm name (e.g. http, tcp, ...). + :type value: ``str`` + + @rype :class:`Algorithm` """ try: return self._VALUE_TO_ALGORITHM_MAP[value] @@ -217,7 +332,12 @@ def _algorithm_to_value(self, algorithm): """ - Return value based in the algorithm (C{LBAlgorithm}). + Return string value for the provided algorithm. + + :param value: Algorithm enum. + :type value: :class:`Algorithm` + + @rype ``str`` """ try: return self._ALGORITHM_TO_VALUE_MAP[algorithm] diff -Nru libcloud-0.5.0/libcloud/loadbalancer/drivers/brightbox.py libcloud-0.15.1/libcloud/loadbalancer/drivers/brightbox.py --- libcloud-0.5.0/libcloud/loadbalancer/drivers/brightbox.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/loadbalancer/drivers/brightbox.py 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,136 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from libcloud.utils.py3 import httplib +from libcloud.common.brightbox import BrightboxConnection +from libcloud.loadbalancer.base import Driver, Algorithm, Member +from libcloud.loadbalancer.base import LoadBalancer +from libcloud.loadbalancer.types import State +from libcloud.utils.misc import reverse_dict + +API_VERSION = '1.0' + + +class BrightboxLBDriver(Driver): + connectionCls = BrightboxConnection + + name = 'Brightbox' + website = 'http://www.brightbox.co.uk/' + + LB_STATE_MAP = { + 'creating': State.PENDING, + 'active': State.RUNNING, + 'deleting': State.UNKNOWN, + 'deleted': State.UNKNOWN, + 'failing': State.UNKNOWN, + 'failed': State.UNKNOWN, + } + + _VALUE_TO_ALGORITHM_MAP = { + 'round-robin': Algorithm.ROUND_ROBIN, + 'least-connections': Algorithm.LEAST_CONNECTIONS + } + + _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP) + + def list_protocols(self): + return ['tcp', 'http'] + + def list_balancers(self): + data = self.connection.request('/%s/load_balancers' % API_VERSION) \ + .object + + return list(map(self._to_balancer, data)) + + def create_balancer(self, name, port, protocol, algorithm, members): + response = self._post( + '/%s/load_balancers' % API_VERSION, + {'name': name, + 'nodes': list(map(self._member_to_node, members)), + 'policy': self._algorithm_to_value(algorithm), + 'listeners': [{'in': port, 'out': port, 'protocol': protocol}], + 'healthcheck': {'type': protocol, 'port': port}} + ) + + return self._to_balancer(response.object) + + def destroy_balancer(self, balancer): + response = self.connection.request('/%s/load_balancers/%s' % + (API_VERSION, balancer.id), + method='DELETE') + + return response.status == httplib.ACCEPTED + + def get_balancer(self, balancer_id): + data = self.connection.request( + '/%s/load_balancers/%s' % (API_VERSION, balancer_id)).object + return self._to_balancer(data) + + def balancer_attach_compute_node(self, balancer, node): + return self.balancer_attach_member(balancer, node) + + def 
balancer_attach_member(self, balancer, member): + path = '/%s/load_balancers/%s/add_nodes' % (API_VERSION, balancer.id) + + self._post(path, {'nodes': [self._member_to_node(member)]}) + + return member + + def balancer_detach_member(self, balancer, member): + path = '/%s/load_balancers/%s/remove_nodes' % (API_VERSION, + balancer.id) + + response = self._post(path, {'nodes': [self._member_to_node(member)]}) + + return response.status == httplib.ACCEPTED + + def balancer_list_members(self, balancer): + path = '/%s/load_balancers/%s' % (API_VERSION, balancer.id) + + data = self.connection.request(path).object + + func = lambda data: self._node_to_member(data, balancer) + return list(map(func, data['nodes'])) + + def _post(self, path, data={}): + headers = {'Content-Type': 'application/json'} + + return self.connection.request(path, data=data, headers=headers, + method='POST') + + def _to_balancer(self, data): + return LoadBalancer( + id=data['id'], + name=data['name'], + state=self.LB_STATE_MAP.get(data['status'], State.UNKNOWN), + ip=self._public_ip(data), + port=data['listeners'][0]['in'], + driver=self.connection.driver + ) + + def _member_to_node(self, member): + return {'node': member.id} + + def _node_to_member(self, data, balancer): + return Member(id=data['id'], ip=None, port=None, balancer=balancer) + + def _public_ip(self, data): + if len(data['cloud_ips']) > 0: + ip = data['cloud_ips'][0]['public_ip'] + else: + ip = None + + return ip diff -Nru libcloud-0.5.0/libcloud/loadbalancer/drivers/cloudstack.py libcloud-0.15.1/libcloud/loadbalancer/drivers/cloudstack.py --- libcloud-0.5.0/libcloud/loadbalancer/drivers/cloudstack.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/loadbalancer/drivers/cloudstack.py 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,178 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.common.cloudstack import CloudStackDriverMixIn +from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm +from libcloud.loadbalancer.base import DEFAULT_ALGORITHM +from libcloud.loadbalancer.types import Provider +from libcloud.loadbalancer.types import State +from libcloud.utils.misc import reverse_dict + + +class CloudStackLBDriver(CloudStackDriverMixIn, Driver): + """Driver for CloudStack load balancers.""" + + api_name = 'cloudstack_lb' + name = 'CloudStack' + website = 'http://cloudstack.org/' + type = Provider.CLOUDSTACK + + _VALUE_TO_ALGORITHM_MAP = { + 'roundrobin': Algorithm.ROUND_ROBIN, + 'leastconn': Algorithm.LEAST_CONNECTIONS + } + _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP) + + LB_STATE_MAP = { + 'Active': State.RUNNING, + } + + def __init__(self, key, secret=None, secure=True, host=None, + path=None, port=None, *args, **kwargs): + """ + @inherits: :class:`Driver.__init__` + """ + host = host if host else self.host + path = path if path else self.path + + if path is not None: + self.path = path + + if host is not None: + self.host = host + + if (self.type == Provider.CLOUDSTACK) and (not host or not path): + raise Exception('When instantiating CloudStack driver directly ' + + 'you also need to provide host and path 
argument') + + super(CloudStackLBDriver, self).__init__(key=key, secret=secret, + secure=secure, + host=host, port=port) + + def list_protocols(self): + """ + We don't actually have any protocol awareness beyond TCP. + + :rtype: ``list`` of ``str`` + """ + return ['tcp'] + + def list_balancers(self): + balancers = self._sync_request(command='listLoadBalancerRules', + method='GET') + balancers = balancers.get('loadbalancerrule', []) + return [self._to_balancer(balancer) for balancer in balancers] + + def get_balancer(self, balancer_id): + balancer = self._sync_request(command='listLoadBalancerRules', + params={'id': balancer_id}, + method='GET') + balancer = balancer.get('loadbalancerrule', []) + if not balancer: + raise Exception("no such load balancer: " + str(balancer_id)) + return self._to_balancer(balancer[0]) + + def create_balancer(self, name, members, protocol='http', port=80, + algorithm=DEFAULT_ALGORITHM, location=None, + private_port=None): + """ + @inherits: :class:`Driver.create_balancer` + + :param location: Location + :type location: :class:`NodeLocation` + + :param private_port: Private port + :type private_port: ``int`` + """ + if location is None: + locations = self._sync_request(command='listZones', method='GET') + location = locations['zone'][0]['id'] + else: + location = location.id + if private_port is None: + private_port = port + + result = self._async_request(command='associateIpAddress', + params={'zoneid': location}, + method='GET') + public_ip = result['ipaddress'] + + result = self._sync_request( + command='createLoadBalancerRule', + params={'algorithm': self._ALGORITHM_TO_VALUE_MAP[algorithm], + 'name': name, + 'privateport': private_port, + 'publicport': port, + 'publicipid': public_ip['id']}, + method='GET') + + balancer = self._to_balancer(result['loadbalancer']) + + for member in members: + balancer.attach_member(member) + + return balancer + + def destroy_balancer(self, balancer): + 
self._async_request(command='deleteLoadBalancerRule', + params={'id': balancer.id}, + method='GET') + self._async_request(command='disassociateIpAddress', + params={'id': balancer.ex_public_ip_id}, + method='GET') + + def balancer_attach_member(self, balancer, member): + member.port = balancer.ex_private_port + self._async_request(command='assignToLoadBalancerRule', + params={'id': balancer.id, + 'virtualmachineids': member.id}, + method='GET') + return True + + def balancer_detach_member(self, balancer, member): + self._async_request(command='removeFromLoadBalancerRule', + params={'id': balancer.id, + 'virtualmachineids': member.id}, + method='GET') + return True + + def balancer_list_members(self, balancer): + members = self._sync_request(command='listLoadBalancerRuleInstances', + params={'id': balancer.id}, + method='GET') + members = members['loadbalancerruleinstance'] + return [self._to_member(m, balancer.ex_private_port, balancer) + for m in members] + + def _to_balancer(self, obj): + balancer = LoadBalancer( + id=obj['id'], + name=obj['name'], + state=self.LB_STATE_MAP.get(obj['state'], State.UNKNOWN), + ip=obj['publicip'], + port=obj['publicport'], + driver=self.connection.driver + ) + balancer.ex_private_port = obj['privateport'] + balancer.ex_public_ip_id = obj['publicipid'] + return balancer + + def _to_member(self, obj, port, balancer): + return Member( + id=obj['id'], + ip=obj['nic'][0]['ipaddress'], + port=port, + balancer=balancer + ) diff -Nru libcloud-0.5.0/libcloud/loadbalancer/drivers/elb.py libcloud-0.15.1/libcloud/loadbalancer/drivers/elb.py --- libcloud-0.5.0/libcloud/loadbalancer/drivers/elb.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/loadbalancer/drivers/elb.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,350 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = [ + 'ElasticLBDriver' +] + + +from libcloud.utils.py3 import httplib +from libcloud.utils.xml import findtext, findall +from libcloud.loadbalancer.types import State +from libcloud.loadbalancer.base import Driver, LoadBalancer, Member +from libcloud.common.aws import AWSGenericResponse, SignedAWSConnection + + +VERSION = '2012-06-01' +HOST = 'elasticloadbalancing.%s.amazonaws.com' +ROOT = '/%s/' % (VERSION) +NS = 'http://elasticloadbalancing.amazonaws.com/doc/%s/' % (VERSION, ) + + +class ELBResponse(AWSGenericResponse): + """ + Amazon ELB response class. 
+ """ + namespace = NS + exceptions = {} + xpath = 'Error' + + +class ELBConnection(SignedAWSConnection): + version = VERSION + host = HOST + responseCls = ELBResponse + + +class ElasticLBDriver(Driver): + name = 'Amazon Elastic Load Balancing' + website = 'http://aws.amazon.com/elasticloadbalancing/' + connectionCls = ELBConnection + + def __init__(self, access_id, secret, region): + super(ElasticLBDriver, self).__init__(access_id, secret) + self.region = region + self.connection.host = HOST % (region) + + def list_protocols(self): + return ['tcp', 'ssl', 'http', 'https'] + + def list_balancers(self): + params = {'Action': 'DescribeLoadBalancers'} + data = self.connection.request(ROOT, params=params).object + return self._to_balancers(data) + + def create_balancer(self, name, port, protocol, algorithm, members, + ex_members_availability_zones=None): + if ex_members_availability_zones is None: + ex_members_availability_zones = ['a'] + + params = { + 'Action': 'CreateLoadBalancer', + 'LoadBalancerName': name, + 'Listeners.member.1.InstancePort': str(port), + 'Listeners.member.1.InstanceProtocol': protocol.upper(), + 'Listeners.member.1.LoadBalancerPort': str(port), + 'Listeners.member.1.Protocol': protocol.upper(), + } + + for i, z in enumerate(ex_members_availability_zones): + zone = ''.join((self.region, z)) + params['AvailabilityZones.member.%d' % (i + 1)] = zone + + data = self.connection.request(ROOT, params=params).object + + balancer = LoadBalancer( + id=name, + name=name, + state=State.PENDING, + ip=findtext(element=data, xpath='DNSName', namespace=NS), + port=port, + driver=self.connection.driver + ) + balancer._members = [] + return balancer + + def destroy_balancer(self, balancer): + params = { + 'Action': 'DeleteLoadBalancer', + 'LoadBalancerName': balancer.id + } + self.connection.request(ROOT, params=params) + return True + + def get_balancer(self, balancer_id): + params = { + 'Action': 'DescribeLoadBalancers', + 'LoadBalancerNames.member.1': 
balancer_id + } + data = self.connection.request(ROOT, params=params).object + return self._to_balancers(data)[0] + + def balancer_attach_compute_node(self, balancer, node): + params = { + 'Action': 'RegisterInstancesWithLoadBalancer', + 'LoadBalancerName': balancer.id, + 'Instances.member.1.InstanceId': node.id + } + self.connection.request(ROOT, params=params) + balancer._members.append(Member(node.id, None, None, balancer=self)) + + def balancer_detach_member(self, balancer, member): + params = { + 'Action': 'DeregisterInstancesFromLoadBalancer', + 'LoadBalancerName': balancer.id, + 'Instances.member.1.InstanceId': member.id + } + self.connection.request(ROOT, params=params) + balancer._members = [m for m in balancer._members if m.id != member.id] + return True + + def balancer_list_members(self, balancer): + return balancer._members + + def ex_list_balancer_policies(self, balancer): + """ + Return a list of policy description string. + + :rtype: ``list`` of ``str`` + """ + params = { + 'Action': 'DescribeLoadBalancerPolicies', + 'LoadBalancerName': balancer.id + } + + data = self.connection.request(ROOT, params=params).object + return self._to_policies(data) + + def ex_list_balancer_policy_types(self): + """ + Return a list of policy type description string. + + :rtype: ``list`` of ``str`` + """ + params = {'Action': 'DescribeLoadBalancerPolicyTypes'} + + data = self.connection.request(ROOT, params=params).object + return self._to_policy_types(data) + + def ex_create_balancer_policy(self, name, policy_name, policy_type, + policy_attributes=None): + """ + Create a new load balancer policy + + :param name: Balancer name to create the policy for + :type name: ``str`` + + :param policy_name: policy to be created + :type policy_name: ``str`` + + :param policy_type: policy type being used to create policy. 
+ :type policy_type: ``str`` + + :param policy_attributes: Each list contain values, ['AttributeName', + 'value'] + :type policy_attributes: ``PolicyAttribute list`` + """ + params = { + 'Action': 'CreateLoadBalancerPolicy', + 'LoadBalancerName': name, + 'PolicyName': policy_name, + 'PolicyTypeName': policy_type + } + + if policy_attributes is not None: + for index, (name, value) in enumerate( + policy_attributes.iteritems(), 1): + params['PolicyAttributes.member.%d. \ + AttributeName' % (index)] = name + params['PolicyAttributes.member.%d. \ + AttributeValue' % (index)] = value + + response = self.connection.request(ROOT, params=params) + return response.status == httplib.OK + + def ex_delete_balancer_policy(self, name, policy_name): + """ + Delete a load balancer policy + + :param name: balancer name for which policy will be deleted + :type name: ``str`` + + :param policy_name: The Mnemonic name for the policy being deleted + :type policy_name: ``str`` + """ + params = { + 'Action': 'DeleteLoadBalancerPolicy', + 'LoadBalancerName': name, + 'PolicyName': policy_name + } + + response = self.connection.request(ROOT, params=params) + return response.status == httplib.OK + + def ex_set_balancer_policies_listener(self, name, port, policies): + """ + Associates, updates, or disables a policy with a listener on + the load balancer + + :param name: balancer name to set policies for listerner + :type name: ``str`` + + :param port: port to use + :type port: ``str`` + + :param policies: List of policies to be associated with the balancer + :type policies: ``string list`` + """ + params = { + 'Action': 'SetLoadBalancerPoliciesOfListener', + 'LoadBalancerName': name, + 'LoadBalancerPort': str(port) + } + + if policies: + params = self._create_list_params(params, policies, + 'PolicyNames.member.%d') + + response = self.connection.request(ROOT, params=params) + return response.status == httplib.OK + + def ex_set_balancer_policies_backend_server(self, name, instance_port, + 
policies): + """ + Replaces the current set of policies associated with a port on + which the back-end server is listening with a new set of policies + + :param name: balancer name to set policies of backend server + :type name: ``str`` + + :param instance_port: Instance Port + :type instance_port: ``int`` + + :param policies: List of policies to be associated with the balancer + :type policies: ``string list` + """ + params = { + 'Action': 'SetLoadBalancerPoliciesForBackendServer', + 'LoadBalancerName': name, + 'InstancePort': str(instance_port) + } + + if policies: + params = self._create_list_params(params, policies, + 'PolicyNames.member.%d') + + response = self.connection.request(ROOT, params=params) + return response.status == httplib.OK + + def ex_create_balancer_listeners(self, name, listeners=None): + """ + Creates one or more listeners on a load balancer for the specified port + + :param name: The mnemonic name associated with the load balancer + :type name: ``str`` + + :param listeners: Each tuple contain values, (LoadBalancerPortNumber, + InstancePortNumber, Protocol,[SSLCertificateId]) + :type listeners: ``list of tuple` + """ + params = { + 'Action': 'CreateLoadBalancerListeners', + 'LoadBalancerName': name + } + + for index, listener in enumerate(listeners): + i = index + 1 + protocol = listener[2].upper() + params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0] + params['Listeners.member.%d.InstancePort' % i] = listener[1] + params['Listeners.member.%d.Protocol' % i] = listener[2] + if protocol == 'HTTPS' or protocol == 'SSL': + params['Listeners.member.%d. 
\ + SSLCertificateId' % i] = listener[3] + else: + return False + + response = self.connection.request(ROOT, params=params) + return response.status == httplib.OK + + def _to_policies(self, data): + xpath = 'DescribeLoadBalancerPoliciesResult/PolicyDescriptions/member' + return [findtext(element=el, xpath='PolicyName', namespace=NS) + for el in findall(element=data, xpath=xpath, namespace=NS)] + + def _to_policy_types(self, data): + xpath = 'DescribeLoadBalancerPolicyTypesResult/' + xpath += 'PolicyTypeDescriptions/member' + return [findtext(element=el, xpath='PolicyTypeName', namespace=NS) + for el in findall(element=data, xpath=xpath, namespace=NS)] + + def _to_balancers(self, data): + xpath = 'DescribeLoadBalancersResult/LoadBalancerDescriptions/member' + return [self._to_balancer(el) + for el in findall(element=data, xpath=xpath, namespace=NS)] + + def _to_balancer(self, el): + name = findtext(element=el, xpath='LoadBalancerName', namespace=NS) + dns_name = findtext(el, xpath='DNSName', namespace=NS) + port = findtext(el, xpath='LoadBalancerPort', namespace=NS) + + balancer = LoadBalancer( + id=name, + name=name, + state=State.UNKNOWN, + ip=dns_name, + port=port, + driver=self.connection.driver + ) + + xpath = 'Instances/member/InstanceId' + members = findall(element=el, xpath=xpath, namespace=NS) + balancer._members = [] + + for m in members: + balancer._members.append(Member(m.text, None, None, + balancer=balancer)) + + return balancer + + def _create_list_params(self, params, items, label): + """ + return parameter list + """ + if isinstance(items, str): + items = [items] + for index, item in enumerate(items): + params[label % (index + 1)] = item + return params diff -Nru libcloud-0.5.0/libcloud/loadbalancer/drivers/gce.py libcloud-0.15.1/libcloud/loadbalancer/drivers/gce.py --- libcloud-0.5.0/libcloud/loadbalancer/drivers/gce.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/loadbalancer/drivers/gce.py 2014-05-26 15:42:51.000000000 +0000 
@@ -0,0 +1,362 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import simplejson as json +except ImportError: + import json # NOQA + +from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm +from libcloud.compute.drivers.gce import GCEConnection, GCENodeDriver + +# GCE doesn't actually give you a algorithm choice, but this is here simply as +# the closest match. The actual algorithm is described here: +# https://developers.google.com/compute/docs/load-balancing/#overview +DEFAULT_ALGORITHM = Algorithm.RANDOM + + +class GCELBDriver(Driver): + connectionCls = GCEConnection + apiname = 'googleapis' + name = 'Google Compute Engine Load Balancer' + website = 'https://cloud.google.com/' + + _VALUE_TO_ALGORITHM_MAP = { + 'RANDOM': Algorithm.RANDOM + } + + def __init__(self, *args, **kwargs): + + if kwargs.get('gce_driver'): + self.gce = kwargs['gce_driver'] + else: + self.gce = GCENodeDriver(*args, **kwargs) + + self.connection = self.gce.connection + + def _get_node_from_ip(self, ip): + """ + Return the node object that matches a given public IP address. + + :param ip: Public IP address to search for + :type ip: ``str`` + + :return: Node object that has the given IP, or None if not found. 
+ :rtype: :class:`Node` or None + """ + all_nodes = self.gce.list_nodes(ex_zone='all') + for node in all_nodes: + if ip in node.public_ips: + return node + return None + + def list_protocols(self): + """ + Return a list of supported protocols. + + For GCE, this is simply a hardcoded list. + + :rtype: ``list`` of ``str`` + """ + return ['TCP', 'UDP'] + + def list_balancers(self, ex_region=None): + """ + List all loadbalancers + + :keyword ex_region: The region to return balancers from. If None, + will default to self.region. If 'all', will + return all balancers. + :type ex_region: ``str`` or :class:`GCERegion` or ``None`` + + :rtype: ``list`` of :class:`LoadBalancer` + """ + balancers = [] + for fwr in self.gce.ex_list_forwarding_rules(region=ex_region): + balancers.append(self._forwarding_rule_to_loadbalancer(fwr)) + return balancers + + def create_balancer(self, name, port, protocol, algorithm, members, + ex_region=None, ex_healthchecks=None, ex_address=None): + """ + Create a new load balancer instance. + + For GCE, this means creating a forwarding rule and a matching target + pool, then adding the members to the target pool. + + :param name: Name of the new load balancer (required) + :type name: ``str`` + + :param port: Port or range of ports the load balancer should listen + on, defaults to all ports. Examples: '80', '5000-5999' + :type port: ``str`` + + :param protocol: Load balancer protocol. Should be 'tcp' or 'udp', + defaults to 'tcp'. + :type protocol: ``str`` + + :param members: List of Members to attach to balancer. Can be Member + objects or Node objects. Node objects are preferred + for GCE, but Member objects are accepted to comply + with the established libcloud API. Note that the + 'port' attribute of the members is ignored. + :type members: ``list`` of :class:`Member` or :class:`Node` + + :param algorithm: Load balancing algorithm. Ignored for GCE which + uses a hashing-based algorithm. 
+ :type algorithm: :class:`Algorithm` or ``None`` + + :keyword ex_region: Optional region to create the load balancer in. + Defaults to the default region of the GCE Node + Driver. + :type ex_region: C{GCERegion} or ``str`` + + :keyword ex_healthchecks: Optional list of healthcheck objects or + names to add to the load balancer. + :type ex_healthchecks: ``list`` of :class:`GCEHealthCheck` or + ``str`` + + :keyword ex_address: Optional static address object to be assigned to + the load balancer. + :type ex_address: C{GCEAddress} + + :return: LoadBalancer object + :rtype: :class:`LoadBalancer` + """ + node_list = [] + for member in members: + # Member object + if hasattr(member, 'ip'): + if member.extra.get('node'): + node_list.append(member.extra['node']) + else: + node_list.append(self._get_node_from_ip(member.ip)) + # Node object + elif hasattr(member, 'name'): + node_list.append(member) + # Assume it's a node name otherwise + else: + node_list.append(self.gce.ex_get_node(member, 'all')) + + # Create Target Pool + tp_name = '%s-tp' % name + targetpool = self.gce.ex_create_targetpool( + tp_name, region=ex_region, healthchecks=ex_healthchecks, + nodes=node_list) + + # Create the Forwarding rule, but if it fails, delete the target pool. + try: + forwarding_rule = self.gce.ex_create_forwarding_rule( + name, targetpool, region=ex_region, protocol=protocol, + port_range=port, address=ex_address) + except: + targetpool.destroy() + raise + + # Reformat forwarding rule to LoadBalancer object + return self._forwarding_rule_to_loadbalancer(forwarding_rule) + + def destroy_balancer(self, balancer): + """ + Destroy a load balancer. + + For GCE, this means destroying the associated forwarding rule, then + destroying the target pool that was attached to the forwarding rule. 
+ + :param balancer: LoadBalancer which should be used + :type balancer: :class:`LoadBalancer` + + :return: True if successful + :rtype: ``bool`` + """ + destroy = balancer.extra['forwarding_rule'].destroy() + if destroy: + tp_destroy = balancer.extra['targetpool'].destroy() + return tp_destroy + else: + return destroy + + def get_balancer(self, balancer_id): + """ + Return a :class:`LoadBalancer` object. + + :param balancer_id: Name of load balancer you wish to fetch. For GCE, + this is the name of the associated forwarding + rule. + :param balancer_id: ``str`` + + :rtype: :class:`LoadBalancer` + """ + fwr = self.gce.ex_get_forwarding_rule(balancer_id) + return self._forwarding_rule_to_loadbalancer(fwr) + + def balancer_attach_compute_node(self, balancer, node): + """ + Attach a compute node as a member to the load balancer. + + :param balancer: LoadBalancer which should be used + :type balancer: :class:`LoadBalancer` + + :param node: Node to join to the balancer + :type node: :class:`Node` + + :return: Member after joining the balancer. + :rtype: :class:`Member` + """ + add_node = balancer.extra['targetpool'].add_node(node) + if add_node: + return self._node_to_member(node, balancer) + + def balancer_attach_member(self, balancer, member): + """ + Attach a member to balancer + + :param balancer: LoadBalancer which should be used + :type balancer: :class:`LoadBalancer` + + :param member: Member to join to the balancer + :type member: :class:`Member` + + :return: Member after joining the balancer. 
+ :rtype: :class:`Member` + """ + node = member.extra.get('node') or self._get_node_from_ip(member.ip) + add_node = balancer.extra['targetpool'].add_node(node) + if add_node: + return self._node_to_member(node, balancer) + + def balancer_detach_member(self, balancer, member): + """ + Detach member from balancer + + :param balancer: LoadBalancer which should be used + :type balancer: :class:`LoadBalancer` + + :param member: Member which should be used + :type member: :class:`Member` + + :return: True if member detach was successful, otherwise False + :rtype: ``bool`` + """ + node = member.extra.get('node') or self._get_node_from_ip(member.ip) + remove_node = balancer.extra['targetpool'].remove_node(node) + return remove_node + + def balancer_list_members(self, balancer): + """ + Return list of members attached to balancer + + :param balancer: LoadBalancer which should be used + :type balancer: :class:`LoadBalancer` + + :rtype: ``list`` of :class:`Member` + """ + return [self._node_to_member(n, balancer) for n in + balancer.extra['targetpool'].nodes] + + def ex_create_healthcheck(self, *args, **kwargs): + return self.gce.ex_create_healthcheck(*args, **kwargs) + + def ex_list_healthchecks(self): + return self.gce.ex_list_healthchecks() + + def ex_balancer_attach_healthcheck(self, balancer, healthcheck): + """ + Attach a healthcheck to balancer + + :param balancer: LoadBalancer which should be used + :type balancer: :class:`LoadBalancer` + + :param healthcheck: Healthcheck to add + :type healthcheck: :class:`GCEHealthCheck` + + :return: True if successful + :rtype: ``bool`` + """ + return balancer.extra['targetpool'].add_healthcheck(healthcheck) + + def ex_balancer_detach_healthcheck(self, balancer, healthcheck): + """ + Detach healtcheck from balancer + + :param balancer: LoadBalancer which should be used + :type balancer: :class:`LoadBalancer` + + :param healthcheck: Healthcheck to remove + :type healthcheck: :class:`GCEHealthCheck` + + :return: True if successful + 
:rtype: ``bool`` + """ + return balancer.extra['targetpool'].remove_healthcheck(healthcheck) + + def ex_balancer_list_healthchecks(self, balancer): + """ + Return list of healthchecks attached to balancer + + :param balancer: LoadBalancer which should be used + :type balancer: :class:`LoadBalancer` + + :rtype: ``list`` of :class:`HealthChecks` + """ + return balancer.extra['healthchecks'] + + def _node_to_member(self, node, balancer): + """ + Return a Member object based on a Node. + + :param node: Node object + :type node: :class:`Node` + + :keyword balancer: The balancer the member is attached to. + :type balancer: :class:`LoadBalancer` + + :return: Member object + :rtype: :class:`Member` + """ + # A balancer can have a node as a member, even if the node doesn't + # exist. In this case, 'node' is simply a string to where the resource + # would be found if it was there. + if hasattr(node, 'name'): + member_id = node.name + member_ip = node.public_ips[0] + else: + member_id = node + member_ip = None + + extra = {'node': node} + return Member(id=member_id, ip=member_ip, port=balancer.port, + balancer=balancer, extra=extra) + + def _forwarding_rule_to_loadbalancer(self, forwarding_rule): + """ + Return a Load Balancer object based on a GCEForwardingRule object. 
+ + :param forwarding_rule: ForwardingRule object + :type forwarding_rule: :class:`GCEForwardingRule` + + :return: LoadBalancer object + :rtype: :class:`LoadBalancer` + """ + extra = {} + extra['forwarding_rule'] = forwarding_rule + extra['targetpool'] = forwarding_rule.targetpool + extra['healthchecks'] = forwarding_rule.targetpool.healthchecks + + return LoadBalancer(id=forwarding_rule.id, + name=forwarding_rule.name, state=None, + ip=forwarding_rule.address, + port=forwarding_rule.extra['portRange'], + driver=self, extra=extra) diff -Nru libcloud-0.5.0/libcloud/loadbalancer/drivers/gogrid.py libcloud-0.15.1/libcloud/loadbalancer/drivers/gogrid.py --- libcloud-0.5.0/libcloud/loadbalancer/drivers/gogrid.py 2011-05-21 20:10:20.000000000 +0000 +++ libcloud-0.15.1/libcloud/loadbalancer/drivers/gogrid.py 2013-11-29 12:35:04.000000000 +0000 @@ -13,21 +13,25 @@ # See the License for the specific language governing permissions and # limitations under the License. +import sys import time -import httplib + +from libcloud.utils.py3 import httplib try: - import json -except ImportError: import simplejson as json +except ImportError: + import json +from libcloud.utils.misc import reverse_dict from libcloud.common.types import LibcloudError -from libcloud.utils import reverse_dict -from libcloud.common.gogrid import GoGridConnection, GoGridResponse, BaseGoGridDriver +from libcloud.common.gogrid import GoGridConnection, GoGridResponse,\ + BaseGoGridDriver from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm from libcloud.loadbalancer.base import DEFAULT_ALGORITHM from libcloud.loadbalancer.types import State, LibcloudLBImmutableError + class GoGridLBResponse(GoGridResponse): def success(self): if self.status == httplib.INTERNAL_SERVER_ERROR: @@ -35,42 +39,55 @@ # "unexpected server error" body = json.loads(self.body) if body['method'] == '/grid/loadbalancer/add' and \ - len(body['list']) >= 1 and \ - body['list'][0]['message'].find('unexpected server 
error') != -1: - raise LibcloudError(value='You mostly likely tried to add a ' - + 'member with an IP address not assigned ' - + 'to your account', driver=self) + len(body['list']) >= 1 and \ + body['list'][0]['message'].find( + 'unexpected server error') != -1: + raise LibcloudError( + value='You mostly likely tried to add a member with an IP' + ' address not assigned to your account', driver=self) return super(GoGridLBResponse, self).success() + class GoGridLBConnection(GoGridConnection): """ Connection class for the GoGrid load-balancer driver. """ responseCls = GoGridLBResponse + class GoGridLBDriver(BaseGoGridDriver, Driver): connectionCls = GoGridLBConnection api_name = 'gogrid_lb' name = 'GoGrid LB' + website = 'http://www.gogrid.com/' - LB_STATE_MAP = { 'On': State.RUNNING, - 'Unknown': State.UNKNOWN } + LB_STATE_MAP = {'On': State.RUNNING, + 'Unknown': State.UNKNOWN} _VALUE_TO_ALGORITHM_MAP = { 'round robin': Algorithm.ROUND_ROBIN, 'least connect': Algorithm.LEAST_CONNECTIONS } _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP) + def __init__(self, *args, **kwargs): + """ + @inherits: :class:`Driver.__init__` + """ + super(GoGridLBDriver, self).__init__(*args, **kwargs) + def list_protocols(self): # GoGrid only supports http - return [ 'http' ] + return ['http'] def list_balancers(self): return self._to_balancers( - self.connection.request('/api/grid/loadbalancer/list').object) + self.connection.request('/api/grid/loadbalancer/list').object) - def ex_create_balancer_nowait(self, name, members, protocol='http', port=80, - algorithm=DEFAULT_ALGORITHM): + def ex_create_balancer_nowait(self, name, members, protocol='http', + port=80, algorithm=DEFAULT_ALGORITHM): + """ + @inherits: :class:`Driver.create_balancer` + """ algorithm = self._algorithm_to_value(algorithm) params = {'name': name, @@ -80,8 +97,8 @@ params.update(self._members_to_params(members)) resp = self.connection.request('/api/grid/loadbalancer/add', - method='GET', - params=params) 
+ method='GET', + params=params) return self._to_balancers(resp.object)[0] def create_balancer(self, name, members, protocol='http', port=80, @@ -110,12 +127,14 @@ def destroy_balancer(self, balancer): try: - resp = self.connection.request('/api/grid/loadbalancer/delete', - method='POST', params={'id': balancer.id}) - except Exception as err: - if "Update request for LoadBalancer" in str(err): - raise LibcloudLBImmutableError("Cannot delete immutable object", - GoGridLBDriver) + resp = self.connection.request( + '/api/grid/loadbalancer/delete', method='POST', + params={'id': balancer.id}) + except Exception: + e = sys.exc_info()[1] + if "Update request for LoadBalancer" in str(e): + raise LibcloudLBImmutableError( + "Cannot delete immutable object", GoGridLBDriver) else: raise @@ -131,7 +150,7 @@ params['id'] = balancer_id resp = self.connection.request('/api/grid/loadbalancer/get', - params=params) + params=params) return self._to_balancers(resp.object)[0] @@ -144,10 +163,10 @@ params.update(self._members_to_params(members)) resp = self._update_balancer(params) - - return [ m for m in - self._to_members(resp.object["list"][0]["realiplist"]) - if m.ip == member.ip ][0] + return [m for m in + self._to_members(resp.object["list"][0]["realiplist"], + balancer) + if m.ip == member.ip][0] def balancer_detach_member(self, balancer, member): members = self.balancer_list_members(balancer) @@ -163,23 +182,25 @@ def balancer_list_members(self, balancer): resp = self.connection.request('/api/grid/loadbalancer/get', - params={'id': balancer.id}) - return self._to_members(resp.object["list"][0]["realiplist"]) + params={'id': balancer.id}) + return self._to_members(resp.object["list"][0]["realiplist"], balancer) def _update_balancer(self, params): try: return self.connection.request('/api/grid/loadbalancer/edit', - method='POST', - params=params) - except Exception as err: - if "Update already pending" in str(err): - raise LibcloudLBImmutableError("Balancer is immutable", 
GoGridLBDriver) + method='POST', + params=params) + except Exception: + e = sys.exc_info()[1] + if "Update already pending" in str(e): + raise LibcloudLBImmutableError( + "Balancer is immutable", GoGridLBDriver) - raise LibcloudError(value='Exception: %s' % str(err), driver=self) + raise LibcloudError(value='Exception: %s' % str(e), driver=self) def _members_to_params(self, members): """ - Helper method to convert list of L{Member} objects + Helper method to convert list of :class:`Member` objects to GET params. """ @@ -195,23 +216,24 @@ return params def _to_balancers(self, object): - return [ self._to_balancer(el) for el in object["list"] ] + return [self._to_balancer(el) for el in object["list"]] def _to_balancer(self, el): lb = LoadBalancer(id=el.get("id"), - name=el["name"], - state=self.LB_STATE_MAP.get( - el["state"]["name"], State.UNKNOWN), - ip=el["virtualip"]["ip"]["ip"], - port=el["virtualip"]["port"], - driver=self.connection.driver) + name=el["name"], + state=self.LB_STATE_MAP.get( + el["state"]["name"], State.UNKNOWN), + ip=el["virtualip"]["ip"]["ip"], + port=el["virtualip"]["port"], + driver=self.connection.driver) return lb - def _to_members(self, object): - return [ self._to_member(el) for el in object ] + def _to_members(self, object, balancer=None): + return [self._to_member(el, balancer) for el in object] - def _to_member(self, el): + def _to_member(self, el, balancer=None): member = Member(id=el["ip"]["id"], - ip=el["ip"]["ip"], - port=el["port"]) + ip=el["ip"]["ip"], + port=el["port"], + balancer=balancer) return member diff -Nru libcloud-0.5.0/libcloud/loadbalancer/drivers/ninefold.py libcloud-0.15.1/libcloud/loadbalancer/drivers/ninefold.py --- libcloud-0.5.0/libcloud/loadbalancer/drivers/ninefold.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/loadbalancer/drivers/ninefold.py 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,29 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license 
agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.loadbalancer.providers import Provider + +from libcloud.loadbalancer.drivers.cloudstack import CloudStackLBDriver + + +class NinefoldLBDriver(CloudStackLBDriver): + "Driver for load balancers on Ninefold's Compute platform." + + host = 'api.ninefold.com' + path = '/compute/v1.0/' + + type = Provider.NINEFOLD + name = 'Ninefold LB' + website = 'http://ninefold.com/' diff -Nru libcloud-0.5.0/libcloud/loadbalancer/drivers/rackspace.py libcloud-0.15.1/libcloud/loadbalancer/drivers/rackspace.py --- libcloud-0.5.0/libcloud/loadbalancer/drivers/rackspace.py 2011-05-21 20:10:20.000000000 +0000 +++ libcloud-0.15.1/libcloud/loadbalancer/drivers/rackspace.py 2014-06-11 14:27:59.000000000 +0000 @@ -13,106 +13,442 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import os +from datetime import datetime try: - import json -except ImportError: import simplejson as json +except ImportError: + import json -from libcloud.utils import reverse_dict -from libcloud.common.base import Response +from libcloud.utils.py3 import httplib +from libcloud.utils.misc import reverse_dict from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm from libcloud.loadbalancer.base import DEFAULT_ALGORITHM -from libcloud.loadbalancer.types import Provider, State -from libcloud.common.rackspace import (AUTH_HOST_US, - RackspaceBaseConnection) +from libcloud.compute.drivers.rackspace import RackspaceConnection +from libcloud.common.types import LibcloudError +from libcloud.common.base import JsonResponse, PollingConnection +from libcloud.loadbalancer.types import State, MemberCondition +from libcloud.common.openstack import OpenStackDriverMixin +from libcloud.common.rackspace import AUTH_URL + +ENDPOINT_ARGS_MAP = { + 'dfw': {'service_type': 'rax:load-balancer', + 'name': 'cloudLoadBalancers', + 'region': 'DFW'}, + 'ord': {'service_type': 'rax:load-balancer', + 'name': 'cloudLoadBalancers', + 'region': 'ORD'}, + 'iad': {'service_type': 'rax:load-balancer', + 'name': 'cloudLoadBalancers', + 'region': 'IAD'}, + 'lon': {'service_type': 'rax:load-balancer', + 'name': 'cloudLoadBalancers', + 'region': 'LON'}, + 'syd': {'service_type': 'rax:load-balancer', + 'name': 'cloudLoadBalancers', + 'region': 'SYD'}, + 'hkg': {'service_type': 'rax:load-balancer', + 'name': 'cloudLoadBalancers', + 'region': 'HKG'}, -class RackspaceResponse(Response): +} - def success(self): - return 200 <= int(self.status) <= 299 +class RackspaceResponse(JsonResponse): def parse_body(self): if not self.body: return None - else: - return json.loads(self.body) + return super(RackspaceResponse, self).parse_body() -class RackspaceConnection(RackspaceBaseConnection): - responseCls = RackspaceResponse - auth_host = AUTH_HOST_US - _url_key = "lb_url" + def 
success(self): + return 200 <= int(self.status) <= 299 + + +class RackspaceHealthMonitor(object): + """ + :param type: type of load balancer. currently CONNECT (connection + monitoring), HTTP, HTTPS (connection and HTTP + monitoring) are supported. + :type type: ``str`` + + :param delay: minimum seconds to wait before executing the health + monitor. (Must be between 1 and 3600) + :type delay: ``int`` + + :param timeout: maximum seconds to wait when establishing a + connection before timing out. (Must be between 1 + and 3600) + :type timeout: ``int`` + + :param attempts_before_deactivation: Number of monitor failures + before removing a node from + rotation. (Must be between 1 + and 10) + :type attempts_before_deactivation: ``int`` + """ + + def __init__(self, type, delay, timeout, attempts_before_deactivation): + self.type = type + self.delay = delay + self.timeout = timeout + self.attempts_before_deactivation = attempts_before_deactivation + + def __repr__(self): + return ('' % + (self.type, self.delay, self.timeout, + self.attempts_before_deactivation)) + + def _to_dict(self): + return { + 'type': self.type, + 'delay': self.delay, + 'timeout': self.timeout, + 'attemptsBeforeDeactivation': self.attempts_before_deactivation + } + + +class RackspaceHTTPHealthMonitor(RackspaceHealthMonitor): + """ + A HTTP health monitor adds extra features to a Rackspace health monitor. + + :param path: the HTTP path to monitor. + :type path: ``str`` + + :param body_regex: Regular expression used to evaluate the body of + the HTTP response. + :type body_regex: ``str`` + + :param status_regex: Regular expression used to evaluate the HTTP + status code of the response. 
+ :type status_regex: ``str`` + """ + + def __init__(self, type, delay, timeout, attempts_before_deactivation, + path, body_regex, status_regex): + super(RackspaceHTTPHealthMonitor, self).__init__( + type, delay, timeout, attempts_before_deactivation) + self.path = path + self.body_regex = body_regex + self.status_regex = status_regex + + def __repr__(self): + return ('' % + (self.type, self.delay, self.timeout, + self.attempts_before_deactivation, self.path, self.body_regex, + self.status_regex)) + + def _to_dict(self): + super_dict = super(RackspaceHTTPHealthMonitor, self)._to_dict() + super_dict['path'] = self.path + super_dict['statusRegex'] = self.status_regex + + if self.body_regex: + super_dict['bodyRegex'] = self.body_regex + + return super_dict + + +class RackspaceConnectionThrottle(object): + """ + :param min_connections: Minimum number of connections per IP address + before applying throttling. + :type min_connections: ``int`` + + :param max_connections: Maximum number of connections per IP address. + (Must be between 0 and 100000, 0 allows an + unlimited number of connections.) + :type max_connections: ``int`` + + :param max_connection_rate: Maximum number of connections allowed + from a single IP address within the + given rate_interval_seconds. (Must be + between 0 and 100000, 0 allows an + unlimited number of connections.) + :type max_connection_rate: ``int`` + + :param rate_interval_seconds: Interval at which the + max_connection_rate is enforced. + (Must be between 1 and 3600.) 
+ :type rate_interval_seconds: ``int`` + """ + + def __init__(self, min_connections, max_connections, + max_connection_rate, rate_interval_seconds): + self.min_connections = min_connections + self.max_connections = max_connections + self.max_connection_rate = max_connection_rate + self.rate_interval_seconds = rate_interval_seconds + + def __repr__(self): + return ('' % + (self.min_connections, self.max_connections, + self.max_connection_rate, self.rate_interval_seconds)) + + def _to_dict(self): + return { + 'maxConnections': self.max_connections, + 'minConnections': self.min_connections, + 'maxConnectionRate': self.max_connection_rate, + 'rateInterval': self.rate_interval_seconds + } + + +class RackspaceAccessRuleType(object): + ALLOW = 0 + DENY = 1 + + _RULE_TYPE_STRING_MAP = { + ALLOW: 'ALLOW', + DENY: 'DENY' + } + + +class RackspaceAccessRule(object): + """ + An access rule allows or denies traffic to a Load Balancer based on the + incoming IPs. + + :param id: Unique identifier to refer to this rule by. + :type id: ``str`` + + :param rule_type: RackspaceAccessRuleType.ALLOW or + RackspaceAccessRuleType.DENY. + :type id: ``int`` + + :param address: IP address or cidr (can be IPv4 or IPv6). 
+ :type address: ``str`` + """ + + def __init__(self, id=None, rule_type=None, address=None): + self.id = id + self.rule_type = rule_type + self.address = address + + def _to_dict(self): + type_string =\ + RackspaceAccessRuleType._RULE_TYPE_STRING_MAP[self.rule_type] + + as_dict = { + 'type': type_string, + 'address': self.address + } + + if self.id is not None: + as_dict['id'] = self.id + + return as_dict - def __init__(self, user_id, key, secure=True): - super(RackspaceConnection, self).__init__(user_id, key, secure) - self.api_version = 'v1.0' - self.accept_format = 'application/json' - def request(self, action, params=None, data='', headers=None, method='GET'): +class RackspaceConnection(RackspaceConnection, PollingConnection): + responseCls = RackspaceResponse + auth_url = AUTH_URL + poll_interval = 2 + timeout = 80 + cache_busting = True + + def request(self, action, params=None, data='', headers=None, + method='GET'): if not headers: headers = {} if not params: params = {} - if self.lb_url: - action = self.lb_url + action + if method in ('POST', 'PUT'): headers['Content-Type'] = 'application/json' - if method == 'GET': - params['cache-busing'] = os.urandom(8).encode('hex') - return super(RackspaceConnection, self).request(action=action, - params=params, data=data, method=method, headers=headers) + return super(RackspaceConnection, self).request( + action=action, params=params, + data=data, method=method, headers=headers) + + def get_poll_request_kwargs(self, response, context, request_kwargs): + return {'action': request_kwargs['action'], + 'method': 'GET'} + + def has_completed(self, response): + state = response.object['loadBalancer']['status'] + if state == 'ERROR': + raise LibcloudError("Load balancer entered an ERROR state.", + driver=self.driver) + + return state == 'ACTIVE' + def encode_data(self, data): + return data -class RackspaceLBDriver(Driver): + +class RackspaceLBDriver(Driver, OpenStackDriverMixin): connectionCls = RackspaceConnection 
api_name = 'rackspace_lb' name = 'Rackspace LB' + website = 'http://www.rackspace.com/' + + LB_STATE_MAP = { + 'ACTIVE': State.RUNNING, + 'BUILD': State.PENDING, + 'ERROR': State.ERROR, + 'DELETED': State.DELETED, + 'PENDING_UPDATE': State.PENDING, + 'PENDING_DELETE': State.PENDING + } + + LB_MEMBER_CONDITION_MAP = { + 'ENABLED': MemberCondition.ENABLED, + 'DISABLED': MemberCondition.DISABLED, + 'DRAINING': MemberCondition.DRAINING + } + + CONDITION_LB_MEMBER_MAP = reverse_dict(LB_MEMBER_CONDITION_MAP) - LB_STATE_MAP = { 'ACTIVE': State.RUNNING, - 'BUILD': State.PENDING } _VALUE_TO_ALGORITHM_MAP = { 'RANDOM': Algorithm.RANDOM, 'ROUND_ROBIN': Algorithm.ROUND_ROBIN, - 'LEAST_CONNECTIONS': Algorithm.LEAST_CONNECTIONS + 'LEAST_CONNECTIONS': Algorithm.LEAST_CONNECTIONS, + 'WEIGHTED_ROUND_ROBIN': Algorithm.WEIGHTED_ROUND_ROBIN, + 'WEIGHTED_LEAST_CONNECTIONS': Algorithm.WEIGHTED_LEAST_CONNECTIONS } + _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP) + def __init__(self, key, secret=None, secure=True, host=None, port=None, + region='ord', **kwargs): + ex_force_region = kwargs.pop('ex_force_region', None) + if ex_force_region: + # For backward compatibility + region = ex_force_region + OpenStackDriverMixin.__init__(self, **kwargs) + super(RackspaceLBDriver, self).__init__(key=key, secret=secret, + secure=secure, host=host, + port=port, region=region) + + def _ex_connection_class_kwargs(self): + endpoint_args = ENDPOINT_ARGS_MAP[self.region] + kwargs = self.openstack_connection_kwargs() + kwargs['get_endpoint_args'] = endpoint_args + return kwargs + def list_protocols(self): return self._to_protocols( - self.connection.request('/loadbalancers/protocols').object) + self.connection.request('/loadbalancers/protocols').object) + + def ex_list_protocols_with_default_ports(self): + """ + List protocols with default ports. + + :rtype: ``list`` of ``tuple`` + :return: A list of protocols with default ports included. 
+ """ + return self._to_protocols_with_default_ports( + self.connection.request('/loadbalancers/protocols').object) + + def list_balancers(self, ex_member_address=None): + """ + @inherits: :class:`Driver.list_balancers` + + :param ex_member_address: Optional IP address of the attachment member. + If provided, only the load balancers which + have this member attached will be returned. + :type ex_member_address: ``str`` + """ + params = {} + + if ex_member_address: + params['nodeaddress'] = ex_member_address - def list_balancers(self): return self._to_balancers( - self.connection.request('/loadbalancers').object) + self.connection.request('/loadbalancers', params=params).object) def create_balancer(self, name, members, protocol='http', port=80, algorithm=DEFAULT_ALGORITHM): - algorithm = self._algorithm_to_value(algorithm) + return self.ex_create_balancer(name, members, protocol, port, + algorithm) - balancer_object = {"loadBalancer": - {"name": name, - "port": port, - "algorithm": algorithm, - "protocol": protocol.upper(), - "virtualIps": [{"type": "PUBLIC"}], - "nodes": [{"address": member.ip, - "port": member.port, - "condition": "ENABLED"} for member in members], - } - } + def ex_create_balancer(self, name, members, protocol='http', + port=80, algorithm=DEFAULT_ALGORITHM, vip='PUBLIC'): + """ + Creates a new load balancer instance + + :param name: Name of the new load balancer (required) + :type name: ``str`` + + :param members: ``list`` of:class:`Member`s to attach to balancer + :type members: ``list`` of :class:`Member` + + :param protocol: Loadbalancer protocol, defaults to http. 
+ :type protocol: ``str`` + + :param port: Port the load balancer should listen on, defaults to 80 + :type port: ``str`` + + :param algorithm: Load balancing algorithm, defaults to + LBAlgorithm.ROUND_ROBIN + :type algorithm: :class:`Algorithm` + + :param vip: Virtual ip type of PUBLIC, SERVICENET, or ID of a virtual + ip + :type vip: ``str`` + + :rtype: :class:`LoadBalancer` + """ + balancer_attrs = self._kwargs_to_mutable_attrs( + name=name, + protocol=protocol, + port=port, + algorithm=algorithm, + vip=vip) + + balancer_attrs.update({ + 'nodes': [self._member_attributes(member) for member in members], + }) + # balancer_attrs['nodes'] = ['fu'] + balancer_object = {"loadBalancer": balancer_attrs} resp = self.connection.request('/loadbalancers', - method='POST', - data=json.dumps(balancer_object)) - return self._to_balancer(resp.object["loadBalancer"]) + method='POST', + data=json.dumps(balancer_object)) + return self._to_balancer(resp.object['loadBalancer']) + + def _member_attributes(self, member): + member_attributes = {'address': member.ip, + 'port': member.port} + + member_attributes.update(self._kwargs_to_mutable_member_attrs( + **member.extra)) + + # If the condition is not specified on the member, then it should be + # set to ENABLED by default + if 'condition' not in member_attributes: + member_attributes['condition'] =\ + self.CONDITION_LB_MEMBER_MAP[MemberCondition.ENABLED] + + return member_attributes def destroy_balancer(self, balancer): uri = '/loadbalancers/%s' % (balancer.id) resp = self.connection.request(uri, method='DELETE') - return resp.status == 202 + return resp.status == httplib.ACCEPTED + + def ex_destroy_balancers(self, balancers): + """ + Destroys a list of Balancers (the API supports up to 10). + + :param balancers: A list of Balancers to destroy. + :type balancers: ``list`` of :class:`LoadBalancer` + + :return: Returns whether the destroy request was accepted. 
+ :rtype: ``bool`` + """ + ids = [('id', balancer.id) for balancer in balancers] + resp = self.connection.request('/loadbalancers', + method='DELETE', + params=ids) + + return resp.status == httplib.ACCEPTED def get_balancer(self, balancer_id): uri = '/loadbalancers/%s' % (balancer_id) @@ -121,33 +457,851 @@ return self._to_balancer(resp.object["loadBalancer"]) def balancer_attach_member(self, balancer, member): - ip = member.ip - port = member.port + member_object = {"nodes": [self._member_attributes(member)]} + + uri = '/loadbalancers/%s/nodes' % (balancer.id) + resp = self.connection.request(uri, method='POST', + data=json.dumps(member_object)) + return self._to_members(resp.object, balancer)[0] - member_object = {"nodes": - [{"port": port, - "address": ip, - "condition": "ENABLED"}] - } + def ex_balancer_attach_members(self, balancer, members): + """ + Attaches a list of members to a load balancer. + + :param balancer: The Balancer to which members will be attached. + :type balancer: :class:`LoadBalancer` + + :param members: A list of Members to attach. + :type members: ``list`` of :class:`Member` + + :rtype: ``list`` of :class:`Member` + """ + member_objects = {"nodes": [self._member_attributes(member) for member + in members]} uri = '/loadbalancers/%s/nodes' % (balancer.id) resp = self.connection.request(uri, method='POST', - data=json.dumps(member_object)) - return self._to_members(resp.object)[0] + data=json.dumps(member_objects)) + return self._to_members(resp.object, balancer) def balancer_detach_member(self, balancer, member): # Loadbalancer always needs to have at least 1 member. - # Last member cannot be detached. You can only disable it or destroy the - # balancer. + # Last member cannot be detached. You can only disable it or destroy + # the balancer. 
uri = '/loadbalancers/%s/nodes/%s' % (balancer.id, member.id) resp = self.connection.request(uri, method='DELETE') - return resp.status == 202 + return resp.status == httplib.ACCEPTED + + def ex_balancer_detach_members(self, balancer, members): + """ + Detaches a list of members from a balancer (the API supports up to 10). + This method blocks until the detach request has been processed and the + balancer is in a RUNNING state again. + + :param balancer: The Balancer to detach members from. + :type balancer: :class:`LoadBalancer` + + :param members: A list of Members to detach. + :type members: ``list`` of :class:`Member` + + :return: Updated Balancer. + :rtype: :class:`LoadBalancer` + """ + accepted = self.ex_balancer_detach_members_no_poll(balancer, members) + + if not accepted: + msg = 'Detach members request was not accepted' + raise LibcloudError(msg, driver=self) + + return self._get_updated_balancer(balancer) + + def ex_balancer_detach_members_no_poll(self, balancer, members): + """ + Detaches a list of members from a balancer (the API supports up to 10). + This method returns immediately. + + :param balancer: The Balancer to detach members from. + :type balancer: :class:`LoadBalancer` + + :param members: A list of Members to detach. + :type members: ``list`` of :class:`Member` + + :return: Returns whether the detach request was accepted. 
+ :rtype: ``bool`` + """ + uri = '/loadbalancers/%s/nodes' % (balancer.id) + ids = [('id', member.id) for member in members] + resp = self.connection.request(uri, method='DELETE', params=ids) + + return resp.status == httplib.ACCEPTED def balancer_list_members(self, balancer): uri = '/loadbalancers/%s/nodes' % (balancer.id) - return self._to_members( - self.connection.request(uri).object) + data = self.connection.request(uri).object + return self._to_members(data, balancer) + + def update_balancer(self, balancer, **kwargs): + attrs = self._kwargs_to_mutable_attrs(**kwargs) + resp = self.connection.async_request( + action='/loadbalancers/%s' % balancer.id, + method='PUT', + data=json.dumps(attrs)) + return self._to_balancer(resp.object["loadBalancer"]) + + def ex_update_balancer_no_poll(self, balancer, **kwargs): + """ + Update balancer no poll. + + @inherits: :class:`Driver.update_balancer` + """ + attrs = self._kwargs_to_mutable_attrs(**kwargs) + resp = self.connection.request( + action='/loadbalancers/%s' % balancer.id, + method='PUT', + data=json.dumps(attrs)) + return resp.status == httplib.ACCEPTED + + def ex_balancer_update_member(self, balancer, member, **kwargs): + """ + Updates a Member's extra attributes for a Balancer. The attributes can + include 'weight' or 'condition'. This method blocks until the update + request has been processed and the balancer is in a RUNNING state + again. + + :param balancer: Balancer to update the member on. + :type balancer: :class:`LoadBalancer` + + :param member: Member which should be used + :type member: :class:`Member` + + :keyword **kwargs: New attributes. Should contain either 'weight' + or 'condition'. 'condition' can be set to 'ENABLED', 'DISABLED'. + or 'DRAINING'. 'weight' can be set to a positive integer between + 1 and 100, with a higher weight indicating that the node will receive + more traffic (assuming the Balancer is using a weighted algorithm). + :type **kwargs: ``dict`` + + :return: Updated Member. 
+ :rtype: :class:`Member` + """ + accepted = self.ex_balancer_update_member_no_poll( + balancer, member, **kwargs) + + if not accepted: + msg = 'Update member attributes was not accepted' + raise LibcloudError(msg, driver=self) + + balancer = self._get_updated_balancer(balancer) + members = balancer.extra['members'] + + updated_members = [m for m in members if m.id == member.id] + + if not updated_members: + raise LibcloudError('Could not find updated member') + + return updated_members[0] + + def ex_balancer_update_member_no_poll(self, balancer, member, **kwargs): + """ + Updates a Member's extra attributes for a Balancer. The attribute can + include 'weight' or 'condition'. This method returns immediately. + + :param balancer: Balancer to update the member on. + :type balancer: :class:`LoadBalancer` + + :param member: Member which should be used + :type member: :class:`Member` + + :keyword **kwargs: New attributes. Should contain either 'weight' + or 'condition'. 'condition' can be set to 'ENABLED', 'DISABLED'. + or 'DRAINING'. 'weight' can be set to a positive integer between + 1 and 100, with a higher weight indicating that the node will receive + more traffic (assuming the Balancer is using a weighted algorithm). + :type **kwargs: ``dict`` + + :return: Returns whether the update request was accepted. + :rtype: ``bool`` + """ + resp = self.connection.request( + action='/loadbalancers/%s/nodes/%s' % (balancer.id, member.id), + method='PUT', + data=json.dumps(self._kwargs_to_mutable_member_attrs(**kwargs)) + ) + + return resp.status == httplib.ACCEPTED + + def ex_list_algorithm_names(self): + """ + Lists algorithms supported by the API. Returned as strings because + this list may change in the future. 
+ + :rtype: ``list`` of ``str`` + """ + response = self.connection.request('/loadbalancers/algorithms') + return [a["name"].upper() for a in response.object["algorithms"]] + + def ex_get_balancer_error_page(self, balancer): + """ + List error page configured for the specified load balancer. + + :param balancer: Balancer which should be used + :type balancer: :class:`LoadBalancer` + + :rtype: ``str`` + """ + uri = '/loadbalancers/%s/errorpage' % (balancer.id) + resp = self.connection.request(uri) + + return resp.object["errorpage"]["content"] + + def ex_balancer_access_list(self, balancer): + """ + List the access list. + + :param balancer: Balancer which should be used + :type balancer: :class:`LoadBalancer` + + :rtype: ``list`` of :class:`RackspaceAccessRule` + """ + uri = '/loadbalancers/%s/accesslist' % (balancer.id) + resp = self.connection.request(uri) + + return [self._to_access_rule(el) for el in resp.object["accessList"]] + + def _get_updated_balancer(self, balancer): + """ + Updating a balancer's attributes puts a balancer into + 'PENDING_UPDATE' status. Wait until the balancer is + back in 'ACTIVE' status and then return the individual + balancer details call. + """ + resp = self.connection.async_request( + action='/loadbalancers/%s' % balancer.id, + method='GET') + + return self._to_balancer(resp.object['loadBalancer']) + + def ex_update_balancer_health_monitor(self, balancer, health_monitor): + """ + Sets a Balancer's health monitor. This method blocks until the update + request has been processed and the balancer is in a RUNNING state + again. + + :param balancer: Balancer to update. + :type balancer: :class:`LoadBalancer` + + :param health_monitor: Health Monitor for the balancer. + :type health_monitor: :class:`RackspaceHealthMonitor` + + :return: Updated Balancer. 
+ :rtype: :class:`LoadBalancer` + """ + accepted = self.ex_update_balancer_health_monitor_no_poll( + balancer, health_monitor) + if not accepted: + msg = 'Update health monitor request not accepted' + raise LibcloudError(msg, driver=self) + + return self._get_updated_balancer(balancer) + + def ex_update_balancer_health_monitor_no_poll(self, balancer, + health_monitor): + """ + Sets a Balancer's health monitor. This method returns immediately. + + :param balancer: Balancer to update health monitor on. + :type balancer: :class:`LoadBalancer` + + :param health_monitor: Health Monitor for the balancer. + :type health_monitor: :class:`RackspaceHealthMonitor` + + :return: Returns whether the update request was accepted. + :rtype: ``bool`` + """ + uri = '/loadbalancers/%s/healthmonitor' % (balancer.id) + + resp = self.connection.request( + uri, method='PUT', data=json.dumps(health_monitor._to_dict())) + + return resp.status == httplib.ACCEPTED + + def ex_disable_balancer_health_monitor(self, balancer): + """ + Disables a Balancer's health monitor. This method blocks until the + disable request has been processed and the balancer is in a RUNNING + state again. + + :param balancer: Balancer to disable health monitor on. + :type balancer: :class:`LoadBalancer` + + :return: Updated Balancer. + :rtype: :class:`LoadBalancer` + """ + if not self.ex_disable_balancer_health_monitor_no_poll(balancer): + msg = 'Disable health monitor request not accepted' + raise LibcloudError(msg, driver=self) + + return self._get_updated_balancer(balancer) + + def ex_disable_balancer_health_monitor_no_poll(self, balancer): + """ + Disables a Balancer's health monitor. This method returns + immediately. + + :param balancer: Balancer to disable health monitor on. + :type balancer: :class:`LoadBalancer` + + :return: Returns whether the disable request was accepted. 
+ :rtype: ``bool`` + """ + uri = '/loadbalancers/%s/healthmonitor' % (balancer.id) + + resp = self.connection.request(uri, + method='DELETE') + + return resp.status == httplib.ACCEPTED + + def ex_update_balancer_connection_throttle(self, balancer, + connection_throttle): + """ + Updates a Balancer's connection throttle. This method blocks until + the update request has been processed and the balancer is in a + RUNNING state again. + + :param balancer: Balancer to update connection throttle on. + :type balancer: :class:`LoadBalancer` + + :param connection_throttle: Connection Throttle for the balancer. + :type connection_throttle: :class:`RackspaceConnectionThrottle` + + :return: Updated Balancer. + :rtype: :class:`LoadBalancer` + """ + accepted = self.ex_update_balancer_connection_throttle_no_poll( + balancer, connection_throttle) + + if not accepted: + msg = 'Update connection throttle request not accepted' + raise LibcloudError(msg, driver=self) + + return self._get_updated_balancer(balancer) + + def ex_update_balancer_connection_throttle_no_poll(self, balancer, + connection_throttle): + """ + Sets a Balancer's connection throttle. This method returns + immediately. + + :param balancer: Balancer to update connection throttle on. + :type balancer: :class:`LoadBalancer` + + :param connection_throttle: Connection Throttle for the balancer. + :type connection_throttle: :class:`RackspaceConnectionThrottle` + + :return: Returns whether the update request was accepted. + :rtype: ``bool`` + """ + uri = '/loadbalancers/%s/connectionthrottle' % (balancer.id) + resp = self.connection.request( + uri, method='PUT', + data=json.dumps(connection_throttle._to_dict())) + + return resp.status == httplib.ACCEPTED + + def ex_disable_balancer_connection_throttle(self, balancer): + """ + Disables a Balancer's connection throttle. This method blocks until + the disable request has been processed and the balancer is in a RUNNING + state again. 
+ + :param balancer: Balancer to disable connection throttle on. + :type balancer: :class:`LoadBalancer` + + :return: Updated Balancer. + :rtype: :class:`LoadBalancer` + """ + if not self.ex_disable_balancer_connection_throttle_no_poll(balancer): + msg = 'Disable connection throttle request not accepted' + raise LibcloudError(msg, driver=self) + + return self._get_updated_balancer(balancer) + + def ex_disable_balancer_connection_throttle_no_poll(self, balancer): + """ + Disables a Balancer's connection throttle. This method returns + immediately. + + :param balancer: Balancer to disable connection throttle on. + :type balancer: :class:`LoadBalancer` + + :return: Returns whether the disable request was accepted. + :rtype: ``bool`` + """ + uri = '/loadbalancers/%s/connectionthrottle' % (balancer.id) + resp = self.connection.request(uri, method='DELETE') + + return resp.status == httplib.ACCEPTED + + def ex_enable_balancer_connection_logging(self, balancer): + """ + Enables connection logging for a Balancer. This method blocks until + the enable request has been processed and the balancer is in a RUNNING + state again. + + :param balancer: Balancer to enable connection logging on. + :type balancer: :class:`LoadBalancer` + + :return: Updated Balancer. + :rtype: :class:`LoadBalancer` + """ + if not self.ex_enable_balancer_connection_logging_no_poll(balancer): + msg = 'Enable connection logging request not accepted' + raise LibcloudError(msg, driver=self) + + return self._get_updated_balancer(balancer) + + def ex_enable_balancer_connection_logging_no_poll(self, balancer): + """ + Enables connection logging for a Balancer. This method returns + immediately. + + :param balancer: Balancer to enable connection logging on. + :type balancer: :class:`LoadBalancer` + + :return: Returns whether the enable request was accepted. 
+ :rtype: ``bool`` + """ + uri = '/loadbalancers/%s/connectionlogging' % (balancer.id) + + resp = self.connection.request( + uri, method='PUT', + data=json.dumps({'connectionLogging': {'enabled': True}}) + ) + + return resp.status == httplib.ACCEPTED + + def ex_disable_balancer_connection_logging(self, balancer): + """ + Disables connection logging for a Balancer. This method blocks until + the enable request has been processed and the balancer is in a RUNNING + state again. + + :param balancer: Balancer to disable connection logging on. + :type balancer: :class:`LoadBalancer` + + :return: Updated Balancer. + :rtype: :class:`LoadBalancer` + """ + if not self.ex_disable_balancer_connection_logging_no_poll(balancer): + msg = 'Disable connection logging request not accepted' + raise LibcloudError(msg, driver=self) + + return self._get_updated_balancer(balancer) + + def ex_disable_balancer_connection_logging_no_poll(self, balancer): + """ + Disables connection logging for a Balancer. This method returns + immediately. + + :param balancer: Balancer to disable connection logging on. + :type balancer: :class:`LoadBalancer` + + :return: Returns whether the disable request was accepted. + :rtype: ``bool`` + """ + uri = '/loadbalancers/%s/connectionlogging' % (balancer.id) + resp = self.connection.request( + uri, method='PUT', + data=json.dumps({'connectionLogging': {'enabled': False}}) + ) + + return resp.status == httplib.ACCEPTED + + def ex_enable_balancer_session_persistence(self, balancer): + """ + Enables session persistence for a Balancer by setting the persistence + type to 'HTTP_COOKIE'. This method blocks until the enable request + has been processed and the balancer is in a RUNNING state again. + + :param balancer: Balancer to enable session persistence on. + :type balancer: :class:`LoadBalancer` + + :return: Updated Balancer. 
+ :rtype: :class:`LoadBalancer` + """ + if not self.ex_enable_balancer_session_persistence_no_poll(balancer): + msg = 'Enable session persistence request not accepted' + raise LibcloudError(msg, driver=self) + + return self._get_updated_balancer(balancer) + + def ex_enable_balancer_session_persistence_no_poll(self, balancer): + """ + Enables session persistence for a Balancer by setting the persistence + type to 'HTTP_COOKIE'. This method returns immediately. + + :param balancer: Balancer to enable session persistence on. + :type balancer: :class:`LoadBalancer` + + :return: Returns whether the enable request was accepted. + :rtype: ``bool`` + """ + uri = '/loadbalancers/%s/sessionpersistence' % (balancer.id) + resp = self.connection.request( + uri, method='PUT', + data=json.dumps( + {'sessionPersistence': {'persistenceType': 'HTTP_COOKIE'}}) + ) + + return resp.status == httplib.ACCEPTED + + def ex_disable_balancer_session_persistence(self, balancer): + """ + Disables session persistence for a Balancer. This method blocks until + the disable request has been processed and the balancer is in a RUNNING + state again. + + :param balancer: Balancer to disable session persistence on. + :type balancer: :class:`LoadBalancer` + + :return: Updated Balancer. + :rtype: :class:`LoadBalancer` + """ + if not self.ex_disable_balancer_session_persistence_no_poll(balancer): + msg = 'Disable session persistence request not accepted' + raise LibcloudError(msg, driver=self) + + return self._get_updated_balancer(balancer) + + def ex_disable_balancer_session_persistence_no_poll(self, balancer): + """ + Disables session persistence for a Balancer. This method returns + immediately. + + :param balancer: Balancer to disable session persistence for. + :type balancer: :class:`LoadBalancer` + + :return: Returns whether the disable request was accepted. 
+ :rtype: ``bool`` + """ + uri = '/loadbalancers/%s/sessionpersistence' % (balancer.id) + resp = self.connection.request(uri, method='DELETE') + + return resp.status == httplib.ACCEPTED + + def ex_update_balancer_error_page(self, balancer, page_content): + """ + Updates a Balancer's custom error page. This method blocks until + the update request has been processed and the balancer is in a + RUNNING state again. + + :param balancer: Balancer to update the custom error page for. + :type balancer: :class:`LoadBalancer` + + :param page_content: HTML content for the custom error page. + :type page_content: ``str`` + + :return: Updated Balancer. + :rtype: :class:`LoadBalancer` + """ + accepted = self.ex_update_balancer_error_page_no_poll(balancer, + page_content) + if not accepted: + msg = 'Update error page request not accepted' + raise LibcloudError(msg, driver=self) + + return self._get_updated_balancer(balancer) + + def ex_update_balancer_error_page_no_poll(self, balancer, page_content): + """ + Updates a Balancer's custom error page. This method returns + immediately. + + :param balancer: Balancer to update the custom error page for. + :type balancer: :class:`LoadBalancer` + + :param page_content: HTML content for the custom error page. + :type page_content: ``str`` + + :return: Returns whether the update request was accepted. + :rtype: ``bool`` + """ + uri = '/loadbalancers/%s/errorpage' % (balancer.id) + resp = self.connection.request( + uri, method='PUT', + data=json.dumps({'errorpage': {'content': page_content}}) + ) + + return resp.status == httplib.ACCEPTED + + def ex_disable_balancer_custom_error_page(self, balancer): + """ + Disables a Balancer's custom error page, returning its error page to + the Rackspace-provided default. This method blocks until the disable + request has been processed and the balancer is in a RUNNING state + again. + + :param balancer: Balancer to disable the custom error page for. 
+ :type balancer: :class:`LoadBalancer` + + :return: Updated Balancer. + :rtype: :class:`LoadBalancer` + """ + if not self.ex_disable_balancer_custom_error_page_no_poll(balancer): + msg = 'Disable custom error page request not accepted' + raise LibcloudError(msg, driver=self) + + return self._get_updated_balancer(balancer) + + def ex_disable_balancer_custom_error_page_no_poll(self, balancer): + """ + Disables a Balancer's custom error page, returning its error page to + the Rackspace-provided default. This method returns immediately. + + :param balancer: Balancer to disable the custom error page for. + :type balancer: :class:`LoadBalancer` + + :return: Returns whether the disable request was accepted. + :rtype: ``bool`` + """ + uri = '/loadbalancers/%s/errorpage' % (balancer.id) + resp = self.connection.request(uri, method='DELETE') + + # Load Balancer API currently returns 200 OK on custom error page + # delete. + return resp.status == httplib.OK or resp.status == httplib.ACCEPTED + + def ex_create_balancer_access_rule(self, balancer, rule): + """ + Adds an access rule to a Balancer's access list. This method blocks + until the update request has been processed and the balancer is in a + RUNNING state again. + + :param balancer: Balancer to create the access rule for. + :type balancer: :class:`LoadBalancer` + + :param rule: Access Rule to add to the balancer. + :type rule: :class:`RackspaceAccessRule` + + :return: The created access rule. 
+ :rtype: :class:`RackspaceAccessRule` + """ + accepted = self.ex_create_balancer_access_rule_no_poll(balancer, rule) + if not accepted: + msg = 'Create access rule not accepted' + raise LibcloudError(msg, driver=self) + + balancer = self._get_updated_balancer(balancer) + access_list = balancer.extra['accessList'] + + created_rule = self._find_matching_rule(rule, access_list) + + if not created_rule: + raise LibcloudError('Could not find created rule') + + return created_rule + + def ex_create_balancer_access_rule_no_poll(self, balancer, rule): + """ + Adds an access rule to a Balancer's access list. This method returns + immediately. + + :param balancer: Balancer to create the access rule for. + :type balancer: :class:`LoadBalancer` + + :param rule: Access Rule to add to the balancer. + :type rule: :class:`RackspaceAccessRule` + + :return: Returns whether the create request was accepted. + :rtype: ``bool`` + """ + uri = '/loadbalancers/%s/accesslist' % (balancer.id) + resp = self.connection.request( + uri, method='POST', + data=json.dumps({'networkItem': rule._to_dict()}) + ) + + return resp.status == httplib.ACCEPTED + + def ex_create_balancer_access_rules(self, balancer, rules): + """ + Adds a list of access rules to a Balancer's access list. This method + blocks until the update request has been processed and the balancer is + in a RUNNING state again. + + :param balancer: Balancer to create the access rule for. + :type balancer: :class:`LoadBalancer` + + :param rules: List of :class:`RackspaceAccessRule` to add to the + balancer. + :type rules: ``list`` of :class:`RackspaceAccessRule` + + :return: The created access rules. 
+        :rtype: :class:`RackspaceAccessRule`
+        """
+        accepted = self.ex_create_balancer_access_rules_no_poll(balancer,
+                                                                rules)
+        if not accepted:
+            msg = 'Create access rules not accepted'
+            raise LibcloudError(msg, driver=self)
+
+        balancer = self._get_updated_balancer(balancer)
+        access_list = balancer.extra['accessList']
+
+        created_rules = []
+        for r in rules:
+            matched_rule = self._find_matching_rule(r, access_list)
+            if matched_rule:
+                created_rules.append(matched_rule)
+
+        if len(created_rules) != len(rules):
+            raise LibcloudError('Could not find all created rules')
+
+        return created_rules
+
+    def _find_matching_rule(self, rule_to_find, access_list):
+        """
+        LB API does not return the ID for the newly created rules, so we have
+        to search the list to find the rule with a matching rule type and
+        address to return an object with the right identifier. The API
+        enforces rule type and address uniqueness.
+        """
+        for r in access_list:
+            if rule_to_find.rule_type == r.rule_type and\
+               rule_to_find.address == r.address:
+                return r
+
+        return None
+
+    def ex_create_balancer_access_rules_no_poll(self, balancer, rules):
+        """
+        Adds a list of access rules to a Balancer's access list. This method
+        returns immediately.
+
+        :param balancer: Balancer to create the access rule for.
+        :type balancer: :class:`LoadBalancer`
+
+        :param rules: List of :class:`RackspaceAccessRule` to add to
+                      the balancer.
+        :type rules: ``list`` of :class:`RackspaceAccessRule`
+
+        :return: Returns whether the create request was accepted.
+        :rtype: ``bool``
+        """
+        uri = '/loadbalancers/%s/accesslist' % (balancer.id)
+        resp = self.connection.request(
+            uri, method='POST',
+            data=json.dumps({'accessList':
+                             [rule._to_dict() for rule in rules]})
+        )
+
+        return resp.status == httplib.ACCEPTED
+
+    def ex_destroy_balancer_access_rule(self, balancer, rule):
+        """
+        Removes an access rule from a Balancer's access list.
This method + blocks until the update request has been processed and the balancer + is in a RUNNING state again. + + :param balancer: Balancer to remove the access rule from. + :type balancer: :class:`LoadBalancer` + + :param rule: Access Rule to remove from the balancer. + :type rule: :class:`RackspaceAccessRule` + + :return: Updated Balancer. + :rtype: :class:`LoadBalancer` + """ + accepted = self.ex_destroy_balancer_access_rule_no_poll(balancer, rule) + if not accepted: + msg = 'Delete access rule not accepted' + raise LibcloudError(msg, driver=self) + + return self._get_updated_balancer(balancer) + + def ex_destroy_balancer_access_rule_no_poll(self, balancer, rule): + """ + Removes an access rule from a Balancer's access list. This method + returns immediately. + + :param balancer: Balancer to remove the access rule from. + :type balancer: :class:`LoadBalancer` + + :param rule: Access Rule to remove from the balancer. + :type rule: :class:`RackspaceAccessRule` + + :return: Returns whether the destroy request was accepted. + :rtype: ``bool`` + """ + uri = '/loadbalancers/%s/accesslist/%s' % (balancer.id, rule.id) + resp = self.connection.request(uri, method='DELETE') + + return resp.status == httplib.ACCEPTED + + def ex_destroy_balancer_access_rules(self, balancer, rules): + """ + Removes a list of access rules from a Balancer's access list. This + method blocks until the update request has been processed and the + balancer is in a RUNNING state again. + + :param balancer: Balancer to remove the access rules from. + :type balancer: :class:`LoadBalancer` + + :param rules: List of :class:`RackspaceAccessRule` objects to remove + from the balancer. + :type rules: ``list`` of :class:`RackspaceAccessRule` + + :return: Updated Balancer. 
+ :rtype: :class:`LoadBalancer` + """ + accepted = self.ex_destroy_balancer_access_rules_no_poll( + balancer, rules) + + if not accepted: + msg = 'Destroy access rules request not accepted' + raise LibcloudError(msg, driver=self) + + return self._get_updated_balancer(balancer) + + def ex_destroy_balancer_access_rules_no_poll(self, balancer, rules): + """ + Removes a list of access rules from a Balancer's access list. This + method returns immediately. + + :param balancer: Balancer to remove the access rules from. + :type balancer: :class:`LoadBalancer` + + :param rules: List of :class:`RackspaceAccessRule` objects to remove + from the balancer. + :type rules: ``list`` of :class:`RackspaceAccessRule` + + :return: Returns whether the destroy request was accepted. + :rtype: ``bool`` + """ + ids = [('id', rule.id) for rule in rules] + uri = '/loadbalancers/%s/accesslist' % balancer.id + + resp = self.connection.request(uri, + method='DELETE', + params=ids) + + return resp.status == httplib.ACCEPTED + + def ex_list_current_usage(self, balancer): + """ + Return current load balancer usage report. + + :param balancer: Balancer to remove the access rules from. + :type balancer: :class:`LoadBalancer` + + :return: Raw load balancer usage object. 
+ :rtype: ``dict`` + """ + uri = '/loadbalancers/%s/usage/current' % (balancer.id) + resp = self.connection.request(uri, method='GET') + + return resp.object def _to_protocols(self, object): protocols = [] @@ -155,24 +1309,222 @@ protocols.append(item['name'].lower()) return protocols + def _to_protocols_with_default_ports(self, object): + protocols = [] + for item in object["protocols"]: + name = item['name'].lower() + port = int(item['port']) + protocols.append((name, port)) + + return protocols + def _to_balancers(self, object): - return [ self._to_balancer(el) for el in object["loadBalancers"] ] + return [self._to_balancer(el) for el in object["loadBalancers"]] def _to_balancer(self, el): - lb = LoadBalancer(id=el["id"], - name=el["name"], - state=self.LB_STATE_MAP.get( - el["status"], State.UNKNOWN), - ip=el["virtualIps"][0]["address"], - port=el["port"], - driver=self.connection.driver) - return lb + ip = None + port = None + sourceAddresses = {} + + if 'port' in el: + port = el["port"] + + if 'sourceAddresses' in el: + sourceAddresses = el['sourceAddresses'] + + extra = { + "ipv6PublicSource": sourceAddresses.get("ipv6Public"), + "ipv4PublicSource": sourceAddresses.get("ipv4Public"), + "ipv4PrivateSource": sourceAddresses.get("ipv4Servicenet"), + } + + if 'virtualIps' in el: + ip = el['virtualIps'][0]['address'] + extra['virtualIps'] = el['virtualIps'] + + if 'protocol' in el: + extra['protocol'] = el['protocol'] + + if 'algorithm' in el and \ + el["algorithm"] in self._VALUE_TO_ALGORITHM_MAP: + extra["algorithm"] = self._value_to_algorithm(el["algorithm"]) + + if 'healthMonitor' in el: + health_monitor = self._to_health_monitor(el) + if health_monitor: + extra["healthMonitor"] = health_monitor + + if 'connectionThrottle' in el: + extra["connectionThrottle"] = self._to_connection_throttle(el) + + if 'sessionPersistence' in el: + persistence = el["sessionPersistence"] + extra["sessionPersistenceType"] =\ + persistence.get("persistenceType") + + if 
'connectionLogging' in el: + logging = el["connectionLogging"] + extra["connectionLoggingEnabled"] = logging.get("enabled") + + if 'nodes' in el: + extra['members'] = self._to_members(el) + + if 'created' in el: + extra['created'] = self._iso_to_datetime(el['created']['time']) + + if 'updated' in el: + extra['updated'] = self._iso_to_datetime(el['updated']['time']) + + if 'accessList' in el: + extra['accessList'] = [self._to_access_rule(rule) + for rule in el['accessList']] + + return LoadBalancer(id=el["id"], + name=el["name"], + state=self.LB_STATE_MAP.get( + el["status"], State.UNKNOWN), + ip=ip, + port=port, + driver=self.connection.driver, + extra=extra) + + def _to_members(self, object, balancer=None): + return [self._to_member(el, balancer) for el in object["nodes"]] + + def _to_member(self, el, balancer=None): + extra = {} + if 'weight' in el: + extra['weight'] = el["weight"] + + if 'condition' in el and\ + el['condition'] in self.LB_MEMBER_CONDITION_MAP: + extra['condition'] =\ + self.LB_MEMBER_CONDITION_MAP.get(el["condition"]) - def _to_members(self, object): - return [ self._to_member(el) for el in object["nodes"] ] + if 'status' in el: + extra['status'] = el["status"] - def _to_member(self, el): lbmember = Member(id=el["id"], - ip=el["address"], - port=el["port"]) + ip=el["address"], + port=el["port"], + balancer=balancer, + extra=extra) return lbmember + + def _protocol_to_value(self, protocol): + non_standard_protocols = {'imapv2': 'IMAPv2', 'imapv3': 'IMAPv3', + 'imapv4': 'IMAPv4'} + protocol_name = protocol.lower() + + if protocol_name in non_standard_protocols: + protocol_value = non_standard_protocols[protocol_name] + else: + protocol_value = protocol.upper() + + return protocol_value + + def _kwargs_to_mutable_attrs(self, **attrs): + update_attrs = {} + if "name" in attrs: + update_attrs['name'] = attrs['name'] + + if "algorithm" in attrs: + algorithm_value = self._algorithm_to_value(attrs['algorithm']) + update_attrs['algorithm'] = 
algorithm_value + + if "protocol" in attrs: + update_attrs['protocol'] =\ + self._protocol_to_value(attrs['protocol']) + + if "port" in attrs: + update_attrs['port'] = int(attrs['port']) + + if "vip" in attrs: + if attrs['vip'] == 'PUBLIC' or attrs['vip'] == 'SERVICENET': + update_attrs['virtualIps'] = [{'type': attrs['vip']}] + else: + update_attrs['virtualIps'] = [{'id': attrs['vip']}] + + return update_attrs + + def _kwargs_to_mutable_member_attrs(self, **attrs): + update_attrs = {} + if 'condition' in attrs: + update_attrs['condition'] =\ + self.CONDITION_LB_MEMBER_MAP.get(attrs['condition']) + + if 'weight' in attrs: + update_attrs['weight'] = attrs['weight'] + + return update_attrs + + def _to_health_monitor(self, el): + health_monitor_data = el["healthMonitor"] + + type = health_monitor_data.get("type") + delay = health_monitor_data.get("delay") + timeout = health_monitor_data.get("timeout") + attempts_before_deactivation =\ + health_monitor_data.get("attemptsBeforeDeactivation") + + if type == "CONNECT": + return RackspaceHealthMonitor( + type=type, delay=delay, timeout=timeout, + attempts_before_deactivation=attempts_before_deactivation) + + if type == "HTTP" or type == "HTTPS": + return RackspaceHTTPHealthMonitor( + type=type, delay=delay, timeout=timeout, + attempts_before_deactivation=attempts_before_deactivation, + path=health_monitor_data.get("path"), + status_regex=health_monitor_data.get("statusRegex"), + body_regex=health_monitor_data.get("bodyRegex", '')) + + return None + + def _to_connection_throttle(self, el): + connection_throttle_data = el["connectionThrottle"] + + min_connections = connection_throttle_data.get("minConnections") + max_connections = connection_throttle_data.get("maxConnections") + max_connection_rate = connection_throttle_data.get("maxConnectionRate") + rate_interval = connection_throttle_data.get("rateInterval") + + return RackspaceConnectionThrottle( + min_connections=min_connections, + max_connections=max_connections, + 
max_connection_rate=max_connection_rate, + rate_interval_seconds=rate_interval) + + def _to_access_rule(self, el): + return RackspaceAccessRule( + id=el.get("id"), + rule_type=self._to_access_rule_type(el.get("type")), + address=el.get("address")) + + def _to_access_rule_type(self, type): + if type == "ALLOW": + return RackspaceAccessRuleType.ALLOW + elif type == "DENY": + return RackspaceAccessRuleType.DENY + + def _iso_to_datetime(self, isodate): + date_formats = ('%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S%z') + date = None + + for date_format in date_formats: + try: + date = datetime.strptime(isodate, date_format) + except ValueError: + pass + + if date: + break + + return date + + +class RackspaceUKLBDriver(RackspaceLBDriver): + def __init__(self, *args, **kwargs): + kwargs['region'] = 'lon' + super(RackspaceUKLBDriver, self).__init__(*args, **kwargs) diff -Nru libcloud-0.5.0/libcloud/loadbalancer/__init__.py libcloud-0.15.1/libcloud/loadbalancer/__init__.py --- libcloud-0.5.0/libcloud/loadbalancer/__init__.py 2011-05-14 09:02:52.000000000 +0000 +++ libcloud-0.15.1/libcloud/loadbalancer/__init__.py 2013-08-30 12:21:18.000000000 +0000 @@ -13,10 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +""" +Module for working with Load Balancers +""" + __all__ = [ 'base', 'providers', 'types', 'drivers' ] - diff -Nru libcloud-0.5.0/libcloud/loadbalancer/providers.py libcloud-0.15.1/libcloud/loadbalancer/providers.py --- libcloud-0.5.0/libcloud/loadbalancer/providers.py 2011-05-21 20:10:20.000000000 +0000 +++ libcloud-0.15.1/libcloud/loadbalancer/providers.py 2013-11-29 12:35:04.000000000 +0000 @@ -13,21 +13,43 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from libcloud.utils import get_driver as get_provider_driver +from libcloud.utils.misc import get_driver as get_provider_driver +from libcloud.utils.misc import set_driver as set_provider_driver from libcloud.loadbalancer.types import Provider __all__ = [ - "Provider", - "DRIVERS", - "get_driver", - ] + "Provider", + "DRIVERS", + "get_driver", +] DRIVERS = { - Provider.RACKSPACE_US: - ('libcloud.loadbalancer.drivers.rackspace', 'RackspaceLBDriver'), - Provider.GOGRID: - ('libcloud.loadbalancer.drivers.gogrid', 'GoGridLBDriver'), + Provider.RACKSPACE: + ('libcloud.loadbalancer.drivers.rackspace', 'RackspaceLBDriver'), + Provider.GOGRID: + ('libcloud.loadbalancer.drivers.gogrid', 'GoGridLBDriver'), + Provider.NINEFOLD: + ('libcloud.loadbalancer.drivers.ninefold', 'NinefoldLBDriver'), + Provider.BRIGHTBOX: + ('libcloud.loadbalancer.drivers.brightbox', 'BrightboxLBDriver'), + Provider.ELB: + ('libcloud.loadbalancer.drivers.elb', 'ElasticLBDriver'), + Provider.CLOUDSTACK: + ('libcloud.loadbalancer.drivers.cloudstack', 'CloudStackLBDriver'), + Provider.GCE: + ('libcloud.loadbalancer.drivers.gce', 'GCELBDriver'), + + # Deprecated + Provider.RACKSPACE_US: + ('libcloud.loadbalancer.drivers.rackspace', 'RackspaceLBDriver'), + Provider.RACKSPACE_UK: + ('libcloud.loadbalancer.drivers.rackspace', 'RackspaceUKLBDriver'), } + def get_driver(provider): return get_provider_driver(DRIVERS, provider) + + +def set_driver(provider, module, klass): + return set_provider_driver(DRIVERS, provider, module, klass) diff -Nru libcloud-0.5.0/libcloud/loadbalancer/types.py libcloud-0.15.1/libcloud/loadbalancer/types.py --- libcloud-0.5.0/libcloud/loadbalancer/types.py 2011-05-21 20:10:20.000000000 +0000 +++ libcloud-0.15.1/libcloud/loadbalancer/types.py 2013-11-29 12:35:04.000000000 +0000 @@ -14,30 +14,57 @@ # limitations under the License. 
__all__ = [
-        "Provider",
-        "State",
-        "LibcloudLBError",
-        "LibcloudLBImmutableError",
-    ]
+    "Provider",
+    "State",
+    "LibcloudLBError",
+    "LibcloudLBImmutableError",
+]
 
 from libcloud.common.types import LibcloudError
 
-class LibcloudLBError(LibcloudError): pass
-class LibcloudLBImmutableError(LibcloudLBError): pass
+
+class LibcloudLBError(LibcloudError):
+    pass
+
+
+class LibcloudLBImmutableError(LibcloudLBError):
+    pass
+
 
 class Provider(object):
-    RACKSPACE_US = 0
-    GOGRID = 1
+    RACKSPACE = 'rackspace'
+    GOGRID = 'gogrid'
+    NINEFOLD = 'ninefold'
+    BRIGHTBOX = 'brightbox'
+    ELB = 'elb'
+    CLOUDSTACK = 'cloudstack'
+    GCE = 'gce'
+
+    # Deprecated
+    RACKSPACE_US = 'rackspace_us'
+    RACKSPACE_UK = 'rackspace_uk'
+
 
 class State(object):
     """
-    Standart states for a loadbalancer
+    Standard states for a loadbalancer
 
-    @cvar RUNNING: loadbalancer is running and ready to use
-    @cvar UNKNOWN: loabalancer state is unknown
+    :cvar RUNNING: loadbalancer is running and ready to use
+    :cvar UNKNOWN: loadbalancer state is unknown
     """
     RUNNING = 0
     PENDING = 1
     UNKNOWN = 2
+    ERROR = 3
+    DELETED = 4
+
+
+class MemberCondition(object):
+    """
+    Each member of a load balancer can have an associated condition
+    which determines its role within the load balancer.
+    """
+    ENABLED = 0
+    DISABLED = 1
+    DRAINING = 2
diff -Nru libcloud-0.5.0/libcloud/pricing.py libcloud-0.15.1/libcloud/pricing.py
--- libcloud-0.5.0/libcloud/pricing.py	2011-03-24 19:47:23.000000000 +0000
+++ libcloud-0.15.1/libcloud/pricing.py	2014-06-11 14:27:59.000000000 +0000
@@ -13,45 +13,73 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from __future__ import with_statement
+
 """
 A class which handles loading the pricing files.
""" +import os.path +from os.path import join as pjoin + try: - import json -except: import simplejson as json +except ImportError: + import json -import os.path -from os.path import join as pjoin +from libcloud.utils.connection import get_response_object -PRICING_FILE_PATH = 'data/pricing.json' +__all__ = [ + 'get_pricing', + 'get_size_price', + 'set_pricing', + 'clear_pricing_data', + 'download_pricing_file' +] + +# Default URL to the pricing file +DEFAULT_FILE_URL = 'https://git-wip-us.apache.org/repos/asf?p=libcloud.git;a=blob_plain;f=libcloud/data/pricing.json' # NOQA + +CURRENT_DIRECTORY = os.path.dirname(os.path.abspath(__file__)) +DEFAULT_PRICING_FILE_PATH = pjoin(CURRENT_DIRECTORY, 'data/pricing.json') +CUSTOM_PRICING_FILE_PATH = os.path.expanduser('~/.libcloud/pricing.json') +# Pricing data cache PRICING_DATA = { 'compute': {}, 'storage': {} } +VALID_PRICING_DRIVER_TYPES = ['compute', 'storage'] + + def get_pricing_file_path(file_path=None): - pricing_directory = os.path.dirname(os.path.abspath(__file__)) - pricing_file_path = pjoin(pricing_directory, PRICING_FILE_PATH) + if os.path.exists(CUSTOM_PRICING_FILE_PATH) and \ + os.path.isfile(CUSTOM_PRICING_FILE_PATH): + # Custom pricing file is available, use it + return CUSTOM_PRICING_FILE_PATH + + return DEFAULT_PRICING_FILE_PATH - return pricing_file_path def get_pricing(driver_type, driver_name, pricing_file_path=None): """ Return pricing for the provided driver. - @type driver_type: C{str} - @param driver_type: Driver type ('compute' or 'storage') + :type driver_type: ``str`` + :param driver_type: Driver type ('compute' or 'storage') - @type driver_name: C{str} - @param driver_name: Driver name + :type driver_name: ``str` + :param driver_name: Driver name - @return C{dict} Dictionary with pricing where a key name iz size ID and - the value is a price. + :type pricing_file_path: ``str`` + :param pricing_file_path: Custom path to a price file. If not provided + it uses a default path. 
+ + :rtype: ``dict`` + :return: Dictionary with pricing where a key name is size ID and + the value is a price. """ - if not driver_type in [ 'compute', 'storage' ]: + if driver_type not in VALID_PRICING_DRIVER_TYPES: raise AttributeError('Invalid driver type: %s', driver_type) if driver_name in PRICING_DATA[driver_type]: @@ -63,63 +91,126 @@ with open(pricing_file_path) as fp: content = fp.read() - pricing = json.loads(content)[driver_name] + pricing_data = json.loads(content) + size_pricing = pricing_data[driver_type][driver_name] + + for driver_type in VALID_PRICING_DRIVER_TYPES: + pricing = pricing_data.get(driver_type, None) + if pricing: + PRICING_DATA[driver_type] = pricing + + return size_pricing - PRICING_DATA[driver_type][driver_name] = pricing - return pricing def set_pricing(driver_type, driver_name, pricing): """ Populate the driver pricing dictionary. - @type driver_type: C{str} - @param driver_type: Driver type ('compute' or 'storage') + :type driver_type: ``str`` + :param driver_type: Driver type ('compute' or 'storage') - @type driver_name: C{str} - @param driver_name: Driver name + :type driver_name: ``str`` + :param driver_name: Driver name - @type pricing: C{dict} - @param pricing: Dictionary where a key is a size ID and a value is a price. + :type pricing: ``dict`` + :param pricing: Dictionary where a key is a size ID and a value is a price. """ PRICING_DATA[driver_type][driver_name] = pricing + def get_size_price(driver_type, driver_name, size_id): """ Return price for the provided size. 
- @type driver_type: C{str} - @param driver_type: Driver type ('compute' or 'storage') + :type driver_type: ``str`` + :param driver_type: Driver type ('compute' or 'storage') - @type driver_name: C{str} - @param driver_name: Driver name + :type driver_name: ``str`` + :param driver_name: Driver name - @type size_id: C{int/str} - @param size_id: Unique size ID (can be an integer or a string - depends on + :type size_id: ``str`` or ``int`` + :param size_id: Unique size ID (can be an integer or a string - depends on the driver) - @return C{int} Size price. + :rtype: ``float`` + :return: Size price. """ pricing = get_pricing(driver_type=driver_type, driver_name=driver_name) price = float(pricing[size_id]) return price + def invalidate_pricing_cache(): """ - Invalidate the cache for all the drivers. + Invalidate pricing cache for all the drivers. """ PRICING_DATA['compute'] = {} PRICING_DATA['storage'] = {} + +def clear_pricing_data(): + """ + Invalidate pricing cache for all the drivers. + + Note: This method does the same thing as invalidate_pricing_cache and is + here for backward compatibility reasons. + """ + invalidate_pricing_cache() + + def invalidate_module_pricing_cache(driver_type, driver_name): """ Invalidate the cache for the specified driver. - @type driver_type: C{str} - @param driver_type: Driver type ('compute' or 'storage') + :type driver_type: ``str`` + :param driver_type: Driver type ('compute' or 'storage') - @type driver_name: C{str} - @param driver_name: Driver name + :type driver_name: ``str`` + :param driver_name: Driver name """ if driver_name in PRICING_DATA[driver_type]: del PRICING_DATA[driver_type][driver_name] + + +def download_pricing_file(file_url=DEFAULT_FILE_URL, + file_path=CUSTOM_PRICING_FILE_PATH): + """ + Download pricing file from the file_url and save it to file_path. + + :type file_url: ``str`` + :param file_url: URL pointing to the pricing file. 
+ + :type file_path: ``str`` + :param file_path: Path where a download pricing file will be saved. + """ + dir_name = os.path.dirname(file_path) + + if not os.path.exists(dir_name): + # Verify a valid path is provided + msg = ('Can\'t write to %s, directory %s, doesn\'t exist' % + (file_path, dir_name)) + raise ValueError(msg) + + if os.path.exists(file_path) and os.path.isdir(file_path): + msg = ('Can\'t write to %s file path because it\'s a' + ' directory' % (file_path)) + raise ValueError(msg) + + response = get_response_object(file_url) + body = response.body + + # Verify pricing file is valid + try: + data = json.loads(body) + except json.decoder.JSONDecodeError: + msg = 'Provided URL doesn\'t contain valid pricing data' + raise Exception(msg) + + if not data.get('updated', None): + msg = 'Provided URL doesn\'t contain valid pricing data' + raise Exception(msg) + + # No need to stream it since file is small + with open(file_path, 'w') as file_handle: + file_handle.write(body) diff -Nru libcloud-0.5.0/libcloud/providers.py libcloud-0.15.1/libcloud/providers.py --- libcloud-0.5.0/libcloud/providers.py 2011-05-08 22:38:53.000000000 +0000 +++ libcloud-0.15.1/libcloud/providers.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from libcloud.utils import deprecated_warning -from libcloud.compute.providers import ( - DRIVERS, - Provider, - get_driver, - ) -__all__ = [ - "DRIVERS", - "Provider", - "get_driver", - ] -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/security.py libcloud-0.15.1/libcloud/security.py --- libcloud-0.5.0/libcloud/security.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/security.py 2014-06-11 14:28:05.000000000 +0000 @@ -19,14 +19,16 @@ import libcloud.security libcloud.security.VERIFY_SSL_CERT = True - # optional - libcloud.security.CA_CERTS_PATH.append("/path/to/cacert.txt") + # Optional. + libcloud.security.CA_CERTS_PATH.append('/path/to/cacert.txt') """ -# For backward compatibility this option is disabled by default -VERIFY_SSL_CERT = False + +import os + +VERIFY_SSL_CERT = True # File containing one or more PEM-encoded CA certificates -# concatenated together +# concatenated together. CA_CERTS_PATH = [ # centos/fedora: openssl '/etc/pki/tls/certs/ca-bundle.crt', @@ -39,11 +41,35 @@ # macports: curl-ca-bundle '/opt/local/share/curl/curl-ca-bundle.crt', + + # homebrew: openssl + '/usr/local/etc/openssl/cert.pem', + + # homebrew: curl-ca-bundle (backward compatibility) + '/usr/local/opt/curl-ca-bundle/share/ca-bundle.crt', ] -CA_CERTS_UNAVAILABLE_MSG = ( - 'Warning: No CA Certificates were found in CA_CERTS_PATH. ' - 'Toggling VERIFY_SSL_CERT to False.' 
+# Allow user to explicitly specify which CA bundle to use, using an environment +# variable +environment_cert_file = os.getenv('SSL_CERT_FILE', None) +if environment_cert_file is not None: + # Make sure the file exists + if not os.path.exists(environment_cert_file): + raise ValueError('Certificate file %s doesn\'t exist' % + (environment_cert_file)) + + if not os.path.isfile(environment_cert_file): + raise ValueError('Certificate file can\'t be a directory') + + # If a provided file exists we ignore other common paths because we + # don't want to fall-back to a potentially less restrictive bundle + CA_CERTS_PATH = [environment_cert_file] + +CA_CERTS_UNAVAILABLE_ERROR_MSG = ( + 'No CA Certificates were found in CA_CERTS_PATH. For information on ' + 'how to get required certificate files, please visit ' + 'https://libcloud.readthedocs.org/en/latest/other/' + 'ssl-certificate-validation.html' ) VERIFY_SSL_DISABLED_MSG = ( diff -Nru libcloud-0.5.0/libcloud/ssh.py libcloud-0.15.1/libcloud/ssh.py --- libcloud-0.5.0/libcloud/ssh.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/ssh.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from libcloud.utils import deprecated_warning -from libcloud.compute.ssh import ( - BaseSSHClient, - ParamikoSSHClient, - ShellOutSSHClient, - SSHClient, - have_paramiko) - -__all__ = [ - "BaseSSHClient", - "ParamikoSSHClient", - "ShellOutSSHClient", - "SSHClient", - "have_paramiko"] -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/storage/base.py libcloud-0.15.1/libcloud/storage/base.py --- libcloud-0.5.0/libcloud/storage/base.py 2011-05-21 11:33:03.000000000 +0000 +++ libcloud-0.15.1/libcloud/storage/base.py 2014-06-11 14:27:59.000000000 +0000 @@ -13,21 +13,42 @@ # See the License for the specific language governing permissions and # limitations under the License. +""" +Provides base classes for working with storage +""" + # Backward compatibility for Python 2.5 from __future__ import with_statement -import httplib import os.path # pylint: disable-msg=W0404 import hashlib from os.path import join as pjoin -from libcloud import utils +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import next +from libcloud.utils.py3 import b + +import libcloud.utils.files from libcloud.common.types import LibcloudError -from libcloud.common.base import ConnectionKey +from libcloud.common.base import ConnectionUserAndKey, BaseDriver from libcloud.storage.types import ObjectDoesNotExistError +__all__ = [ + 'Object', + 'Container', + 'StorageDriver', + + 'CHUNK_SIZE', + 'DEFAULT_CONTENT_TYPE' +] + CHUNK_SIZE = 8096 +# Default Content-Type which is sent when uploading an object if one is not +# supplied and can't be detected when using non-strict mode. +DEFAULT_CONTENT_TYPE = 'application/octet-stream' + + class Object(object): """ Represents an object (BLOB). @@ -36,26 +57,26 @@ def __init__(self, name, size, hash, extra, meta_data, container, driver): """ - @type name: C{str} - @param name: Object name (must be unique per container). + :param name: Object name (must be unique per container). 
+ :type name: ``str`` - @type size: C{int} - @param size: Object size in bytes. + :param size: Object size in bytes. + :type size: ``int`` - @type hash: C{string} - @param hash Object hash. + :param hash: Object hash. + :type hash: ``str`` - @type container: C{Container} - @param container: Object container. + :param container: Object container. + :type container: :class:`Container` - @type extra: C{dict} - @param extra: Extra attributes. + :param extra: Extra attributes. + :type extra: ``dict`` - @type meta_data: C{dict} - @param meta_data: Optional object meta data. + :param meta_data: Optional object meta data. + :type meta_data: ``dict`` - @type driver: C{StorageDriver} - @param driver: StorageDriver instance. + :param driver: StorageDriver instance. + :type driver: :class:`StorageDriver` """ self.name = name @@ -69,8 +90,8 @@ def get_cdn_url(self): return self.driver.get_object_cdn_url(obj=self) - def enable_cdn(self): - return self.driver.enable_object_cdn(obj=self) + def enable_cdn(self, **kwargs): + return self.driver.enable_object_cdn(obj=self, **kwargs) def download(self, destination_path, overwrite_existing=False, delete_on_failure=True): @@ -88,6 +109,7 @@ return ('' % (self.name, self.size, self.hash, self.driver.name)) + class Container(object): """ Represents a container (bucket) which can hold multiple objects. @@ -95,44 +117,50 @@ def __init__(self, name, extra, driver): """ - @type name: C{str} - @param name: Container name (must be unique). + :param name: Container name (must be unique). + :type name: ``str`` - @type extra: C{dict} - @param extra: Extra attributes. + :param extra: Extra attributes. + :type extra: ``dict`` - @type driver: C{StorageDriver} - @param driver: StorageDriver instance. + :param driver: StorageDriver instance. 
+ :type driver: :class:`StorageDriver` """ self.name = name self.extra = extra or {} self.driver = driver + def iterate_objects(self): + return self.driver.iterate_container_objects(container=self) + def list_objects(self): return self.driver.list_container_objects(container=self) def get_cdn_url(self): return self.driver.get_container_cdn_url(container=self) - def enable_cdn(self): - return self.driver.enable_container_cdn(container=self) + def enable_cdn(self, **kwargs): + return self.driver.enable_container_cdn(container=self, **kwargs) def get_object(self, object_name): return self.driver.get_object(container_name=self.name, object_name=object_name) - def upload_object(self, file_path, object_name, extra=None, verify_hash=True): + def upload_object(self, file_path, object_name, extra=None, **kwargs): return self.driver.upload_object( - file_path, self, object_name, extra, verify_hash) + file_path, self, object_name, extra=extra, **kwargs) - def upload_object_via_stream(self, iterator, object_name, extra=None): + def upload_object_via_stream(self, iterator, object_name, extra=None, + **kwargs): return self.driver.upload_object_via_stream( - iterator, self, object_name, extra) + iterator, self, object_name, extra=extra, **kwargs) def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True): - return self.driver.download_object(obj, destination_path) + return self.driver.download_object( + obj, destination_path, overwrite_existing=overwrite_existing, + delete_on_failure=delete_on_failure) def download_object_as_stream(self, obj, chunk_size=None): return self.driver.download_object_as_stream(obj, chunk_size) @@ -147,61 +175,80 @@ return ('' % (self.name, self.driver.name)) -class StorageDriver(object): + +class StorageDriver(BaseDriver): """ A base StorageDriver to derive from. 
""" - connectionCls = ConnectionKey + connectionCls = ConnectionUserAndKey name = None hash_type = 'md5' + supports_chunked_encoding = False - def __init__(self, key, secret=None, secure=True, host=None, port=None): - self.key = key - self.secret = secret - self.secure = secure - args = [self.key] + # When strict mode is used, exception will be thrown if no content type is + # provided and none can be detected when uploading an object + strict_mode = False - if self.secret != None: - args.append(self.secret) + def __init__(self, key, secret=None, secure=True, host=None, port=None, + **kwargs): + super(StorageDriver, self).__init__(key=key, secret=secret, + secure=secure, host=host, + port=port, **kwargs) - args.append(secure) + def iterate_containers(self): + """ + Return a generator of containers for the given account - if host != None: - args.append(host) + :return: A generator of Container instances. + :rtype: ``generator`` of :class:`Container` + """ + raise NotImplementedError( + 'iterate_containers not implemented for this driver') - if port != None: - args.append(port) + def list_containers(self): + """ + Return a list of containers. - self.connection = self.connectionCls(*args) + :return: A list of Container instances. + :rtype: ``list`` of :class:`Container` + """ + return list(self.iterate_containers()) + + def iterate_container_objects(self, container): + """ + Return a generator of objects for the given container. - self.connection.driver = self - self.connection.connect() + :param container: Container instance + :type container: :class:`Container` - def list_containters(self): + :return: A generator of Object instances. + :rtype: ``generator`` of :class:`Object` + """ raise NotImplementedError( - 'list_containers not implemented for this driver') + 'iterate_container_objects not implemented for this driver') def list_container_objects(self, container): """ Return a list of objects for the given container. 
- @type container: C{Container} - @param container: Container instance + :param container: Container instance. + :type container: :class:`Container` - @return A list of Object instances. + :return: A list of Object instances. + :rtype: ``list`` of :class:`Object` """ - raise NotImplementedError( - 'list_objects not implemented for this driver') + return list(self.iterate_container_objects(container)) def get_container(self, container_name): """ Return a container instance. - @type container_name: C{str} - @param container_name: Container name. + :param container_name: Container name. + :type container_name: ``str`` - @return: C{Container} instance. + :return: :class:`Container` instance. + :rtype: :class:`Container` """ raise NotImplementedError( 'get_object not implemented for this driver') @@ -210,10 +257,11 @@ """ Return a container CDN URL. - @type container: C{Container} - @param container: Container instance + :param container: Container instance + :type container: :class:`Container` - @return A CDN URL for this container. + :return: A CDN URL for this container. + :rtype: ``str`` """ raise NotImplementedError( 'get_container_cdn_url not implemented for this driver') @@ -222,57 +270,79 @@ """ Return an object instance. - @type container_name: C{str} - @param container_name: Container name. + :param container_name: Container name. + :type container_name: ``str`` - @type object_name: C{str} - @param object_name: Object name. + :param object_name: Object name. + :type object_name: ``str`` - @return: C{Object} instance. + :return: :class:`Object` instance. + :rtype: :class:`Object` """ raise NotImplementedError( 'get_object not implemented for this driver') def get_object_cdn_url(self, obj): """ - Return a container CDN URL. + Return a object CDN URL. - @type obj: C{Object} - @param obj: Object instance + :param obj: Object instance + :type obj: :class:`Object` - @return A CDN URL for this object. + :return: A CDN URL for this object. 
+ :rtype: ``str`` """ raise NotImplementedError( 'get_object_cdn_url not implemented for this driver') def enable_container_cdn(self, container): + """ + Enable container CDN. + + :param container: Container instance + :type container: :class:`Container` + + :rtype: ``bool`` + """ raise NotImplementedError( 'enable_container_cdn not implemented for this driver') def enable_object_cdn(self, obj): + """ + Enable object CDN. + + :param obj: Object instance + :type obj: :class:`Object` + + :rtype: ``bool`` + """ raise NotImplementedError( 'enable_object_cdn not implemented for this driver') - def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True): + def download_object(self, obj, destination_path, overwrite_existing=False, + delete_on_failure=True): """ Download an object to the specified destination path. - @type obj; C{Object} - @param obj: Object instance. + :param obj: Object instance. + :type obj: :class:`Object` - @type destination_path: C{str} - @type destination_path: Full path to a file or a directory where the - incoming file will be saved. - - @type overwrite_existing: C{bool} - @type overwrite_existing: True to overwrite an existing file, defaults to False. - - @type delete_on_failure: C{bool} - @param delete_on_failure: True to delete a partially downloaded file if - the download was not successful (hash mismatch / file size). - - @return C{bool} True if an object has been successfully downloaded, False - otherwise. + :param destination_path: Full path to a file or a directory where the + incoming file will be saved. + :type destination_path: ``str`` + + :param overwrite_existing: True to overwrite an existing file, + defaults to False. + :type overwrite_existing: ``bool`` + + :param delete_on_failure: True to delete a partially downloaded file if + the download was not successful (hash + mismatch / file size). 
+ :type delete_on_failure: ``bool`` + + :return: True if an object has been successfully downloaded, False + otherwise. + :rtype: ``bool`` """ raise NotImplementedError( 'download_object not implemented for this driver') @@ -281,11 +351,11 @@ """ Return a generator which yields object data. - @type obj: C{Object} - @param obj: Object instance + :param obj: Object instance + :type obj: :class:`Object` - @type chunk_size: C{int} - @param chunk_size: Optional chunk size (in bytes). + :param chunk_size: Optional chunk size (in bytes). + :type chunk_size: ``int`` """ raise NotImplementedError( 'download_object_as_stream not implemented for this driver') @@ -293,22 +363,24 @@ def upload_object(self, file_path, container, object_name, extra=None, verify_hash=True): """ - Upload an object. + Upload an object currently located on a disk. + + :param file_path: Path to the object on disk. + :type file_path: ``str`` - @type file_path: C{str} - @param file_path: Path to the object on disk. + :param container: Destination container. + :type container: :class:`Container` - @type container: C{Container} - @param container: Destination container. + :param object_name: Object name. + :type object_name: ``str`` - @type object_name: C{str} - @param object_name: Object name. + :param verify_hash: Verify hash + :type verify_hash: ``bool`` - @type extra: C{dict} - @param extra: (optional) Extra attributes (driver specific). + :param extra: Extra attributes (driver specific). (optional) + :type extra: ``dict`` - @type verify_hash: C{boolean} - @param verify_hash: True to do a file integrity check. + :rtype: :class:`Object` """ raise NotImplementedError( 'upload_object not implemented for this driver') @@ -317,17 +389,37 @@ object_name, extra=None): """ - @type iterator: C{object} - @param iterator: An object which implements the iterator interface. + Upload an object using an iterator. 
+ + If a provider supports it, chunked transfer encoding is used and you + don't need to know in advance the amount of data to be uploaded. + + Otherwise if a provider doesn't support it, iterator will be exhausted + so a total size for data to be uploaded can be determined. + + Note: Exhausting the iterator means that the whole data must be + buffered in memory which might result in memory exhausting when + uploading a very large object. - @type container: C{Container} - @param container: Destination container. + If a file is located on a disk you are advised to use upload_object + function which uses fs.stat function to determine the file size and it + doesn't need to buffer whole object in the memory. - @type object_name: C{str} - @param object_name: Object name. + :type iterator: :class:`object` + :param iterator: An object which implements the iterator interface. - @type extra: C{dict} - @param extra: (optional) Extra attributes (driver specific). + :type container: :class:`Container` + :param container: Destination container. + + :type object_name: ``str`` + :param object_name: Object name. + + :type extra: ``dict`` + :param extra: (optional) Extra attributes (driver specific). Note: + This dictionary must contain a 'content_type' key which represents + a content type of the stored object. + + :rtype: ``object`` """ raise NotImplementedError( 'upload_object_via_stream not implemented for this driver') @@ -336,10 +428,11 @@ """ Delete an object. - @type obj: C{Object} - @param obj: Object instance. + :type obj: :class:`Object` + :param obj: Object instance. - @return: C{bool} True on success. + :return: ``bool`` True on success. + :rtype: ``bool`` """ raise NotImplementedError( 'delete_object not implemented for this driver') @@ -348,10 +441,11 @@ """ Create a new container. - @type container_name: C{str} - @param container_name: Container name. + :type container_name: ``str`` + :param container_name: Container name. - @return C{Container} instance on success. 
+ :return: Container instance on success. + :rtype: :class:`Container` """ raise NotImplementedError( 'create_container not implemented for this driver') @@ -360,10 +454,11 @@ """ Delete a container. - @type container: C{Container} - @param container: Container instance + :type container: :class:`Container` + :param container: Container instance - @return C{bool} True on success, False otherwise. + :return: ``True`` on success, ``False`` otherwise. + :rtype: ``bool`` """ raise NotImplementedError( 'delete_container not implemented for this driver') @@ -373,23 +468,26 @@ """ Call passed callback and start transfer of the object' - @type obj: C{Object} - @param obj: Object instance. - - @type callback: C{Function} - @param callback: Function which is called with the passed callback_kwargs + :type obj: :class:`Object` + :param obj: Object instance. - @type callback_kwargs: C{dict} - @param callback_kwargs: Keyword arguments which are passed to the callback. + :type callback: :class:`function` + :param callback: Function which is called with the passed + callback_kwargs + + :type callback_kwargs: ``dict`` + :param callback_kwargs: Keyword arguments which are passed to the + callback. - @typed response: C{Response} - @param response: Response instance. + :typed response: :class:`Response` + :param response: Response instance. - @type success_status_code: C{int} - @param success_status_code: Status code which represents a successful + :type success_status_code: ``int`` + :param success_status_code: Status code which represents a successful transfer (defaults to httplib.OK) - @return C{bool} True on success, False otherwise. + :return: ``True`` on success, ``False`` otherwise. + :rtype: ``bool`` """ success_status_code = success_status_code or httplib.OK @@ -409,26 +507,29 @@ """ Save object to the provided path. - @type response: C{RawResponse} - @param response: RawResponse instance. + :type response: :class:`RawResponse` + :param response: RawResponse instance. 
- @type obj: C{Object} - @param obj: Object instance. + :type obj: :class:`Object` + :param obj: Object instance. - @type destination_path: C{Str} - @param destination_path: Destination directory. + :type destination_path: ``str`` + :param destination_path: Destination directory. - @type delete_on_failure: C{bool} - @param delete_on_failure: True to delete partially downloaded object if + :type delete_on_failure: ``bool`` + :param delete_on_failure: True to delete partially downloaded object if the download fails. - @type overwrite_existing: C{bool} - @param overwrite_existing: True to overwrite a local path if it already + + :type overwrite_existing: ``bool`` + :param overwrite_existing: True to overwrite a local path if it already exists. - @type chunk_size: C{int} - @param chunk_size: Optional chunk size (defaults to L{libcloud.storage.base.CHUNK_SIZE}, 8kb) + :type chunk_size: ``int`` + :param chunk_size: Optional chunk size + (defaults to ``libcloud.storage.base.CHUNK_SIZE``, 8kb) - @return C{bool} True on success, False otherwise. + :return: ``True`` on success, ``False`` otherwise. + :rtype: ``bool`` """ chunk_size = chunk_size or CHUNK_SIZE @@ -451,10 +552,10 @@ 'overwrite_existing=False', driver=self) - stream = utils.read_in_chunks(response, chunk_size) + stream = libcloud.utils.files.read_in_chunks(response, chunk_size) try: - data_read = stream.next() + data_read = next(stream) except StopIteration: # Empty response? 
return False @@ -463,11 +564,11 @@ with open(file_path, 'wb') as file_handle: while len(data_read) > 0: - file_handle.write(data_read) + file_handle.write(b(data_read)) bytes_transferred += len(data_read) try: - data_read = stream.next() + data_read = next(stream) except StopIteration: data_read = '' @@ -493,88 +594,171 @@ headers = headers or {} if file_path and not os.path.exists(file_path): - raise OSError('File %s does not exist' % (file_path)) + raise OSError('File %s does not exist' % (file_path)) + + if iterator is not None and not hasattr(iterator, 'next') and not \ + hasattr(iterator, '__next__'): + raise AttributeError('iterator object must implement next() ' + + 'method.') if not content_type: if file_path: name = file_path else: name = object_name - content_type, _ = utils.guess_file_mime_type(name) + content_type, _ = libcloud.utils.files.guess_file_mime_type(name) if not content_type: - raise AttributeError( - 'File content-type could not be guessed and' + - ' no content_type value provided') + if self.strict_mode: + raise AttributeError('File content-type could not be ' + 'guessed and no content_type value ' + 'is provided') + else: + # Fallback to a content-type + content_type = DEFAULT_CONTENT_TYPE + + file_size = None if iterator: - headers['Transfer-Encoding'] = 'chunked' - upload_func_kwargs['chunked'] = True + if self.supports_chunked_encoding: + headers['Transfer-Encoding'] = 'chunked' + upload_func_kwargs['chunked'] = True + else: + # Chunked transfer encoding is not supported. Need to buffer + # all the data in memory so we can determine file size. 
+ iterator = libcloud.utils.files.read_in_chunks( + iterator=iterator) + data = libcloud.utils.files.exhaust_iterator(iterator=iterator) + + file_size = len(data) + upload_func_kwargs['data'] = data else: file_size = os.path.getsize(file_path) - headers['Content-Length'] = file_size upload_func_kwargs['chunked'] = False + if file_size is not None and 'Content-Length' not in headers: + headers['Content-Length'] = file_size + headers['Content-Type'] = content_type response = self.connection.request(request_path, method=request_method, data=None, headers=headers, raw=True) upload_func_kwargs['response'] = response - success, data_hash, bytes_transferred = upload_func(**upload_func_kwargs) + success, data_hash, bytes_transferred = upload_func( + **upload_func_kwargs) if not success: - raise LibcloudError(value='Object upload failed, Perhaps a timeout?', - driver=self) + raise LibcloudError( + value='Object upload failed, Perhaps a timeout?', driver=self) - result_dict = { 'response': response, 'data_hash': data_hash, - 'bytes_transferred': bytes_transferred } + result_dict = {'response': response, 'data_hash': data_hash, + 'bytes_transferred': bytes_transferred} return result_dict + def _upload_data(self, response, data, calculate_hash=True): + """ + Upload data stored in a string. + + :type response: :class:`RawResponse` + :param response: RawResponse object. + + :type data: ``str`` + :param data: Data to upload. + + :type calculate_hash: ``bool`` + :param calculate_hash: True to calculate hash of the transferred data. + (defauls to True). + + :rtype: ``tuple`` + :return: First item is a boolean indicator of success, second + one is the uploaded data MD5 hash and the third one + is the number of transferred bytes. 
+ """ + bytes_transferred = 0 + data_hash = None + + if calculate_hash: + data_hash = self._get_hash_function() + data_hash.update(b(data)) + + try: + response.connection.connection.send(b(data)) + except Exception: + # TODO: let this exception propagate + # Timeout, etc. + return False, None, bytes_transferred + + bytes_transferred = len(data) + + if calculate_hash: + data_hash = data_hash.hexdigest() + + return True, data_hash, bytes_transferred + def _stream_data(self, response, iterator, chunked=False, - calculate_hash=True, chunk_size=None): + calculate_hash=True, chunk_size=None, data=None): """ Stream a data over an http connection. - @type response: C{RawResponse} - @param response: RawResponse object. + :type response: :class:`RawResponse` + :param response: RawResponse object. - @type iterator: C{} - @param response: An object which implements an iterator interface + :type iterator: :class:`object` + :param response: An object which implements an iterator interface or a File like object with read method. - @type chunk_size: C{int} - @param chunk_size: Optional chunk size (defaults to CHUNK_SIZE) - - @return C{tuple} First item is a boolean indicator of success, second - one is the uploaded data MD5 hash and the third one - is the number of transferred bytes. + :type chunked: ``bool`` + :param chunked: True if the chunked transfer encoding should be used + (defauls to False). + + :type calculate_hash: ``bool`` + :param calculate_hash: True to calculate hash of the transferred data. + (defauls to True). + + :type chunk_size: ``int`` + :param chunk_size: Optional chunk size (defaults to ``CHUNK_SIZE``) + + :rtype: ``tuple`` + :return: First item is a boolean indicator of success, second + one is the uploaded data MD5 hash and the third one + is the number of transferred bytes. 
""" chunk_size = chunk_size or CHUNK_SIZE data_hash = None if calculate_hash: - data_hash = hashlib.md5() + data_hash = self._get_hash_function() - generator = utils.read_in_chunks(iterator, chunk_size) + generator = libcloud.utils.files.read_in_chunks(iterator, chunk_size) bytes_transferred = 0 try: - chunk = generator.next() + chunk = next(generator) except StopIteration: - # No data? - return False, None, None + # Special case when StopIteration is thrown on the first iteration + # create a 0-byte long object + chunk = '' + if chunked: + response.connection.connection.send(b('%X\r\n' % + (len(chunk)))) + response.connection.connection.send(chunk) + response.connection.connection.send(b('\r\n')) + response.connection.connection.send(b('0\r\n\r\n')) + else: + response.connection.connection.send(chunk) + return True, data_hash.hexdigest(), bytes_transferred while len(chunk) > 0: try: if chunked: - response.connection.connection.send('%X\r\n' % - (len(chunk))) - response.connection.connection.send(chunk) - response.connection.connection.send('\r\n') + response.connection.connection.send(b('%X\r\n' % + (len(chunk)))) + response.connection.connection.send(b(chunk)) + response.connection.connection.send(b('\r\n')) else: - response.connection.connection.send(chunk) + response.connection.connection.send(b(chunk)) except Exception: # TODO: let this exception propagate # Timeout, etc. @@ -582,15 +766,15 @@ bytes_transferred += len(chunk) if calculate_hash: - data_hash.update(chunk) + data_hash.update(b(chunk)) try: - chunk = generator.next() + chunk = next(generator) except StopIteration: chunk = '' if chunked: - response.connection.connection.send('0\r\n\r\n') + response.connection.connection.send(b('0\r\n\r\n')) if calculate_hash: data_hash = data_hash.hexdigest() @@ -602,21 +786,22 @@ """ Upload a file to the server. - @type response: C{RawResponse} - @param response: RawResponse object. + :type response: :class:`RawResponse` + :param response: RawResponse object. 
- @type file_path: C{str} - @param file_path: Path to a local file. + :type file_path: ``str`` + :param file_path: Path to a local file. - @type iterator: C{} - @param response: An object which implements an iterator interface (File + :type iterator: :class:`object` + :param response: An object which implements an iterator interface (File object, etc.) - @return C{tuple} First item is a boolean indicator of success, second - one is the uploaded data MD5 hash and the third one - is the number of transferred bytes. + :rtype: ``tuple`` + :return: First item is a boolean indicator of success, second + one is the uploaded data MD5 hash and the third one + is the number of transferred bytes. """ - with open (file_path, 'rb') as file_handle: + with open(file_path, 'rb') as file_handle: success, data_hash, bytes_transferred = ( self._stream_data( response=response, @@ -625,3 +810,16 @@ calculate_hash=calculate_hash)) return success, data_hash, bytes_transferred + + def _get_hash_function(self): + """ + Return instantiated hash function for the hash type supported by + the provider. + """ + try: + func = getattr(hashlib, self.hash_type)() + except AttributeError: + raise RuntimeError('Invalid or unsupported hash type: %s' % + (self.hash_type)) + + return func diff -Nru libcloud-0.5.0/libcloud/storage/drivers/atmos.py libcloud-0.15.1/libcloud/storage/drivers/atmos.py --- libcloud-0.5.0/libcloud/storage/drivers/atmos.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/storage/drivers/atmos.py 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,472 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import base64 +import hashlib +import hmac +import time + +from libcloud.utils.py3 import PY3 +from libcloud.utils.py3 import b +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import next +from libcloud.utils.py3 import urlparse +from libcloud.utils.py3 import urlencode +from libcloud.utils.py3 import urlquote +from libcloud.utils.py3 import urlunquote + +if PY3: + from io import FileIO as file + +from libcloud.utils.files import read_in_chunks, guess_file_mime_type +from libcloud.common.base import ConnectionUserAndKey, XmlResponse +from libcloud.common.types import LibcloudError + +from libcloud.storage.base import Object, Container, StorageDriver, CHUNK_SIZE +from libcloud.storage.types import ContainerAlreadyExistsError, \ + ContainerDoesNotExistError, ContainerIsNotEmptyError, \ + ObjectDoesNotExistError + + +def collapse(s): + return ' '.join([x for x in s.split(' ') if x]) + + +class AtmosError(LibcloudError): + def __init__(self, code, message, driver=None): + super(AtmosError, self).__init__(value=message, driver=driver) + self.code = code + + +class AtmosResponse(XmlResponse): + def success(self): + return self.status in (httplib.OK, httplib.CREATED, httplib.NO_CONTENT, + httplib.PARTIAL_CONTENT) + + def parse_error(self): + tree = self.parse_body() + + if tree is None: + return None + + code = int(tree.find('Code').text) + message = tree.find('Message').text + raise AtmosError(code=code, message=message, + driver=self.connection.driver) + + +class AtmosConnection(ConnectionUserAndKey): + responseCls = AtmosResponse + 
+ def add_default_headers(self, headers): + headers['x-emc-uid'] = self.user_id + headers['Date'] = time.strftime('%a, %d %b %Y %H:%M:%S GMT', + time.gmtime()) + headers['x-emc-date'] = headers['Date'] + + if 'Content-Type' not in headers: + headers['Content-Type'] = 'application/octet-stream' + if 'Accept' not in headers: + headers['Accept'] = '*/*' + + return headers + + def pre_connect_hook(self, params, headers): + headers['x-emc-signature'] = self._calculate_signature(params, headers) + + return params, headers + + def _calculate_signature(self, params, headers): + pathstring = urlunquote(self.action) + if pathstring.startswith(self.driver.path): + pathstring = pathstring[len(self.driver.path):] + if params: + if type(params) is dict: + params = list(params.items()) + pathstring += '?' + urlencode(params) + pathstring = pathstring.lower() + + xhdrs = [(k, v) for k, v in list(headers.items()) if + k.startswith('x-emc-')] + xhdrs.sort(key=lambda x: x[0]) + + signature = [ + self.method, + headers.get('Content-Type', ''), + headers.get('Range', ''), + headers.get('Date', ''), + pathstring, + ] + signature.extend([k + ':' + collapse(v) for k, v in xhdrs]) + signature = '\n'.join(signature) + key = base64.b64decode(self.key) + signature = hmac.new(b(key), b(signature), hashlib.sha1).digest() + return base64.b64encode(b(signature)).decode('utf-8') + + +class AtmosDriver(StorageDriver): + connectionCls = AtmosConnection + + host = None + path = None + api_name = 'atmos' + supports_chunked_encoding = True + website = 'http://atmosonline.com/' + name = 'atmos' + + DEFAULT_CDN_TTL = 60 * 60 * 24 * 7 # 1 week + + def __init__(self, key, secret=None, secure=True, host=None, port=None): + host = host or self.host + super(AtmosDriver, self).__init__(key, secret, secure, host, port) + + def iterate_containers(self): + result = self.connection.request(self._namespace_path('')) + entries = self._list_objects(result.object, object_type='directory') + for entry in entries: + 
extra = { + 'object_id': entry['id'] + } + yield Container(entry['name'], extra, self) + + def get_container(self, container_name): + path = self._namespace_path(container_name) + '/?metadata/system' + try: + result = self.connection.request(path) + except AtmosError: + e = sys.exc_info()[1] + if e.code != 1003: + raise + raise ContainerDoesNotExistError(e, self, container_name) + meta = self._emc_meta(result) + extra = { + 'object_id': meta['objectid'] + } + return Container(container_name, extra, self) + + def create_container(self, container_name): + path = self._namespace_path(container_name) + '/' + try: + self.connection.request(path, method='POST') + except AtmosError: + e = sys.exc_info()[1] + if e.code != 1016: + raise + raise ContainerAlreadyExistsError(e, self, container_name) + return self.get_container(container_name) + + def delete_container(self, container): + try: + self.connection.request(self._namespace_path(container.name) + '/', + method='DELETE') + except AtmosError: + e = sys.exc_info()[1] + if e.code == 1003: + raise ContainerDoesNotExistError(e, self, container.name) + elif e.code == 1023: + raise ContainerIsNotEmptyError(e, self, container.name) + return True + + def get_object(self, container_name, object_name): + container = self.get_container(container_name) + object_name_cleaned = self._clean_object_name(object_name) + path = self._namespace_path(container_name) + '/' + object_name_cleaned + + try: + result = self.connection.request(path + '?metadata/system') + system_meta = self._emc_meta(result) + + result = self.connection.request(path + '?metadata/user') + user_meta = self._emc_meta(result) + except AtmosError: + e = sys.exc_info()[1] + if e.code != 1003: + raise + raise ObjectDoesNotExistError(e, self, object_name) + + last_modified = time.strptime(system_meta['mtime'], + '%Y-%m-%dT%H:%M:%SZ') + last_modified = time.strftime('%a, %d %b %Y %H:%M:%S GMT', + last_modified) + extra = { + 'object_id': system_meta['objectid'], + 
'last_modified': last_modified + } + data_hash = user_meta.pop('md5', '') + return Object(object_name, int(system_meta['size']), data_hash, extra, + user_meta, container, self) + + def upload_object(self, file_path, container, object_name, extra=None, + verify_hash=True): + upload_func = self._upload_file + upload_func_kwargs = {'file_path': file_path} + method = 'PUT' + + extra = extra or {} + object_name_cleaned = self._clean_object_name(object_name) + request_path = self._namespace_path(container.name) + '/' +\ + object_name_cleaned + content_type = extra.get('content_type', None) + + try: + self.connection.request(request_path + '?metadata/system') + except AtmosError: + e = sys.exc_info()[1] + if e.code != 1003: + raise + method = 'POST' + + result_dict = self._upload_object( + object_name=object_name, + content_type=content_type, + upload_func=upload_func, + upload_func_kwargs=upload_func_kwargs, + request_path=request_path, + request_method=method, + headers={}, file_path=file_path) + + bytes_transferred = result_dict['bytes_transferred'] + + if extra is None: + meta_data = {} + else: + meta_data = extra.get('meta_data', {}) + meta_data['md5'] = result_dict['data_hash'] + user_meta = ', '.join([k + '=' + str(v) for k, v in + list(meta_data.items())]) + self.connection.request(request_path + '?metadata/user', method='POST', + headers={'x-emc-meta': user_meta}) + result = self.connection.request(request_path + '?metadata/system') + meta = self._emc_meta(result) + del meta_data['md5'] + extra = { + 'object_id': meta['objectid'], + 'meta_data': meta_data, + } + + return Object(object_name, bytes_transferred, result_dict['data_hash'], + extra, meta_data, container, self) + + def upload_object_via_stream(self, iterator, container, object_name, + extra=None): + if isinstance(iterator, file): + iterator = iter(iterator) + + data_hash = hashlib.md5() + generator = read_in_chunks(iterator, CHUNK_SIZE, True) + bytes_transferred = 0 + try: + chunk = next(generator) + 
except StopIteration: + chunk = '' + + path = self._namespace_path(container.name + '/' + object_name) + method = 'PUT' + + if extra is not None: + content_type = extra.get('content_type', None) + else: + content_type = None + if not content_type: + content_type, _ = guess_file_mime_type(object_name) + + if not content_type: + raise AttributeError( + 'File content-type could not be guessed and' + + ' no content_type value provided') + + try: + self.connection.request(path + '?metadata/system') + except AtmosError: + e = sys.exc_info()[1] + if e.code != 1003: + raise + method = 'POST' + + while True: + end = bytes_transferred + len(chunk) - 1 + data_hash.update(b(chunk)) + headers = { + 'x-emc-meta': 'md5=' + data_hash.hexdigest(), + 'Content-Type': content_type, + } + + if len(chunk) > 0 and bytes_transferred > 0: + headers['Range'] = 'Bytes=%d-%d' % (bytes_transferred, end) + method = 'PUT' + + result = self.connection.request(path, method=method, data=chunk, + headers=headers) + bytes_transferred += len(chunk) + + try: + chunk = next(generator) + except StopIteration: + break + if len(chunk) == 0: + break + + data_hash = data_hash.hexdigest() + + if extra is None: + meta_data = {} + else: + meta_data = extra.get('meta_data', {}) + meta_data['md5'] = data_hash + user_meta = ', '.join([k + '=' + str(v) for k, v in + list(meta_data.items())]) + self.connection.request(path + '?metadata/user', method='POST', + headers={'x-emc-meta': user_meta}) + + result = self.connection.request(path + '?metadata/system') + + meta = self._emc_meta(result) + extra = { + 'object_id': meta['objectid'], + 'meta_data': meta_data, + } + + return Object(object_name, bytes_transferred, data_hash, extra, + meta_data, container, self) + + def download_object(self, obj, destination_path, overwrite_existing=False, + delete_on_failure=True): + path = self._namespace_path(obj.container.name + '/' + obj.name) + response = self.connection.request(path, method='GET', raw=True) + + return 
self._get_object(obj=obj, callback=self._save_object, + response=response, + callback_kwargs={ + 'obj': obj, + 'response': response.response, + 'destination_path': destination_path, + 'overwrite_existing': overwrite_existing, + 'delete_on_failure': delete_on_failure + }, + success_status_code=httplib.OK) + + def download_object_as_stream(self, obj, chunk_size=None): + path = self._namespace_path(obj.container.name + '/' + obj.name) + response = self.connection.request(path, method='GET', raw=True) + + return self._get_object(obj=obj, callback=read_in_chunks, + response=response, + callback_kwargs={ + 'iterator': response.response, + 'chunk_size': chunk_size + }, + success_status_code=httplib.OK) + + def delete_object(self, obj): + path = self._namespace_path(obj.container.name) + '/' +\ + self._clean_object_name(obj.name) + try: + self.connection.request(path, method='DELETE') + except AtmosError: + e = sys.exc_info()[1] + if e.code != 1003: + raise + raise ObjectDoesNotExistError(e, self, obj.name) + return True + + def enable_object_cdn(self, obj): + return True + + def get_object_cdn_url(self, obj, expiry=None, use_object=False): + """ + Return a object CDN URL. 
+ + :param obj: Object instance + :type obj: :class:`Object` + + :param expiry: Expiry + :type expiry: ``str`` + + :param use_object: Use object + :type use_object: ``bool`` + + :rtype: ``str`` + """ + if use_object: + path = '/rest/objects' + obj.meta_data['object_id'] + else: + path = '/rest/namespace/' + obj.container.name + '/' + obj.name + + if self.secure: + protocol = 'https' + else: + protocol = 'http' + + expiry = str(expiry or int(time.time()) + self.DEFAULT_CDN_TTL) + params = [ + ('uid', self.key), + ('expires', expiry), + ] + params.append(('signature', self._cdn_signature(path, params, expiry))) + + params = urlencode(params) + path = self.path + path + return urlparse.urlunparse((protocol, self.host, path, '', params, '')) + + def _cdn_signature(self, path, params, expiry): + key = base64.b64decode(self.secret) + signature = '\n'.join(['GET', path.lower(), self.key, expiry]) + signature = hmac.new(key, signature, hashlib.sha1).digest() + + return base64.b64encode(signature) + + def _list_objects(self, tree, object_type=None): + listing = tree.find(self._emc_tag('DirectoryList')) + entries = [] + for entry in listing.findall(self._emc_tag('DirectoryEntry')): + file_type = entry.find(self._emc_tag('FileType')).text + if object_type is not None and object_type != file_type: + continue + entries.append({ + 'id': entry.find(self._emc_tag('ObjectID')).text, + 'type': file_type, + 'name': entry.find(self._emc_tag('Filename')).text + }) + return entries + + def _clean_object_name(self, name): + return urlquote(name.encode('ascii')) + + def _namespace_path(self, path): + return self.path + '/rest/namespace/' + urlquote(path.encode('ascii')) + + def _object_path(self, object_id): + return self.path + '/rest/objects/' + object_id.encode('ascii') + + @staticmethod + def _emc_tag(tag): + return '{http://www.emc.com/cos/}' + tag + + def _emc_meta(self, response): + meta = response.headers.get('x-emc-meta', '') + if len(meta) == 0: + return {} + meta = 
meta.split(', ') + return dict([x.split('=', 1) for x in meta]) + + def iterate_container_objects(self, container): + headers = {'x-emc-include-meta': '1'} + path = self._namespace_path(container.name) + '/' + result = self.connection.request(path, headers=headers) + entries = self._list_objects(result.object, object_type='regular') + for entry in entries: + metadata = {'object_id': entry['id']} + yield Object(entry['name'], 0, '', {}, metadata, container, self) diff -Nru libcloud-0.5.0/libcloud/storage/drivers/azure_blobs.py libcloud-0.15.1/libcloud/storage/drivers/azure_blobs.py --- libcloud-0.5.0/libcloud/storage/drivers/azure_blobs.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/storage/drivers/azure_blobs.py 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,986 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import with_statement + +import base64 +import os +import binascii + +from xml.etree.ElementTree import Element, SubElement + +from libcloud.utils.py3 import PY3 +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlquote +from libcloud.utils.py3 import tostring +from libcloud.utils.py3 import b + +from libcloud.utils.xml import fixxpath +from libcloud.utils.files import read_in_chunks +from libcloud.common.types import LibcloudError +from libcloud.common.azure import AzureConnection + +from libcloud.storage.base import Object, Container, StorageDriver +from libcloud.storage.types import ContainerIsNotEmptyError +from libcloud.storage.types import ContainerAlreadyExistsError +from libcloud.storage.types import InvalidContainerNameError +from libcloud.storage.types import ContainerDoesNotExistError +from libcloud.storage.types import ObjectDoesNotExistError +from libcloud.storage.types import ObjectHashMismatchError + +if PY3: + from io import FileIO as file + +# Desired number of items in each response inside a paginated request +RESPONSES_PER_REQUEST = 100 + +# As per the Azure documentation, if the upload file size is less than +# 64MB, we can upload it in a single request. However, in real life azure +# servers seem to disconnect randomly after around 5 MB or 200s of upload. +# So, it is better that for file sizes greater than 4MB, we upload it in +# chunks. +# Also, with large sizes, if we use a lease, the lease will timeout after +# 60 seconds, but the upload might still be in progress. This can be +# handled in code, but if we use chunked uploads, the lease renewal will +# happen automatically. 
+AZURE_BLOCK_MAX_SIZE = 4 * 1024 * 1024 + +# Azure block blocks must be maximum 4MB +# Azure page blobs must be aligned in 512 byte boundaries (4MB fits that) +AZURE_CHUNK_SIZE = 4 * 1024 * 1024 + +# Azure page blob must be aligned in 512 byte boundaries +AZURE_PAGE_CHUNK_SIZE = 512 + +# The time period (in seconds) for which a lease must be obtained. +# If set as -1, we get an infinite lease, but that is a bad idea. If +# after getting an infinite lease, there was an issue in releasing the +# lease, the object will remain 'locked' forever, unless the lease is +# released using the lease_id (which is not exposed to the user) +AZURE_LEASE_PERIOD = 60 + +AZURE_STORAGE_HOST_SUFFIX = 'blob.core.windows.net' + + +class AzureBlobLease(object): + """ + A class to help in leasing an azure blob and renewing the lease + """ + def __init__(self, driver, object_path, use_lease): + """ + :param driver: The Azure storage driver that is being used + :type driver: :class:`AzureStorageDriver` + + :param object_path: The path of the object we need to lease + :type object_path: ``str`` + + :param use_lease: Indicates if we must take a lease or not + :type use_lease: ``bool`` + """ + self.object_path = object_path + self.driver = driver + self.use_lease = use_lease + self.lease_id = None + self.params = {'comp': 'lease'} + + def renew(self): + """ + Renew the lease if it is older than a predefined time period + """ + if self.lease_id is None: + return + + headers = {'x-ms-lease-action': 'renew', + 'x-ms-lease-id': self.lease_id, + 'x-ms-lease-duration': '60'} + + response = self.driver.connection.request(self.object_path, + headers=headers, + params=self.params, + method='PUT') + + if response.status != httplib.OK: + raise LibcloudError('Unable to obtain lease', driver=self) + + def update_headers(self, headers): + """ + Update the lease id in the headers + """ + if self.lease_id: + headers['x-ms-lease-id'] = self.lease_id + + def __enter__(self): + if not self.use_lease: + return 
self + + headers = {'x-ms-lease-action': 'acquire', + 'x-ms-lease-duration': '60'} + + response = self.driver.connection.request(self.object_path, + headers=headers, + params=self.params, + method='PUT') + + if response.status == httplib.NOT_FOUND: + return self + elif response.status != httplib.CREATED: + raise LibcloudError('Unable to obtain lease', driver=self) + + self.lease_id = response.headers['x-ms-lease-id'] + return self + + def __exit__(self, type, value, traceback): + if self.lease_id is None: + return + + headers = {'x-ms-lease-action': 'release', + 'x-ms-lease-id': self.lease_id} + response = self.driver.connection.request(self.object_path, + headers=headers, + params=self.params, + method='PUT') + + if response.status != httplib.OK: + raise LibcloudError('Unable to release lease', driver=self) + + +class AzureBlobsConnection(AzureConnection): + """ + Represents a single connection to Azure Blobs + """ + + +class AzureBlobsStorageDriver(StorageDriver): + name = 'Microsoft Azure (blobs)' + website = 'http://windows.azure.com/' + connectionCls = AzureBlobsConnection + hash_type = 'md5' + supports_chunked_encoding = False + ex_blob_type = 'BlockBlob' + + def __init__(self, key, secret=None, secure=True, host=None, port=None, + **kwargs): + self._host_argument_set = bool(host) + + # B64decode() this key and keep it, so that we don't have to do + # so for every request. 
Minor performance improvement + secret = base64.b64decode(b(secret)) + + super(AzureBlobsStorageDriver, self).__init__(key=key, secret=secret, + secure=secure, host=host, + port=port, **kwargs) + + def _ex_connection_class_kwargs(self): + result = {} + + # host argument has precedence + if not self._host_argument_set: + result['host'] = '%s.%s' % (self.key, AZURE_STORAGE_HOST_SUFFIX) + + return result + + def _xml_to_container(self, node): + """ + Converts a container XML node to a container instance + + :param node: XML info of the container + :type node: :class:`xml.etree.ElementTree.Element` + + :return: A container instance + :rtype: :class:`Container` + """ + + name = node.findtext(fixxpath(xpath='Name')) + props = node.find(fixxpath(xpath='Properties')) + metadata = node.find(fixxpath(xpath='Metadata')) + + extra = { + 'url': node.findtext(fixxpath(xpath='Url')), + 'last_modified': node.findtext(fixxpath(xpath='Last-Modified')), + 'etag': props.findtext(fixxpath(xpath='Etag')), + 'lease': { + 'status': props.findtext(fixxpath(xpath='LeaseStatus')), + 'state': props.findtext(fixxpath(xpath='LeaseState')), + 'duration': props.findtext(fixxpath(xpath='LeaseDuration')), + }, + 'meta_data': {} + } + + for meta in metadata.getchildren(): + extra['meta_data'][meta.tag] = meta.text + + return Container(name=name, extra=extra, driver=self) + + def _response_to_container(self, container_name, response): + """ + Converts a HTTP response to a container instance + + :param container_name: Name of the container + :type container_name: ``str`` + + :param response: HTTP Response + :type node: L{} + + :return: A container instance + :rtype: :class:`Container` + """ + + headers = response.headers + extra = { + 'url': 'http://%s%s' % (response.connection.host, + response.connection.action), + 'etag': headers['etag'], + 'last_modified': headers['last-modified'], + 'lease': { + 'status': headers.get('x-ms-lease-status', None), + 'state': headers.get('x-ms-lease-state', None), + 
'duration': headers.get('x-ms-lease-duration', None), + }, + 'meta_data': {} + } + + for key, value in response.headers.items(): + if key.startswith('x-ms-meta-'): + key = key.split('x-ms-meta-')[1] + extra['meta_data'][key] = value + + return Container(name=container_name, extra=extra, driver=self) + + def _xml_to_object(self, container, blob): + """ + Converts a BLOB XML node to an object instance + + :param container: Instance of the container holding the blob + :type: :class:`Container` + + :param blob: XML info of the blob + :type blob: L{} + + :return: An object instance + :rtype: :class:`Object` + """ + + name = blob.findtext(fixxpath(xpath='Name')) + props = blob.find(fixxpath(xpath='Properties')) + metadata = blob.find(fixxpath(xpath='Metadata')) + etag = props.findtext(fixxpath(xpath='Etag')) + size = int(props.findtext(fixxpath(xpath='Content-Length'))) + + extra = { + 'content_type': props.findtext(fixxpath(xpath='Content-Type')), + 'etag': etag, + 'md5_hash': props.findtext(fixxpath(xpath='Content-MD5')), + 'last_modified': props.findtext(fixxpath(xpath='Last-Modified')), + 'url': blob.findtext(fixxpath(xpath='Url')), + 'hash': props.findtext(fixxpath(xpath='Etag')), + 'lease': { + 'status': props.findtext(fixxpath(xpath='LeaseStatus')), + 'state': props.findtext(fixxpath(xpath='LeaseState')), + 'duration': props.findtext(fixxpath(xpath='LeaseDuration')), + }, + 'content_encoding': props.findtext(fixxpath( + xpath='Content-Encoding')), + 'content_language': props.findtext(fixxpath( + xpath='Content-Language')), + 'blob_type': props.findtext(fixxpath(xpath='BlobType')) + } + + if extra['md5_hash']: + value = binascii.hexlify(base64.b64decode(b(extra['md5_hash']))) + value = value.decode('ascii') + extra['md5_hash'] = value + + meta_data = {} + for meta in metadata.getchildren(): + meta_data[meta.tag] = meta.text + + return Object(name=name, size=size, hash=etag, meta_data=meta_data, + extra=extra, container=container, driver=self) + + def 
_response_to_object(self, object_name, container, response): + """ + Converts a HTTP response to an object (from headers) + + :param object_name: Name of the object + :type object_name: ``str`` + + :param container: Instance of the container holding the blob + :type: :class:`Container` + + :param response: HTTP Response + :type node: L{} + + :return: An object instance + :rtype: :class:`Object` + """ + + headers = response.headers + size = int(headers['content-length']) + etag = headers['etag'] + + extra = { + 'url': 'http://%s%s' % (response.connection.host, + response.connection.action), + 'etag': etag, + 'md5_hash': headers.get('content-md5', None), + 'content_type': headers.get('content-type', None), + 'content_language': headers.get('content-language', None), + 'content_encoding': headers.get('content-encoding', None), + 'last_modified': headers['last-modified'], + 'lease': { + 'status': headers.get('x-ms-lease-status', None), + 'state': headers.get('x-ms-lease-state', None), + 'duration': headers.get('x-ms-lease-duration', None), + }, + 'blob_type': headers['x-ms-blob-type'] + } + + if extra['md5_hash']: + value = binascii.hexlify(base64.b64decode(b(extra['md5_hash']))) + value = value.decode('ascii') + extra['md5_hash'] = value + + meta_data = {} + for key, value in response.headers.items(): + if key.startswith('x-ms-meta-'): + key = key.split('x-ms-meta-')[1] + meta_data[key] = value + + return Object(name=object_name, size=size, hash=etag, extra=extra, + meta_data=meta_data, container=container, driver=self) + + def iterate_containers(self): + """ + @inherits: :class:`StorageDriver.iterate_containers` + """ + params = {'comp': 'list', + 'maxresults': RESPONSES_PER_REQUEST, + 'include': 'metadata'} + + while True: + response = self.connection.request('/', params) + if response.status != httplib.OK: + raise LibcloudError('Unexpected status code: %s' % + (response.status), driver=self) + + body = response.parse_body() + containers = 
body.find(fixxpath(xpath='Containers')) + containers = containers.findall(fixxpath(xpath='Container')) + + for container in containers: + yield self._xml_to_container(container) + + params['marker'] = body.findtext('NextMarker') + if not params['marker']: + break + + def iterate_container_objects(self, container): + """ + @inherits: :class:`StorageDriver.iterate_container_objects` + """ + params = {'restype': 'container', + 'comp': 'list', + 'maxresults': RESPONSES_PER_REQUEST, + 'include': 'metadata'} + + container_path = self._get_container_path(container) + + while True: + response = self.connection.request(container_path, + params=params) + + if response.status == httplib.NOT_FOUND: + raise ContainerDoesNotExistError(value=None, + driver=self, + container_name=container.name) + + elif response.status != httplib.OK: + raise LibcloudError('Unexpected status code: %s' % + (response.status), driver=self) + + body = response.parse_body() + blobs = body.find(fixxpath(xpath='Blobs')) + blobs = blobs.findall(fixxpath(xpath='Blob')) + + for blob in blobs: + yield self._xml_to_object(container, blob) + + params['marker'] = body.findtext('NextMarker') + if not params['marker']: + break + + def get_container(self, container_name): + """ + @inherits: :class:`StorageDriver.get_container` + """ + params = {'restype': 'container'} + + container_path = '/%s' % (container_name) + + response = self.connection.request(container_path, params=params, + method='HEAD') + + if response.status == httplib.NOT_FOUND: + raise ContainerDoesNotExistError('Container %s does not exist' % + (container_name), driver=self, + container_name=container_name) + elif response.status != httplib.OK: + raise LibcloudError('Unexpected status code: %s' % + (response.status), driver=self) + + return self._response_to_container(container_name, response) + + def get_object(self, container_name, object_name): + """ + @inherits: :class:`StorageDriver.get_object` + """ + + container = 
self.get_container(container_name=container_name) + object_path = self._get_object_path(container, object_name) + + response = self.connection.request(object_path, method='HEAD') + + if response.status == httplib.OK: + obj = self._response_to_object(object_name, container, response) + return obj + + raise ObjectDoesNotExistError(value=None, driver=self, + object_name=object_name) + + def _get_container_path(self, container): + """ + Return a container path + + :param container: Container instance + :type container: :class:`Container` + + :return: A path for this container. + :rtype: ``str`` + """ + return '/%s' % (container.name) + + def _get_object_path(self, container, object_name): + """ + Return an object's CDN path. + + :param container: Container instance + :type container: :class:`Container` + + :param object_name: Object name + :type object_name: :class:`str` + + :return: A path for this object. + :rtype: ``str`` + """ + container_url = self._get_container_path(container) + object_name_cleaned = urlquote(object_name) + object_path = '%s/%s' % (container_url, object_name_cleaned) + return object_path + + def create_container(self, container_name): + """ + @inherits: :class:`StorageDriver.create_container` + """ + params = {'restype': 'container'} + + container_path = '/%s' % (container_name) + response = self.connection.request(container_path, params=params, + method='PUT') + + if response.status == httplib.CREATED: + return self._response_to_container(container_name, response) + elif response.status == httplib.CONFLICT: + raise ContainerAlreadyExistsError( + value='Container with this name already exists. 
The name must ' + 'be unique among all the containers in the system', + container_name=container_name, driver=self) + elif response.status == httplib.BAD_REQUEST: + raise InvalidContainerNameError(value='Container name contains ' + + 'invalid characters.', + container_name=container_name, + driver=self) + + raise LibcloudError('Unexpected status code: %s' % (response.status), + driver=self) + + def delete_container(self, container): + """ + @inherits: :class:`StorageDriver.delete_container` + """ + # Azure does not check if the container is empty. So, we will do + # a check to ensure that the behaviour is similar to other drivers + for obj in container.iterate_objects(): + raise ContainerIsNotEmptyError( + value='Container must be empty before it can be deleted.', + container_name=container.name, driver=self) + + params = {'restype': 'container'} + container_path = self._get_container_path(container) + + # Note: All the objects in the container must be deleted first + response = self.connection.request(container_path, params=params, + method='DELETE') + + if response.status == httplib.ACCEPTED: + return True + elif response.status == httplib.NOT_FOUND: + raise ContainerDoesNotExistError(value=None, + driver=self, + container_name=container.name) + + return False + + def download_object(self, obj, destination_path, overwrite_existing=False, + delete_on_failure=True): + """ + @inherits: :class:`StorageDriver.download_object` + """ + obj_path = self._get_object_path(obj.container, obj.name) + response = self.connection.request(obj_path, raw=True, data=None) + + return self._get_object(obj=obj, callback=self._save_object, + response=response, + callback_kwargs={ + 'obj': obj, + 'response': response.response, + 'destination_path': destination_path, + 'overwrite_existing': overwrite_existing, + 'delete_on_failure': delete_on_failure}, + success_status_code=httplib.OK) + + def download_object_as_stream(self, obj, chunk_size=None): + """ + @inherits: 
:class:`StorageDriver.download_object_as_stream` + """ + obj_path = self._get_object_path(obj.container, obj.name) + response = self.connection.request(obj_path, raw=True, data=None) + + return self._get_object(obj=obj, callback=read_in_chunks, + response=response, + callback_kwargs={'iterator': response.response, + 'chunk_size': chunk_size}, + success_status_code=httplib.OK) + + def _upload_in_chunks(self, response, data, iterator, object_path, + blob_type, lease, calculate_hash=True): + """ + Uploads data from an interator in fixed sized chunks to S3 + + :param response: Response object from the initial POST request + :type response: :class:`RawResponse` + + :param data: Any data from the initial POST request + :type data: ``str`` + + :param iterator: The generator for fetching the upload data + :type iterator: ``generator`` + + :param object_path: The path of the object to which we are uploading + :type object_name: ``str`` + + :param blob_type: The blob type being uploaded + :type blob_type: ``str`` + + :param lease: The lease object to be used for renewal + :type lease: :class:`AzureBlobLease` + + :keyword calculate_hash: Indicates if we must calculate the data hash + :type calculate_hash: ``bool`` + + :return: A tuple of (status, checksum, bytes transferred) + :rtype: ``tuple`` + """ + + # Get the upload id from the response xml + if response.status != httplib.CREATED: + raise LibcloudError('Error initializing upload. 
Code: %d' % + (response.status), driver=self) + + data_hash = None + if calculate_hash: + data_hash = self._get_hash_function() + + bytes_transferred = 0 + count = 1 + chunks = [] + headers = {} + + lease.update_headers(headers) + + if blob_type == 'BlockBlob': + params = {'comp': 'block'} + else: + params = {'comp': 'page'} + + # Read the input data in chunk sizes suitable for AWS + for data in read_in_chunks(iterator, AZURE_CHUNK_SIZE): + data = b(data) + content_length = len(data) + offset = bytes_transferred + bytes_transferred += content_length + + if calculate_hash: + data_hash.update(data) + + chunk_hash = self._get_hash_function() + chunk_hash.update(data) + chunk_hash = base64.b64encode(b(chunk_hash.digest())) + + headers['Content-MD5'] = chunk_hash.decode('utf-8') + headers['Content-Length'] = content_length + + if blob_type == 'BlockBlob': + # Block id can be any unique string that is base64 encoded + # A 10 digit number can hold the max value of 50000 blocks + # that are allowed for azure + block_id = base64.b64encode(b('%10d' % (count))) + block_id = block_id.decode('utf-8') + params['blockid'] = block_id + + # Keep this data for a later commit + chunks.append(block_id) + else: + headers['x-ms-page-write'] = 'update' + headers['x-ms-range'] = 'bytes=%d-%d' % \ + (offset, bytes_transferred-1) + + # Renew lease before updating + lease.renew() + + resp = self.connection.request(object_path, method='PUT', + data=data, headers=headers, + params=params) + + if resp.status != httplib.CREATED: + resp.parse_error() + raise LibcloudError('Error uploading chunk %d. Code: %d' % + (count, resp.status), driver=self) + + count += 1 + + if calculate_hash: + data_hash = data_hash.hexdigest() + + if blob_type == 'BlockBlob': + self._commit_blocks(object_path, chunks, lease) + + # The Azure service does not return a hash immediately for + # chunked uploads. 
It takes some time for the data to get synced + response.headers['content-md5'] = None + + return (True, data_hash, bytes_transferred) + + def _commit_blocks(self, object_path, chunks, lease): + """ + Makes a final commit of the data. + + :param object_path: Server side object path. + :type object_path: ``str`` + + :param upload_id: A list of (chunk_number, chunk_hash) tuples. + :type upload_id: ``list`` + """ + + root = Element('BlockList') + + for block_id in chunks: + part = SubElement(root, 'Uncommitted') + part.text = str(block_id) + + data = tostring(root) + params = {'comp': 'blocklist'} + headers = {} + + lease.update_headers(headers) + lease.renew() + + response = self.connection.request(object_path, data=data, + params=params, headers=headers, + method='PUT') + + if response.status != httplib.CREATED: + raise LibcloudError('Error in blocklist commit', driver=self) + + def _check_values(self, blob_type, object_size): + """ + Checks if extension arguments are valid + + :param blob_type: The blob type that is being uploaded + :type blob_type: ``str`` + + :param object_size: The (max) size of the object being uploaded + :type object_size: ``int`` + """ + + if blob_type not in ['BlockBlob', 'PageBlob']: + raise LibcloudError('Invalid blob type', driver=self) + + if blob_type == 'PageBlob': + if not object_size: + raise LibcloudError('Max blob size is mandatory for page blob', + driver=self) + + if object_size % AZURE_PAGE_CHUNK_SIZE: + raise LibcloudError('Max blob size is not aligned to ' + 'page boundary', driver=self) + + def upload_object(self, file_path, container, object_name, extra=None, + verify_hash=True, ex_blob_type=None, ex_use_lease=False): + """ + Upload an object currently located on a disk. 
+ + @inherits: :class:`StorageDriver.upload_object` + + :param ex_blob_type: Storage class + :type ex_blob_type: ``str`` + + :param ex_use_lease: Indicates if we must take a lease before upload + :type ex_use_lease: ``bool`` + """ + + if ex_blob_type is None: + ex_blob_type = self.ex_blob_type + + # Get the size of the file + file_size = os.stat(file_path).st_size + + # The presumed size of the object + object_size = file_size + + self._check_values(ex_blob_type, file_size) + + with file(file_path, 'rb') as file_handle: + iterator = iter(file_handle) + + # If size is greater than 64MB or type is Page, upload in chunks + if ex_blob_type == 'PageBlob' or file_size > AZURE_BLOCK_MAX_SIZE: + # For chunked upload of block blobs, the initial size must + # be 0. + if ex_blob_type == 'BlockBlob': + object_size = None + + object_path = self._get_object_path(container, object_name) + + upload_func = self._upload_in_chunks + upload_func_kwargs = {'iterator': iterator, + 'object_path': object_path, + 'blob_type': ex_blob_type, + 'lease': None} + else: + upload_func = self._stream_data + upload_func_kwargs = {'iterator': iterator, + 'chunked': False, + 'calculate_hash': verify_hash} + + return self._put_object(container=container, + object_name=object_name, + object_size=object_size, + upload_func=upload_func, + upload_func_kwargs=upload_func_kwargs, + file_path=file_path, extra=extra, + verify_hash=verify_hash, + blob_type=ex_blob_type, + use_lease=ex_use_lease) + + def upload_object_via_stream(self, iterator, container, object_name, + verify_hash=False, extra=None, + ex_use_lease=False, ex_blob_type=None, + ex_page_blob_size=None): + """ + @inherits: :class:`StorageDriver.upload_object_via_stream` + + :param ex_blob_type: Storage class + :type ex_blob_type: ``str`` + + :param ex_page_blob_size: The maximum size to which the + page blob can grow to + :type ex_page_blob_size: ``int`` + + :param ex_use_lease: Indicates if we must take a lease before upload + :type ex_use_lease: 
``bool`` + """ + + if ex_blob_type is None: + ex_blob_type = self.ex_blob_type + + self._check_values(ex_blob_type, ex_page_blob_size) + + object_path = self._get_object_path(container, object_name) + + upload_func = self._upload_in_chunks + upload_func_kwargs = {'iterator': iterator, + 'object_path': object_path, + 'blob_type': ex_blob_type, + 'lease': None} + + return self._put_object(container=container, + object_name=object_name, + object_size=ex_page_blob_size, + upload_func=upload_func, + upload_func_kwargs=upload_func_kwargs, + extra=extra, verify_hash=verify_hash, + blob_type=ex_blob_type, + use_lease=ex_use_lease) + + def delete_object(self, obj): + """ + @inherits: :class:`StorageDriver.delete_object` + """ + object_path = self._get_object_path(obj.container, obj.name) + response = self.connection.request(object_path, method='DELETE') + + if response.status == httplib.ACCEPTED: + return True + elif response.status == httplib.NOT_FOUND: + raise ObjectDoesNotExistError(value=None, driver=self, + object_name=obj.name) + + return False + + def _update_metadata(self, headers, meta_data): + """ + Update the given metadata in the headers + + :param headers: The headers dictionary to be updated + :type headers: ``dict`` + + :param meta_data: Metadata key value pairs + :type meta_data: ``dict`` + """ + for key, value in list(meta_data.items()): + key = 'x-ms-meta-%s' % (key) + headers[key] = value + + def _prepare_upload_headers(self, object_name, object_size, + extra, meta_data, blob_type): + """ + Prepare headers for uploading an object + + :param object_name: The full name of the object being updated + :type object_name: ``str`` + + :param object_size: The size of the object. 
In case of PageBlobs, + this indicates the maximum size the blob can grow to + :type object_size: ``int`` + + :param extra: Extra control data for the upload + :type extra: ``dict`` + + :param meta_data: Metadata key value pairs + :type meta_data: ``dict`` + + :param blob_type: Page or Block blob type + :type blob_type: ``str`` + """ + headers = {} + + if blob_type is None: + blob_type = self.ex_blob_type + + headers['x-ms-blob-type'] = blob_type + + self._update_metadata(headers, meta_data) + + if object_size is not None: + headers['Content-Length'] = object_size + + if blob_type == 'PageBlob': + headers['Content-Length'] = 0 + headers['x-ms-blob-content-length'] = object_size + + return headers + + def _put_object(self, container, object_name, object_size, upload_func, + upload_func_kwargs, file_path=None, extra=None, + verify_hash=True, blob_type=None, use_lease=False): + """ + Control function that does the real job of uploading data to a blob + """ + extra = extra or {} + meta_data = extra.get('meta_data', {}) + content_type = extra.get('content_type', None) + + headers = self._prepare_upload_headers(object_name, object_size, + extra, meta_data, blob_type) + + object_path = self._get_object_path(container, object_name) + + # Get a lease if required and do the operations + with AzureBlobLease(self, object_path, use_lease) as lease: + if 'lease' in upload_func_kwargs: + upload_func_kwargs['lease'] = lease + + lease.update_headers(headers) + + iterator = iter('') + result_dict = self._upload_object(object_name, content_type, + upload_func, upload_func_kwargs, + object_path, headers=headers, + file_path=file_path, + iterator=iterator) + + response = result_dict['response'] + bytes_transferred = result_dict['bytes_transferred'] + data_hash = result_dict['data_hash'] + headers = response.headers + response = response.response + + if response.status != httplib.CREATED: + raise LibcloudError( + 'Unexpected status code, status_code=%s' % (response.status), + 
driver=self) + + server_hash = headers['content-md5'] + + if server_hash: + server_hash = binascii.hexlify(base64.b64decode(b(server_hash))) + server_hash = server_hash.decode('utf-8') + else: + # TODO: HACK - We could poll the object for a while and get + # the hash + pass + + if (verify_hash and server_hash and data_hash != server_hash): + raise ObjectHashMismatchError( + value='MD5 hash checksum does not match', + object_name=object_name, driver=self) + + return Object(name=object_name, size=bytes_transferred, + hash=headers['etag'], extra=None, + meta_data=meta_data, container=container, + driver=self) + + def ex_set_object_metadata(self, obj, meta_data): + """ + Set metadata for an object + + :param obj: The blob object + :type obj: :class:`Object` + + :param meta_data: Metadata key value pairs + :type meta_data: ``dict`` + """ + object_path = self._get_object_path(obj.container, obj.name) + params = {'comp': 'metadata'} + headers = {} + + self._update_metadata(headers, meta_data) + + response = self.connection.request(object_path, method='PUT', + params=params, + headers=headers) + + if response.status != httplib.OK: + response.parse_error('Setting metadata') diff -Nru libcloud-0.5.0/libcloud/storage/drivers/cloudfiles.py libcloud-0.15.1/libcloud/storage/drivers/cloudfiles.py --- libcloud-0.5.0/libcloud/storage/drivers/cloudfiles.py 2011-05-15 20:59:50.000000000 +0000 +++ libcloud-0.15.1/libcloud/storage/drivers/cloudfiles.py 2014-06-11 14:28:05.000000000 +0000 @@ -13,15 +13,27 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import httplib -import urllib +from hashlib import sha1 +import hmac +import os +from time import time + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlencode try: - import json -except: import simplejson as json +except ImportError: + import json + +from libcloud.utils.py3 import PY3 +from libcloud.utils.py3 import b +from libcloud.utils.py3 import urlquote + +if PY3: + from io import FileIO as file -from libcloud.utils import read_in_chunks +from libcloud.utils.files import read_in_chunks from libcloud.common.types import MalformedResponseError, LibcloudError from libcloud.common.base import Response, RawResponse @@ -33,17 +45,21 @@ from libcloud.storage.types import ObjectDoesNotExistError from libcloud.storage.types import ObjectHashMismatchError from libcloud.storage.types import InvalidContainerNameError +from libcloud.common.openstack import OpenStackBaseConnection +from libcloud.common.openstack import OpenStackDriverMixin -from libcloud.common.rackspace import ( - AUTH_HOST_US, AUTH_HOST_UK, RackspaceBaseConnection) +from libcloud.common.rackspace import AUTH_URL CDN_HOST = 'cdn.clouddrive.com' API_VERSION = 'v1.0' +# Keys which are used to select a correct endpoint from the service catalog. +INTERNAL_ENDPOINT_KEY = 'internalURL' +PUBLIC_ENDPOINT_KEY = 'publicURL' -class CloudFilesResponse(Response): - valid_response_codes = [ httplib.NOT_FOUND, httplib.CONFLICT ] +class CloudFilesResponse(Response): + valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT] def success(self): i = int(self.status) @@ -78,24 +94,55 @@ return data + class CloudFilesRawResponse(CloudFilesResponse, RawResponse): pass -class CloudFilesConnection(RackspaceBaseConnection): + +class OpenStackSwiftConnection(OpenStackBaseConnection): """ - Base connection class for the Cloudfiles driver. + Connection class for the OpenStack Swift endpoint. 
""" responseCls = CloudFilesResponse rawResponseCls = CloudFilesRawResponse - auth_host = None - _url_key = "storage_url" - def __init__(self, user_id, key, secure=True): - super(CloudFilesConnection, self).__init__(user_id, key, secure=secure) + auth_url = AUTH_URL + _auth_version = '1.0' + + # TODO: Reverse the relationship - Swift -> CloudFiles + def __init__(self, user_id, key, secure=True, **kwargs): + # Ignore this for now + kwargs.pop('use_internal_url', None) + super(OpenStackSwiftConnection, self).__init__(user_id, key, + secure=secure, + **kwargs) self.api_version = API_VERSION self.accept_format = 'application/json' + self._service_type = self._ex_force_service_type or 'object-store' + self._service_name = self._ex_force_service_name or 'swift' + + if self._ex_force_service_region: + self._service_region = self._ex_force_service_region + else: + self._service_region = None + + def get_endpoint(self, *args, **kwargs): + if '2.0' in self._auth_version: + endpoint = self.service_catalog.get_endpoint( + service_type=self._service_type, + name=self._service_name, + region=self._service_region) + elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version): + endpoint = self.service_catalog.get_endpoint( + name=self._service_name, region=self._service_region) + + if PUBLIC_ENDPOINT_KEY in endpoint: + return endpoint[PUBLIC_ENDPOINT_KEY] + else: + raise LibcloudError('Could not find specified endpoint') + def request(self, action, params=None, data='', headers=None, method='GET', raw=False, cdn_request=False): if not headers: @@ -103,77 +150,145 @@ if not params: params = {} - if cdn_request: - host = self._get_host(url_key='cdn_management_url') - else: - host = None + self.cdn_request = cdn_request + params['format'] = 'json' - # Due to first-run authentication request, we may not have a path - if self.request_path: - action = self.request_path + action - params['format'] = 'json' - if method in [ 'POST', 'PUT' ]: + if method in ['POST', 'PUT'] and 
'Content-Type' not in headers: headers.update({'Content-Type': 'application/json; charset=UTF-8'}) - return super(CloudFilesConnection, self).request( + return super(OpenStackSwiftConnection, self).request( action=action, params=params, data=data, method=method, headers=headers, - raw=raw, host=host - ) + raw=raw) -class CloudFilesUSConnection(CloudFilesConnection): +class CloudFilesConnection(OpenStackSwiftConnection): """ - Connection class for the Cloudfiles US endpoint. + Base connection class for the Cloudfiles driver. """ - auth_host = AUTH_HOST_US + responseCls = CloudFilesResponse + rawResponseCls = CloudFilesRawResponse + auth_url = AUTH_URL + _auth_version = '2.0' -class CloudFilesUKConnection(CloudFilesConnection): - """ - Connection class for the Cloudfiles UK endpoint. - """ + def __init__(self, user_id, key, secure=True, + use_internal_url=False, **kwargs): + super(CloudFilesConnection, self).__init__(user_id, key, secure=secure, + **kwargs) + self.api_version = API_VERSION + self.accept_format = 'application/json' + self.cdn_request = False + self.use_internal_url = use_internal_url - auth_host = AUTH_HOST_UK + def _get_endpoint_key(self): + if self.use_internal_url: + endpoint_key = INTERNAL_ENDPOINT_KEY + else: + endpoint_key = PUBLIC_ENDPOINT_KEY + if self.cdn_request: + # cdn endpoints don't have internal urls + endpoint_key = PUBLIC_ENDPOINT_KEY + + return endpoint_key + + def get_endpoint(self): + region = self._ex_force_service_region.upper() + + if '2.0' in self._auth_version: + ep = self.service_catalog.get_endpoint( + service_type='object-store', + name='cloudFiles', + region=region) + cdn_ep = self.service_catalog.get_endpoint( + service_type='rax:object-cdn', + name='cloudFilesCDN', + region=region) + else: + raise LibcloudError( + 'Auth version "%s" not supported' % (self._auth_version)) -class CloudFilesStorageDriver(StorageDriver): - """ - Base CloudFiles driver. 
+ # if this is a CDN request, return the cdn url instead + if self.cdn_request: + ep = cdn_ep + + endpoint_key = self._get_endpoint_key() - You should never create an instance of this class directly but use US/US - class. + if not ep: + raise LibcloudError('Could not find specified endpoint') + + if endpoint_key in ep: + return ep[endpoint_key] + else: + raise LibcloudError('Could not find specified endpoint') + + def request(self, action, params=None, data='', headers=None, method='GET', + raw=False, cdn_request=False): + if not headers: + headers = {} + if not params: + params = {} + + self.cdn_request = cdn_request + params['format'] = 'json' + + if method in ['POST', 'PUT'] and 'Content-Type' not in headers: + headers.update({'Content-Type': 'application/json; charset=UTF-8'}) + + return super(CloudFilesConnection, self).request( + action=action, + params=params, data=data, + method=method, headers=headers, + raw=raw) + + +class CloudFilesStorageDriver(StorageDriver, OpenStackDriverMixin): + """ + CloudFiles driver. """ name = 'CloudFiles' + website = 'http://www.rackspace.com/' + connectionCls = CloudFilesConnection hash_type = 'md5' + supports_chunked_encoding = True - def list_containers(self): - response = self.connection.request('') - - if response.status == httplib.NO_CONTENT: - return [] - elif response.status == httplib.OK: - return self._to_container_list(json.loads(response.body)) + def __init__(self, key, secret=None, secure=True, host=None, port=None, + region='ord', use_internal_url=False, **kwargs): + """ + @inherits: :class:`StorageDriver.__init__` - raise LibcloudError('Unexpected status code: %s' % (response.status)) + :param region: ID of the region which should be used. 
+ :type region: ``str`` + """ + # This is here for backard compatibility + if 'ex_force_service_region' in kwargs: + region = kwargs['ex_force_service_region'] + + self.use_internal_url = use_internal_url + OpenStackDriverMixin.__init__(self, (), **kwargs) + super(CloudFilesStorageDriver, self).__init__(key=key, secret=secret, + secure=secure, host=host, + port=port, region=region, + **kwargs) - def list_container_objects(self, container): - response = self.connection.request('/%s' % (container.name)) + def iterate_containers(self): + response = self.connection.request('') if response.status == httplib.NO_CONTENT: - # Empty or inexistent container return [] elif response.status == httplib.OK: - return self._to_object_list(json.loads(response.body), container) + return self._to_container_list(json.loads(response.body)) raise LibcloudError('Unexpected status code: %s' % (response.status)) def get_container(self, container_name): - response = self.connection.request('/%s' % (container_name), - method='HEAD') + container_name_encoded = self._encode_container_name(container_name) + response = self.connection.request('/%s' % (container_name_encoded), + method='HEAD') if response.status == httplib.NO_CONTENT: container = self._headers_to_container( @@ -186,10 +301,13 @@ def get_object(self, container_name, object_name): container = self.get_container(container_name) - response = self.connection.request('/%s/%s' % (container_name, - object_name), - method='HEAD') - if response.status in [ httplib.OK, httplib.NO_CONTENT ]: + container_name_encoded = self._encode_container_name(container_name) + object_name_encoded = self._encode_object_name(object_name) + + response = self.connection.request('/%s/%s' % (container_name_encoded, + object_name_encoded), + method='HEAD') + if response.status in [httplib.OK, httplib.NO_CONTENT]: obj = self._headers_to_object( object_name, container, response.headers) return obj @@ -199,8 +317,8 @@ raise LibcloudError('Unexpected status code: %s' 
% (response.status)) def get_container_cdn_url(self, container): - container_name = container.name - response = self.connection.request('/%s' % (container_name), + container_name_encoded = self._encode_container_name(container.name) + response = self.connection.request('/%s' % (container_name_encoded), method='HEAD', cdn_request=True) @@ -209,7 +327,7 @@ return cdn_url elif response.status == httplib.NOT_FOUND: raise ContainerDoesNotExistError(value='', - container_name=container_name, + container_name=container.name, driver=self) raise LibcloudError('Unexpected status code: %s' % (response.status)) @@ -218,27 +336,37 @@ container_cdn_url = self.get_container_cdn_url(container=obj.container) return '%s/%s' % (container_cdn_url, obj.name) - def enable_container_cdn(self, container): + def enable_container_cdn(self, container, ex_ttl=None): + """ + @inherits: :class:`StorageDriver.enable_container_cdn` + + :param ex_ttl: cache time to live + :type ex_ttl: ``int`` + """ container_name = container.name + headers = {'X-CDN-Enabled': 'True'} + + if ex_ttl: + headers['X-TTL'] = ex_ttl + response = self.connection.request('/%s' % (container_name), method='PUT', + headers=headers, cdn_request=True) - if response.status in [ httplib.CREATED, httplib.ACCEPTED ]: - return True - - return False + return response.status in [httplib.CREATED, httplib.ACCEPTED] def create_container(self, container_name): - container_name = self._clean_container_name(container_name) + container_name_encoded = self._encode_container_name(container_name) response = self.connection.request( - '/%s' % (container_name), method='PUT') + '/%s' % (container_name_encoded), method='PUT') if response.status == httplib.CREATED: # Accepted mean that container is not yet created but it will be # eventually - extra = { 'object_count': 0 } - container = Container(name=container_name, extra=extra, driver=self) + extra = {'object_count': 0} + container = Container(name=container_name, + extra=extra, driver=self) 
return container elif response.status == httplib.ACCEPTED: @@ -248,7 +376,7 @@ raise LibcloudError('Unexpected status code: %s' % (response.status)) def delete_container(self, container): - name = self._clean_container_name(container.name) + name = self._encode_container_name(container.name) # Only empty container can be deleted response = self.connection.request('/%s' % (name), method='DELETE') @@ -271,14 +399,14 @@ object_name), method='GET', raw=True) - return self._get_object(obj=obj, callback=self._save_object, - response=response, - callback_kwargs={'obj': obj, - 'response': response.response, - 'destination_path': destination_path, - 'overwrite_existing': overwrite_existing, - 'delete_on_failure': delete_on_failure}, - success_status_code=httplib.OK) + return self._get_object( + obj=obj, callback=self._save_object, response=response, + callback_kwargs={'obj': obj, + 'response': response.response, + 'destination_path': destination_path, + 'overwrite_existing': overwrite_existing, + 'delete_on_failure': delete_on_failure}, + success_status_code=httplib.OK) def download_object_as_stream(self, obj, chunk_size=None): container_name = obj.container.name @@ -289,7 +417,7 @@ return self._get_object(obj=obj, callback=read_in_chunks, response=response, - callback_kwargs={ 'iterator': response.response, + callback_kwargs={'iterator': response.response, 'chunk_size': chunk_size}, success_status_code=httplib.OK) @@ -301,7 +429,7 @@ Note: This will override file with a same name if it already exists. 
""" upload_func = self._upload_file - upload_func_kwargs = { 'file_path': file_path } + upload_func_kwargs = {'file_path': file_path} return self._put_object(container=container, object_name=object_name, upload_func=upload_func, @@ -315,7 +443,7 @@ iterator = iter(iterator) upload_func = self._stream_data - upload_func_kwargs = { 'iterator': iterator } + upload_func_kwargs = {'iterator': iterator} return self._put_object(container=container, object_name=object_name, upload_func=upload_func, @@ -323,8 +451,8 @@ extra=extra, iterator=iterator) def delete_object(self, obj): - container_name = self._clean_container_name(obj.container.name) - object_name = self._clean_object_name(obj.name) + container_name = self._encode_container_name(obj.container.name) + object_name = self._encode_object_name(obj.name) response = self.connection.request( '/%s/%s' % (container_name, object_name), method='DELETE') @@ -337,7 +465,32 @@ raise LibcloudError('Unexpected status code: %s' % (response.status)) + def ex_purge_object_from_cdn(self, obj, email=None): + """ + Purge edge cache for the specified object. + + :param email: Email where a notification will be sent when the job + completes. 
(optional) + :type email: ``str`` + """ + container_name = self._encode_container_name(obj.container.name) + object_name = self._encode_object_name(obj.name) + headers = {'X-Purge-Email': email} if email else {} + + response = self.connection.request('/%s/%s' % (container_name, + object_name), + method='DELETE', + headers=headers, + cdn_request=True) + + return response.status == httplib.NO_CONTENT + def ex_get_meta_data(self): + """ + Get meta data + + :rtype: ``dict`` + """ response = self.connection.request('', method='HEAD') if response.status == httplib.NO_CONTENT: @@ -347,37 +500,283 @@ 'x-account-object-count', 'unknown') bytes_used = response.headers.get( 'x-account-bytes-used', 'unknown') + temp_url_key = response.headers.get( + 'x-account-meta-temp-url-key', None) - return { 'container_count': int(container_count), - 'object_count': int(object_count), - 'bytes_used': int(bytes_used) } + return {'container_count': int(container_count), + 'object_count': int(object_count), + 'bytes_used': int(bytes_used), + 'temp_url_key': temp_url_key} raise LibcloudError('Unexpected status code: %s' % (response.status)) + def ex_multipart_upload_object(self, file_path, container, object_name, + chunk_size=33554432, extra=None, + verify_hash=True): + object_size = os.path.getsize(file_path) + if object_size < chunk_size: + return self.upload_object(file_path, container, object_name, + extra=extra, verify_hash=verify_hash) + + iter_chunk_reader = FileChunkReader(file_path, chunk_size) + + for index, iterator in enumerate(iter_chunk_reader): + self._upload_object_part(container=container, + object_name=object_name, + part_number=index, + iterator=iterator, + verify_hash=verify_hash) + + return self._upload_object_manifest(container=container, + object_name=object_name, + extra=extra, + verify_hash=verify_hash) + + def ex_enable_static_website(self, container, index_file='index.html'): + """ + Enable serving a static website. 
+ + :param container: Container instance + :type container: :class:`Container` + + :param index_file: Name of the object which becomes an index page for + every sub-directory in this container. + :type index_file: ``str`` + + :rtype: ``bool`` + """ + container_name = container.name + headers = {'X-Container-Meta-Web-Index': index_file} + + response = self.connection.request('/%s' % (container_name), + method='POST', + headers=headers, + cdn_request=False) + + return response.status in [httplib.CREATED, httplib.ACCEPTED] + + def ex_set_error_page(self, container, file_name='error.html'): + """ + Set a custom error page which is displayed if file is not found and + serving of a static website is enabled. + + :param container: Container instance + :type container: :class:`Container` + + :param file_name: Name of the object which becomes the error page. + :type file_name: ``str`` + + :rtype: ``bool`` + """ + container_name = container.name + headers = {'X-Container-Meta-Web-Error': file_name} + + response = self.connection.request('/%s' % (container_name), + method='POST', + headers=headers, + cdn_request=False) + + return response.status in [httplib.CREATED, httplib.ACCEPTED] + + def ex_set_account_metadata_temp_url_key(self, key): + """ + Set the metadata header X-Account-Meta-Temp-URL-Key on your Cloud + Files account. + + :param key: X-Account-Meta-Temp-URL-Key + :type key: ``str`` + + :rtype: ``bool`` + """ + headers = {'X-Account-Meta-Temp-URL-Key': key} + + response = self.connection.request('', + method='POST', + headers=headers, + cdn_request=False) + + return response.status in [httplib.OK, httplib.NO_CONTENT, + httplib.CREATED, httplib.ACCEPTED] + + def ex_get_object_temp_url(self, obj, method='GET', timeout=60): + """ + Create a temporary URL to allow others to retrieve or put objects + in your Cloud Files account for as long or as short a time as you + wish. This method is specifically for allowing users to retrieve + or update an object. 
+ + :param obj: The object that you wish to make temporarily public + :type obj: :class:`Object` + + :param method: Which method you would like to allow, 'PUT' or 'GET' + :type method: ``str`` + + :param timeout: Time (in seconds) after which you want the TempURL + to expire. + :type timeout: ``int`` + + :rtype: ``bool`` + """ + self.connection._populate_hosts_and_request_paths() + expires = int(time() + timeout) + path = '%s/%s/%s' % (self.connection.request_path, + obj.container.name, obj.name) + try: + key = self.ex_get_meta_data()['temp_url_key'] + assert key is not None + except Exception: + raise KeyError('You must first set the ' + + 'X-Account-Meta-Temp-URL-Key header on your ' + + 'Cloud Files account using ' + + 'ex_set_account_metadata_temp_url_key before ' + + 'you can use this method.') + hmac_body = '%s\n%s\n%s' % (method, expires, path) + sig = hmac.new(b(key), b(hmac_body), sha1).hexdigest() + params = urlencode({'temp_url_sig': sig, + 'temp_url_expires': expires}) + + temp_url = 'https://%s/%s/%s?%s' %\ + (self.connection.host + self.connection.request_path, + obj.container.name, obj.name, params) + + return temp_url + + def _upload_object_part(self, container, object_name, part_number, + iterator, verify_hash=True): + upload_func = self._stream_data + upload_func_kwargs = {'iterator': iterator} + part_name = object_name + '/%08d' % part_number + extra = {'content_type': 'application/octet-stream'} + + self._put_object(container=container, + object_name=part_name, + upload_func=upload_func, + upload_func_kwargs=upload_func_kwargs, + extra=extra, iterator=iterator, + verify_hash=verify_hash) + + def _upload_object_manifest(self, container, object_name, extra=None, + verify_hash=True): + extra = extra or {} + meta_data = extra.get('meta_data') + + container_name_encoded = self._encode_container_name(container.name) + object_name_encoded = self._encode_object_name(object_name) + request_path = '/%s/%s' % (container_name_encoded, object_name_encoded) + 
+ headers = {'X-Auth-Token': self.connection.auth_token, + 'X-Object-Manifest': '%s/%s/' % + (container_name_encoded, + object_name_encoded)} + + data = '' + response = self.connection.request(request_path, + method='PUT', data=data, + headers=headers, raw=True) + + object_hash = None + + if verify_hash: + hash_function = self._get_hash_function() + hash_function.update(b(data)) + data_hash = hash_function.hexdigest() + object_hash = response.headers.get('etag') + + if object_hash != data_hash: + raise ObjectHashMismatchError( + value=('MD5 hash checksum does not match (expected=%s, ' + + 'actual=%s)') % + (data_hash, object_hash), + object_name=object_name, driver=self) + + obj = Object(name=object_name, size=0, hash=object_hash, extra=None, + meta_data=meta_data, container=container, driver=self) + + return obj + + def list_container_objects(self, container, ex_prefix=None): + """ + Return a list of objects for the given container. + + :param container: Container instance. + :type container: :class:`Container` + + :param ex_prefix: Only get objects with names starting with ex_prefix + :type ex_prefix: ``str`` + + :return: A list of Object instances. + :rtype: ``list`` of :class:`Object` + """ + return list(self.iterate_container_objects(container, + ex_prefix=ex_prefix)) + + def iterate_container_objects(self, container, ex_prefix=None): + """ + Return a generator of objects for the given container. + + :param container: Container instance + :type container: :class:`Container` + + :param ex_prefix: Only get objects with names starting with ex_prefix + :type ex_prefix: ``str`` + + :return: A generator of Object instances. 
+ :rtype: ``generator`` of :class:`Object` + """ + params = {} + if ex_prefix: + params['prefix'] = ex_prefix + + while True: + container_name_encoded = \ + self._encode_container_name(container.name) + response = self.connection.request('/%s' % + (container_name_encoded), + params=params) + + if response.status == httplib.NO_CONTENT: + # Empty or non-existent container + break + elif response.status == httplib.OK: + objects = self._to_object_list(json.loads(response.body), + container) + + if len(objects) == 0: + break + + for obj in objects: + yield obj + params['marker'] = obj.name + + else: + raise LibcloudError('Unexpected status code: %s' % + (response.status)) + def _put_object(self, container, object_name, upload_func, upload_func_kwargs, extra=None, file_path=None, iterator=None, verify_hash=True): extra = extra or {} - container_name_cleaned = self._clean_container_name(container.name) - object_name_cleaned = self._clean_object_name(object_name) + container_name_encoded = self._encode_container_name(container.name) + object_name_encoded = self._encode_object_name(object_name) content_type = extra.get('content_type', None) meta_data = extra.get('meta_data', None) + content_disposition = extra.get('content_disposition', None) headers = {} if meta_data: - for key, value in meta_data.iteritems(): + for key, value in list(meta_data.items()): key = 'X-Object-Meta-%s' % (key) headers[key] = value - request_path = '/%s/%s' % (container_name_cleaned, object_name_cleaned) - result_dict = self._upload_object(object_name=object_name, - content_type=content_type, - upload_func=upload_func, - upload_func_kwargs=upload_func_kwargs, - request_path=request_path, - request_method='PUT', - headers=headers, file_path=file_path, - iterator=iterator) + if content_disposition is not None: + headers['Content-Disposition'] = content_disposition + + request_path = '/%s/%s' % (container_name_encoded, object_name_encoded) + result_dict = self._upload_object( + 
object_name=object_name, content_type=content_type, + upload_func=upload_func, upload_func_kwargs=upload_func_kwargs, + request_path=request_path, request_method='PUT', + headers=headers, file_path=file_path, iterator=iterator) response = result_dict['response'].response bytes_transferred = result_dict['bytes_transferred'] @@ -406,13 +805,13 @@ raise LibcloudError('status_code=%s' % (response.status), driver=self) - def _clean_container_name(self, name): + def _encode_container_name(self, name): """ - Clean container name. + Encode container name so it can be used as part of the HTTP request. """ if name.startswith('/'): name = name[1:] - name = urllib.quote(name) + name = urlquote(name) if name.find('/') != -1: raise InvalidContainerNameError(value='Container name cannot' @@ -420,28 +819,22 @@ container_name=name, driver=self) if len(name) > 256: - raise InvalidContainerNameError(value='Container name cannot be' - ' longer than 256 bytes', - container_name=name, driver=self) - + raise InvalidContainerNameError( + value='Container name cannot be longer than 256 bytes', + container_name=name, driver=self) return name - def _clean_object_name(self, name): - name = urllib.quote(name) + def _encode_object_name(self, name): + name = urlquote(name) return name def _to_container_list(self, response): - # @TODO: Handle more then 10k containers - use "lazy list"? - containers = [] - + # @TODO: Handle more than 10k containers - use "lazy list"? 
for container in response: - extra = { 'object_count': int(container['count']), - 'size': int(container['bytes'])} - containers.append(Container(name=container['name'], extra=extra, - driver=self)) - - return containers + extra = {'object_count': int(container['count']), + 'size': int(container['bytes'])} + yield Container(name=container['name'], extra=extra, driver=self) def _to_object_list(self, response, container): objects = [] @@ -450,8 +843,8 @@ name = obj['name'] size = int(obj['bytes']) hash = obj['hash'] - extra = { 'content_type': obj['content_type'], - 'last_modified': obj['last_modified'] } + extra = {'content_type': obj['content_type'], + 'last_modified': obj['last_modified']} objects.append(Object( name=name, size=size, hash=hash, extra=extra, meta_data=None, container=container, driver=self)) @@ -462,8 +855,8 @@ size = int(headers.get('x-container-bytes-used', 0)) object_count = int(headers.get('x-container-object-count', 0)) - extra = { 'object_count': object_count, - 'size': size } + extra = {'object_count': object_count, + 'size': size} container = Container(name=name, extra=extra, driver=self) return container @@ -474,17 +867,24 @@ content_type = headers.pop('content-type', None) meta_data = {} - for key, value in headers.iteritems(): + for key, value in list(headers.items()): if key.find('x-object-meta-') != -1: key = key.replace('x-object-meta-', '') meta_data[key] = value - extra = { 'content_type': content_type, 'last_modified': last_modified } + extra = {'content_type': content_type, 'last_modified': last_modified} obj = Object(name=name, size=size, hash=etag, extra=extra, meta_data=meta_data, container=container, driver=self) return obj + def _ex_connection_class_kwargs(self): + kwargs = self.openstack_connection_kwargs() + kwargs['ex_force_service_region'] = self.region + kwargs['use_internal_url'] = self.use_internal_url + return kwargs + + class CloudFilesUSStorageDriver(CloudFilesStorageDriver): """ Cloudfiles storage driver for the US 
endpoint. @@ -492,7 +892,32 @@ type = Provider.CLOUDFILES_US name = 'CloudFiles (US)' - connectionCls = CloudFilesUSConnection + + def __init__(self, *args, **kwargs): + kwargs['region'] = 'ord' + super(CloudFilesUSStorageDriver, self).__init__(*args, **kwargs) + + +class OpenStackSwiftStorageDriver(CloudFilesStorageDriver): + """ + Storage driver for the OpenStack Swift. + """ + type = Provider.CLOUDFILES_SWIFT + name = 'OpenStack Swift' + connectionCls = OpenStackSwiftConnection + + # TODO: Reverse the relationship - Swift -> CloudFiles + + def __init__(self, key, secret=None, secure=True, host=None, port=None, + region=None, **kwargs): + super(OpenStackSwiftStorageDriver, self).__init__(key=key, + secret=secret, + secure=secure, + host=host, + port=port, + region=region, + **kwargs) + class CloudFilesUKStorageDriver(CloudFilesStorageDriver): """ @@ -501,4 +926,69 @@ type = Provider.CLOUDFILES_UK name = 'CloudFiles (UK)' - connectionCls = CloudFilesUKConnection + + def __init__(self, *args, **kwargs): + kwargs['region'] = 'lon' + super(CloudFilesUKStorageDriver, self).__init__(*args, **kwargs) + + +class FileChunkReader(object): + def __init__(self, file_path, chunk_size): + self.file_path = file_path + self.total = os.path.getsize(file_path) + self.chunk_size = chunk_size + self.bytes_read = 0 + self.stop_iteration = False + + def __iter__(self): + return self + + def next(self): + if self.stop_iteration: + raise StopIteration + + start_block = self.bytes_read + end_block = start_block + self.chunk_size + if end_block >= self.total: + end_block = self.total + self.stop_iteration = True + self.bytes_read += end_block - start_block + return ChunkStreamReader(file_path=self.file_path, + start_block=start_block, + end_block=end_block, + chunk_size=8192) + + def __next__(self): + return self.next() + + +class ChunkStreamReader(object): + def __init__(self, file_path, start_block, end_block, chunk_size): + self.fd = open(file_path, 'rb') + self.fd.seek(start_block) + 
self.start_block = start_block + self.end_block = end_block + self.chunk_size = chunk_size + self.bytes_read = 0 + self.stop_iteration = False + + def __iter__(self): + return self + + def next(self): + if self.stop_iteration: + self.fd.close() + raise StopIteration + + block_size = self.chunk_size + if self.bytes_read + block_size > \ + self.end_block - self.start_block: + block_size = self.end_block - self.start_block - self.bytes_read + self.stop_iteration = True + + block = self.fd.read(block_size) + self.bytes_read += block_size + return block + + def __next__(self): + return self.next() diff -Nru libcloud-0.5.0/libcloud/storage/drivers/dummy.py libcloud-0.15.1/libcloud/storage/drivers/dummy.py --- libcloud-0.5.0/libcloud/storage/drivers/dummy.py 2011-05-14 23:01:55.000000000 +0000 +++ libcloud-0.15.1/libcloud/storage/drivers/dummy.py 2013-11-29 12:35:04.000000000 +0000 @@ -17,6 +17,12 @@ import random import hashlib +from libcloud.utils.py3 import PY3 +from libcloud.utils.py3 import b + +if PY3: + from io import FileIO as file + from libcloud.common.types import LibcloudError from libcloud.storage.base import Object, Container, StorageDriver @@ -47,6 +53,7 @@ def __len__(self): return self._yield_count * self._chunk_len + class DummyIterator(object): def __init__(self, data=None): self.hash = hashlib.md5() @@ -61,10 +68,14 @@ raise StopIteration value = self._data[self._current_item] - self.hash.update(value) + self.hash.update(b(value)) self._current_item += 1 return value + def __next__(self): + return self.next() + + class DummyStorageDriver(StorageDriver): """ Dummy Storage driver. 
@@ -81,61 +92,87 @@ """ name = 'Dummy Storage Provider' + website = 'http://example.com' def __init__(self, api_key, api_secret): + """ + :param api_key: API key or username to used (required) + :type api_key: ``str`` + :param api_secret: Secret password to be used (required) + :type api_secret: ``str`` + :rtype: ``None`` + """ self._containers = {} def get_meta_data(self): """ >>> driver = DummyStorageDriver('key', 'secret') - >>> driver.get_meta_data() - {'object_count': 0, 'container_count': 0, 'bytes_used': 0} - >>> container = driver.create_container(container_name='test container 1') - >>> container = driver.create_container(container_name='test container 2') + >>> driver.get_meta_data()['object_count'] + 0 + >>> driver.get_meta_data()['container_count'] + 0 + >>> driver.get_meta_data()['bytes_used'] + 0 + >>> container_name = 'test container 1' + >>> container = driver.create_container(container_name=container_name) + >>> container_name = 'test container 2' + >>> container = driver.create_container(container_name=container_name) >>> obj = container.upload_object_via_stream( - ... object_name='test object', iterator=DummyFileObject(5, 10), extra={}) - >>> driver.get_meta_data() - {'object_count': 1, 'container_count': 2, 'bytes_used': 50} + ... object_name='test object', iterator=DummyFileObject(5, 10), + ... 
extra={}) + >>> driver.get_meta_data()['object_count'] + 1 + >>> driver.get_meta_data()['container_count'] + 2 + >>> driver.get_meta_data()['bytes_used'] + 50 + + :rtype: ``dict`` """ container_count = len(self._containers) - object_count = sum([ len(self._containers[container]['objects']) for - container in self._containers ]) + object_count = sum([len(self._containers[container]['objects']) for + container in self._containers]) bytes_used = 0 for container in self._containers: objects = self._containers[container]['objects'] - for _, obj in objects.iteritems(): + for _, obj in objects.items(): bytes_used += obj.size - return { 'container_count': int(container_count), - 'object_count': int(object_count), - 'bytes_used': int(bytes_used) } + return {'container_count': int(container_count), + 'object_count': int(object_count), + 'bytes_used': int(bytes_used)} - def list_containers(self): + def iterate_containers(self): """ >>> driver = DummyStorageDriver('key', 'secret') - >>> driver.list_containers() + >>> list(driver.iterate_containers()) [] - >>> container = driver.create_container(container_name='test container 1') + >>> container_name = 'test container 1' + >>> container = driver.create_container(container_name=container_name) >>> container >>> container.name 'test container 1' - >>> container = driver.create_container(container_name='test container 2') + >>> container_name = 'test container 2' + >>> container = driver.create_container(container_name=container_name) >>> container >>> container = driver.create_container( - ... container_name='test container 2') #doctest: +IGNORE_EXCEPTION_DETAIL + ... container_name='test container 2') + ... 
#doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ContainerAlreadyExistsError: - >>> container_list=driver.list_containers() - >>> sorted([container.name for container in container_list]) + >>> container_list=list(driver.iterate_containers()) + >>> sorted([c.name for c in container_list]) ['test container 1', 'test container 2'] + + @inherits: :class:`StorageDriver.iterate_containers` """ - return [container['container'] for container in - self._containers.values()] + for container in list(self._containers.values()): + yield container['container'] def list_container_objects(self, container): container = self.get_container(container.name) @@ -148,13 +185,16 @@ >>> driver.get_container('unknown') #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ContainerDoesNotExistError: - >>> container = driver.create_container(container_name='test container 1') + >>> container_name = 'test container 1' + >>> container = driver.create_container(container_name=container_name) >>> container >>> container.name 'test container 1' >>> driver.get_container('test container 1') + + @inherits: :class:`StorageDriver.get_container` """ if container_name not in self._containers: @@ -169,13 +209,16 @@ >>> driver.get_container('unknown') #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ContainerDoesNotExistError: - >>> container = driver.create_container(container_name='test container 1') + >>> container_name = 'test container 1' + >>> container = driver.create_container(container_name=container_name) >>> container >>> container.name 'test container 1' >>> container.get_cdn_url() 'http://www.test.com/container/test_container_1' + + @inherits: :class:`StorageDriver.get_container_cdn_url` """ if container.name not in self._containers: @@ -185,80 +228,94 @@ return self._containers[container.name]['cdn_url'] def get_object(self, container_name, object_name): - """ - >>> driver = DummyStorageDriver('key', 'secret') - >>> 
driver.get_object('unknown', 'unknown') #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ContainerDoesNotExistError: - >>> container = driver.create_container(container_name='test container 1') - >>> container - - >>> driver.get_object( - ... 'test container 1', 'unknown') #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ObjectDoesNotExistError: - >>> obj = container.upload_object_via_stream(object_name='test object', - ... iterator=DummyFileObject(5, 10), extra={}) - >>> obj - - """ - - self.get_container(container_name) - container_objects = self._containers[container_name]['objects'] - if object_name not in container_objects: + """ + >>> driver = DummyStorageDriver('key', 'secret') + >>> driver.get_object('unknown', 'unknown') + ... #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ContainerDoesNotExistError: + >>> container_name = 'test container 1' + >>> container = driver.create_container(container_name=container_name) + >>> container + + >>> driver.get_object( + ... 'test container 1', 'unknown') #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ObjectDoesNotExistError: + >>> obj = container.upload_object_via_stream(object_name='test object', + ... iterator=DummyFileObject(5, 10), extra={}) + >>> obj.name + 'test object' + >>> obj.size + 50 + + @inherits: :class:`StorageDriver.get_object` + """ + + self.get_container(container_name) + container_objects = self._containers[container_name]['objects'] + if object_name not in container_objects: raise ObjectDoesNotExistError(object_name=object_name, value=None, driver=self) - return container_objects[object_name] + return container_objects[object_name] def get_object_cdn_url(self, obj): - """ - >>> driver = DummyStorageDriver('key', 'secret') - >>> container = driver.create_container(container_name='test container 1') - >>> container - - >>> obj = container.upload_object_via_stream(object_name='test object 5', - ... 
iterator=DummyFileObject(5, 10), extra={}) - >>> obj - - >>> obj.get_cdn_url() - 'http://www.test.com/object/test_object_5' - """ - - container_name = obj.container.name - container_objects = self._containers[container_name]['objects'] - if obj.name not in container_objects: - raise ObjectDoesNotExistError(object_name=obj.name, value=None, - driver=self) + """ + >>> driver = DummyStorageDriver('key', 'secret') + >>> container_name = 'test container 1' + >>> container = driver.create_container(container_name=container_name) + >>> container + + >>> obj = container.upload_object_via_stream( + ... object_name='test object 5', + ... iterator=DummyFileObject(5, 10), extra={}) + >>> obj.name + 'test object 5' + >>> obj.get_cdn_url() + 'http://www.test.com/object/test_object_5' - return container_objects[obj.name].meta_data['cdn_url'] + @inherits: :class:`StorageDriver.get_object_cdn_url` + """ + container_name = obj.container.name + container_objects = self._containers[container_name]['objects'] + if obj.name not in container_objects: + raise ObjectDoesNotExistError(object_name=obj.name, value=None, + driver=self) + + return container_objects[obj.name].meta_data['cdn_url'] def create_container(self, container_name): """ >>> driver = DummyStorageDriver('key', 'secret') - >>> container = driver.create_container(container_name='test container 1') + >>> container_name = 'test container 1' + >>> container = driver.create_container(container_name=container_name) >>> container >>> container = driver.create_container( - ... container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL + ... container_name='test container 1') + ... 
#doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ContainerAlreadyExistsError: + + @inherits: :class:`StorageDriver.create_container` """ if container_name in self._containers: raise ContainerAlreadyExistsError(container_name=container_name, value=None, driver=self) - extra = { 'object_count': 0 } + extra = {'object_count': 0} container = Container(name=container_name, extra=extra, driver=self) - self._containers[container_name] = { 'container': container, - 'objects': {}, - 'cdn_url': - 'http://www.test.com/container/%s' % - (container_name.replace(' ', '_')) - } + self._containers[container_name] = {'container': container, + 'objects': {}, + 'cdn_url': + 'http://www.test.com/container/%s' + % + (container_name.replace(' ', '_')) + } return container def delete_container(self, container): @@ -266,11 +323,13 @@ >>> driver = DummyStorageDriver('key', 'secret') >>> container = Container(name = 'test container', ... extra={'object_count': 0}, driver=driver) - >>> driver.delete_container(container=container)#doctest: +IGNORE_EXCEPTION_DETAIL + >>> driver.delete_container(container=container) + ... #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ContainerDoesNotExistError: >>> container = driver.create_container( - ... container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL + ... container_name='test container 1') + ... #doctest: +IGNORE_EXCEPTION_DETAIL >>> len(driver._containers) 1 >>> driver.delete_container(container=container) @@ -278,12 +337,17 @@ >>> len(driver._containers) 0 >>> container = driver.create_container( - ... container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL + ... container_name='test container 1') + ... #doctest: +IGNORE_EXCEPTION_DETAIL >>> obj = container.upload_object_via_stream( - ... object_name='test object', iterator=DummyFileObject(5, 10), extra={}) - >>> driver.delete_container(container=container)#doctest: +IGNORE_EXCEPTION_DETAIL + ... 
object_name='test object', iterator=DummyFileObject(5, 10), + ... extra={}) + >>> driver.delete_container(container=container) + ... #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ContainerIsNotEmptyError: + + @inherits: :class:`StorageDriver.delete_container` """ container_name = container.name @@ -300,12 +364,12 @@ return True def download_object(self, obj, destination_path, overwrite_existing=False, - delete_on_failure=True): - kwargs_dict = {'obj': obj, - 'response': DummyFileObject(), - 'destination_path': destination_path, - 'overwrite_existing': overwrite_existing, - 'delete_on_failure': delete_on_failure} + delete_on_failure=True): + kwargs_dict = {'obj': obj, + 'response': DummyFileObject(), + 'destination_path': destination_path, + 'overwrite_existing': overwrite_existing, + 'delete_on_failure': delete_on_failure} return self._save_object(**kwargs_dict) @@ -313,12 +377,15 @@ """ >>> driver = DummyStorageDriver('key', 'secret') >>> container = driver.create_container( - ... container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL + ... container_name='test container 1') + ... #doctest: +IGNORE_EXCEPTION_DETAIL >>> obj = container.upload_object_via_stream(object_name='test object', ... iterator=DummyFileObject(5, 10), extra={}) >>> stream = container.download_object_as_stream(obj) >>> stream #doctest: +ELLIPSIS - ' at 0x...> + <...closed...> + + @inherits: :class:`StorageDriver.download_object_as_stream` """ return DummyFileObject() @@ -327,18 +394,24 @@ file_hash=None): """ >>> driver = DummyStorageDriver('key', 'secret') - >>> container = driver.create_container(container_name='test container 1') + >>> container_name = 'test container 1' + >>> container = driver.create_container(container_name=container_name) >>> container.upload_object(file_path='/tmp/inexistent.file', ... 
object_name='test') #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): LibcloudError: >>> file_path = path = os.path.abspath(__file__) >>> file_size = os.path.getsize(file_path) - >>> obj = container.upload_object(file_path=file_path, object_name='test') + >>> obj = container.upload_object(file_path=file_path, + ... object_name='test') >>> obj #doctest: +ELLIPSIS >>> obj.size == file_size True + + @inherits: :class:`StorageDriver.upload_object` + :param file_hash: File hash + :type file_hash: ``str`` """ if not os.path.exists(file_path): @@ -354,11 +427,15 @@ """ >>> driver = DummyStorageDriver('key', 'secret') >>> container = driver.create_container( - ... container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL + ... container_name='test container 1') + ... #doctest: +IGNORE_EXCEPTION_DETAIL >>> obj = container.upload_object_via_stream( - ... object_name='test object', iterator=DummyFileObject(5, 10), extra={}) + ... object_name='test object', iterator=DummyFileObject(5, 10), + ... extra={}) >>> obj #doctest: +ELLIPSIS + + @inherits: :class:`StorageDriver.upload_object_via_stream` """ size = len(iterator) @@ -369,7 +446,8 @@ """ >>> driver = DummyStorageDriver('key', 'secret') >>> container = driver.create_container( - ... container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL + ... container_name='test container 1') + ... #doctest: +IGNORE_EXCEPTION_DETAIL >>> obj = container.upload_object_via_stream(object_name='test object', ... 
iterator=DummyFileObject(5, 10), extra={}) >>> obj #doctest: +ELLIPSIS @@ -382,6 +460,8 @@ >>> container.delete_object(obj=obj) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ObjectDoesNotExistError: + + @inherits: :class:`StorageDriver.delete_object` """ container_name = obj.container.name diff -Nru libcloud-0.5.0/libcloud/storage/drivers/google_storage.py libcloud-0.15.1/libcloud/storage/drivers/google_storage.py --- libcloud-0.5.0/libcloud/storage/drivers/google_storage.py 2011-04-07 00:23:06.000000000 +0000 +++ libcloud-0.15.1/libcloud/storage/drivers/google_storage.py 2014-06-11 14:27:59.000000000 +0000 @@ -13,4 +13,124 @@ # See the License for the specific language governing permissions and # limitations under the License. +import base64 +import copy +import hmac +from email.utils import formatdate +from hashlib import sha1 + +from libcloud.utils.py3 import b + +from libcloud.common.base import ConnectionUserAndKey + +from libcloud.storage.drivers.s3 import BaseS3StorageDriver, S3Response +from libcloud.storage.drivers.s3 import S3RawResponse + +SIGNATURE_IDENTIFIER = 'GOOG1' + +# Docs are a lie. Actual namespace returned is different that the one listed in +# the docs. +AUTH_HOST = 'commondatastorage.googleapis.com' +API_VERSION = '2006-03-01' +NAMESPACE = 'http://doc.s3.amazonaws.com/%s' % (API_VERSION) + + +class GoogleStorageConnection(ConnectionUserAndKey): + """ + Repersents a single connection to the Google storage API endpoint. 
+ """ + + host = AUTH_HOST + responseCls = S3Response + rawResponseCls = S3RawResponse + + def add_default_headers(self, headers): + date = formatdate(usegmt=True) + headers['Date'] = date + return headers + + def pre_connect_hook(self, params, headers): + signature = self._get_aws_auth_param(method=self.method, + headers=headers, + params=params, + expires=None, + secret_key=self.key, + path=self.action) + headers['Authorization'] = '%s %s:%s' % (SIGNATURE_IDENTIFIER, + self.user_id, signature) + return params, headers + + def _get_aws_auth_param(self, method, headers, params, expires, + secret_key, path='/'): + # TODO: Refactor and re-use in S3 driver + """ + Signature = URL-Encode( Base64( HMAC-SHA1( YourSecretAccessKeyID, + UTF-8-Encoding-Of( StringToSign ) ) ) ); + + StringToSign = HTTP-VERB + "\n" + + Content-MD5 + "\n" + + Content-Type + "\n" + + Date + "\n" + + CanonicalizedHeaders + + CanonicalizedResource; + """ + special_header_keys = ['content-md5', 'content-type', 'date'] + special_header_values = {} + extension_header_values = {} + + headers_copy = copy.deepcopy(headers) + for key, value in list(headers_copy.items()): + if key.lower() in special_header_keys: + if key.lower() == 'date': + value = value.strip() + else: + value = value.lower().strip() + special_header_values[key.lower()] = value + elif key.lower().startswith('x-goog-'): + extension_header_values[key.lower()] = value.strip() + + if 'content-md5' not in special_header_values: + special_header_values['content-md5'] = '' + + if 'content-type' not in special_header_values: + special_header_values['content-type'] = '' + + keys_sorted = list(special_header_values.keys()) + keys_sorted.sort() + + buf = [method] + for key in keys_sorted: + value = special_header_values[key] + buf.append(value) + string_to_sign = '\n'.join(buf) + + keys_sorted = list(extension_header_values.keys()) + keys_sorted.sort() + + extension_header_string = [] + for key in keys_sorted: + value = 
extension_header_values[key] + extension_header_string.append('%s:%s' % (key, value)) + extension_header_string = '\n'.join(extension_header_string) + + values_to_sign = [] + for value in [string_to_sign, extension_header_string, path]: + if value: + values_to_sign.append(value) + + string_to_sign = '\n'.join(values_to_sign) + b64_hmac = base64.b64encode( + hmac.new(b(secret_key), b(string_to_sign), digestmod=sha1).digest() + ) + return b64_hmac.decode('utf-8') + + +class GoogleStorageDriver(BaseS3StorageDriver): + name = 'Google Storage' + website = 'http://cloud.google.com/' + connectionCls = GoogleStorageConnection + hash_type = 'md5' + namespace = NAMESPACE + supports_chunked_encoding = False + supports_s3_multipart_upload = False diff -Nru libcloud-0.5.0/libcloud/storage/drivers/ktucloud.py libcloud-0.15.1/libcloud/storage/drivers/ktucloud.py --- libcloud-0.5.0/libcloud/storage/drivers/ktucloud.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/storage/drivers/ktucloud.py 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,52 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from libcloud.common.types import LibcloudError +from libcloud.storage.providers import Provider + +from libcloud.storage.drivers.cloudfiles import CloudFilesConnection +from libcloud.storage.drivers.cloudfiles import CloudFilesStorageDriver + +KTUCLOUDSTORAGE_AUTH_URL = "https://ssproxy.ucloudbiz.olleh.com/auth/v1.0" +KTUCLOUDSTORAGE_API_VERSION = "1.0" + + +class KTUCloudStorageConnection(CloudFilesConnection): + """ + Connection class for the KT UCloud Storage endpoint. + """ + + auth_url = KTUCLOUDSTORAGE_AUTH_URL + _auth_version = KTUCLOUDSTORAGE_API_VERSION + + def get_endpoint(self): + eps = self.service_catalog.get_endpoints(name='cloudFiles') + if len(eps) == 0: + raise LibcloudError('Could not find specified endpoint') + ep = eps[0] + if 'publicURL' in ep: + return ep['publicURL'] + else: + raise LibcloudError('Could not find specified endpoint') + + +class KTUCloudStorageDriver(CloudFilesStorageDriver): + """ + Cloudfiles storage driver for the UK endpoint. + """ + + type = Provider.KTUCLOUD + name = 'KTUCloud Storage' + connectionCls = KTUCloudStorageConnection diff -Nru libcloud-0.5.0/libcloud/storage/drivers/local.py libcloud-0.15.1/libcloud/storage/drivers/local.py --- libcloud-0.5.0/libcloud/storage/drivers/local.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/storage/drivers/local.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,600 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Provides storage driver for working with local filesystem +""" + +from __future__ import with_statement + +import errno +import os +import shutil +import sys + +try: + import lockfile + from lockfile import LockTimeout, mkdirlockfile +except ImportError: + raise ImportError('Missing lockfile dependency, you can install it ' + 'using pip: pip install lockfile') + +from libcloud.utils.files import read_in_chunks +from libcloud.utils.py3 import relpath +from libcloud.utils.py3 import u +from libcloud.common.base import Connection +from libcloud.storage.base import Object, Container, StorageDriver +from libcloud.common.types import LibcloudError +from libcloud.storage.types import ContainerAlreadyExistsError +from libcloud.storage.types import ContainerDoesNotExistError +from libcloud.storage.types import ContainerIsNotEmptyError +from libcloud.storage.types import ObjectError +from libcloud.storage.types import ObjectDoesNotExistError +from libcloud.storage.types import InvalidContainerNameError + +IGNORE_FOLDERS = ['.lock', '.hash'] + + +class LockLocalStorage(object): + """ + A class to help in locking a local path before being updated + """ + def __init__(self, path): + self.path = path + self.lock = mkdirlockfile.MkdirLockFile(self.path, threaded=True) + + def __enter__(self): + try: + self.lock.acquire(timeout=0.1) + except LockTimeout: + raise LibcloudError('Lock timeout') + + def __exit__(self, type, value, traceback): + if self.lock.is_locked(): + self.lock.release() + + if value is not None: + raise value + + +class 
LocalStorageDriver(StorageDriver): + """ + Implementation of local file-system based storage. This is helpful + where the user would want to use the same code (using libcloud) and + switch between cloud storage and local storage + """ + + connectionCls = Connection + name = 'Local Storage' + website = 'http://example.com' + hash_type = 'md5' + + def __init__(self, key, secret=None, secure=True, host=None, port=None, + **kwargs): + + # Use the key as the path to the storage + self.base_path = key + + if not os.path.isdir(self.base_path): + raise LibcloudError('The base path is not a directory') + + super(StorageDriver, self).__init__(key=key, secret=secret, + secure=secure, host=host, + port=port, **kwargs) + + def _make_path(self, path, ignore_existing=True): + """ + Create a path by checking if it already exists + """ + + try: + os.makedirs(path) + except OSError: + exp = sys.exc_info()[1] + if exp.errno == errno.EEXIST and not ignore_existing: + raise exp + + def _check_container_name(self, container_name): + """ + Check if the container name is valid + + :param container_name: Container name + :type container_name: ``str`` + """ + + if '/' in container_name or '\\' in container_name: + raise InvalidContainerNameError(value=None, driver=self, + container_name=container_name) + + def _make_container(self, container_name): + """ + Create a container instance + + :param container_name: Container name. + :type container_name: ``str`` + + :return: Container instance. 
+ :rtype: :class:`Container` + """ + + self._check_container_name(container_name) + + full_path = os.path.join(self.base_path, container_name) + + try: + stat = os.stat(full_path) + if not os.path.isdir(full_path): + raise OSError('Target path is not a directory') + except OSError: + raise ContainerDoesNotExistError(value=None, driver=self, + container_name=container_name) + + extra = {} + extra['creation_time'] = stat.st_ctime + extra['access_time'] = stat.st_atime + extra['modify_time'] = stat.st_mtime + + return Container(name=container_name, extra=extra, driver=self) + + def _make_object(self, container, object_name): + """ + Create an object instance + + :param container: Container. + :type container: :class:`Container` + + :param object_name: Object name. + :type object_name: ``str`` + + :return: Object instance. + :rtype: :class:`Object` + """ + + full_path = os.path.join(self.base_path, container.name, object_name) + + if os.path.isdir(full_path): + raise ObjectError(value=None, driver=self, object_name=object_name) + + try: + stat = os.stat(full_path) + except Exception: + raise ObjectDoesNotExistError(value=None, driver=self, + object_name=object_name) + + # Make a hash for the file based on the metadata. We can safely + # use only the mtime attribute here. If the file contents change, + # the underlying file-system will change mtime + data_hash = self._get_hash_function() + data_hash.update(u(stat.st_mtime).encode('ascii')) + data_hash = data_hash.hexdigest() + + extra = {} + extra['creation_time'] = stat.st_ctime + extra['access_time'] = stat.st_atime + extra['modify_time'] = stat.st_mtime + + return Object(name=object_name, size=stat.st_size, extra=extra, + driver=self, container=container, hash=data_hash, + meta_data=None) + + def iterate_containers(self): + """ + Return a generator of containers. + + :return: A generator of Container instances. 
+ :rtype: ``generator`` of :class:`Container` + """ + + for container_name in os.listdir(self.base_path): + full_path = os.path.join(self.base_path, container_name) + if not os.path.isdir(full_path): + continue + yield self._make_container(container_name) + + def _get_objects(self, container): + """ + Recursively iterate through the file-system and return the object names + """ + + cpath = self.get_container_cdn_url(container, check=True) + + for folder, subfolders, files in os.walk(cpath, topdown=True): + # Remove unwanted subfolders + for subf in IGNORE_FOLDERS: + if subf in subfolders: + subfolders.remove(subf) + + for name in files: + full_path = os.path.join(folder, name) + object_name = relpath(full_path, start=cpath) + yield self._make_object(container, object_name) + + def iterate_container_objects(self, container): + """ + Returns a generator of objects for the given container. + + :param container: Container instance + :type container: :class:`Container` + + :return: A generator of Object instances. + :rtype: ``generator`` of :class:`Object` + """ + + return self._get_objects(container) + + def get_container(self, container_name): + """ + Return a container instance. + + :param container_name: Container name. + :type container_name: ``str`` + + :return: :class:`Container` instance. + :rtype: :class:`Container` + """ + return self._make_container(container_name) + + def get_container_cdn_url(self, container, check=False): + """ + Return a container CDN URL. + + :param container: Container instance + :type container: :class:`Container` + + :param check: Indicates if the path's existence must be checked + :type check: ``bool`` + + :return: A CDN URL for this container. 
+ :rtype: ``str`` + """ + path = os.path.join(self.base_path, container.name) + + if check and not os.path.isdir(path): + raise ContainerDoesNotExistError(value=None, driver=self, + container_name=container.name) + + return path + + def get_object(self, container_name, object_name): + """ + Return an object instance. + + :param container_name: Container name. + :type container_name: ``str`` + + :param object_name: Object name. + :type object_name: ``str`` + + :return: :class:`Object` instance. + :rtype: :class:`Object` + """ + container = self._make_container(container_name) + return self._make_object(container, object_name) + + def get_object_cdn_url(self, obj): + """ + Return a object CDN URL. + + :param obj: Object instance + :type obj: :class:`Object` + + :return: A CDN URL for this object. + :rtype: ``str`` + """ + return os.path.join(self.base_path, obj.container.name, obj.name) + + def enable_container_cdn(self, container): + """ + Enable container CDN. + + :param container: Container instance + :type container: :class:`Container` + + :rtype: ``bool`` + """ + + path = self.get_container_cdn_url(container) + lockfile.MkdirFileLock(path, threaded=True) + + with LockLocalStorage(path): + self._make_path(path) + + return True + + def enable_object_cdn(self, obj): + """ + Enable object CDN. + + :param obj: Object instance + :type obj: :class:`Object` + + :rtype: ``bool`` + """ + path = self.get_object_cdn_url(obj) + + with LockLocalStorage(path): + if os.path.exists(path): + return False + try: + obj_file = open(path, 'w') + obj_file.close() + except: + return False + + return True + + def download_object(self, obj, destination_path, overwrite_existing=False, + delete_on_failure=True): + """ + Download an object to the specified destination path. + + :param obj: Object instance. + :type obj: :class:`Object` + + :param destination_path: Full path to a file or a directory where the + incoming file will be saved. 
+ :type destination_path: ``str`` + + :param overwrite_existing: True to overwrite an existing file, + defaults to False. + :type overwrite_existing: ``bool`` + + :param delete_on_failure: True to delete a partially downloaded file if + the download was not successful (hash mismatch / file size). + :type delete_on_failure: ``bool`` + + :return: True if an object has been successfully downloaded, False + otherwise. + :rtype: ``bool`` + """ + + obj_path = self.get_object_cdn_url(obj) + base_name = os.path.basename(destination_path) + + if not base_name and not os.path.exists(destination_path): + raise LibcloudError( + value='Path %s does not exist' % (destination_path), + driver=self) + + if not base_name: + file_path = os.path.join(destination_path, obj.name) + else: + file_path = destination_path + + if os.path.exists(file_path) and not overwrite_existing: + raise LibcloudError( + value='File %s already exists, but ' % (file_path) + + 'overwrite_existing=False', + driver=self) + + try: + shutil.copy(obj_path, file_path) + except IOError: + if delete_on_failure: + try: + os.unlink(file_path) + except Exception: + pass + return False + + return True + + def download_object_as_stream(self, obj, chunk_size=None): + """ + Return a generator which yields object data. + + :param obj: Object instance + :type obj: :class:`Object` + + :param chunk_size: Optional chunk size (in bytes). + :type chunk_size: ``int`` + + :rtype: ``object`` + """ + + path = self.get_object_cdn_url(obj) + + with open(path) as obj_file: + for data in read_in_chunks(obj_file, chunk_size=chunk_size): + yield data + + def upload_object(self, file_path, container, object_name, extra=None, + verify_hash=True): + """ + Upload an object currently located on a disk. + + :param file_path: Path to the object on disk. + :type file_path: ``str`` + + :param container: Destination container. + :type container: :class:`Container` + + :param object_name: Object name. 
+ :type object_name: ``str`` + + :param verify_hash: Verify hash + :type verify_hash: ``bool`` + + :param extra: (optional) Extra attributes (driver specific). + :type extra: ``dict`` + + :rtype: ``object`` + """ + + path = self.get_container_cdn_url(container, check=True) + obj_path = os.path.join(path, object_name) + base_path = os.path.dirname(obj_path) + + self._make_path(base_path) + + with LockLocalStorage(obj_path): + shutil.copy(file_path, obj_path) + + os.chmod(obj_path, int('664', 8)) + + return self._make_object(container, object_name) + + def upload_object_via_stream(self, iterator, container, + object_name, + extra=None): + """ + Upload an object using an iterator. + + If a provider supports it, chunked transfer encoding is used and you + don't need to know in advance the amount of data to be uploaded. + + Otherwise if a provider doesn't support it, iterator will be exhausted + so a total size for data to be uploaded can be determined. + + Note: Exhausting the iterator means that the whole data must be + buffered in memory which might result in memory exhausting when + uploading a very large object. + + If a file is located on a disk you are advised to use upload_object + function which uses fs.stat function to determine the file size and it + doesn't need to buffer whole object in the memory. + + :type iterator: ``object`` + :param iterator: An object which implements the iterator interface. + + :type container: :class:`Container` + :param container: Destination container. + + :type object_name: ``str`` + :param object_name: Object name. + + :type extra: ``dict`` + :param extra: (optional) Extra attributes (driver specific). Note: + This dictionary must contain a 'content_type' key which represents + a content type of the stored object.
+ + :rtype: ``object`` + """ + + path = self.get_container_cdn_url(container, check=True) + obj_path = os.path.join(path, object_name) + base_path = os.path.dirname(obj_path) + + self._make_path(base_path) + + with LockLocalStorage(obj_path): + obj_file = open(obj_path, 'w') + for data in iterator: + obj_file.write(data) + + obj_file.close() + + os.chmod(obj_path, int('664', 8)) + + return self._make_object(container, object_name) + + def delete_object(self, obj): + """ + Delete an object. + + :type obj: :class:`Object` + :param obj: Object instance. + + :return: ``bool`` True on success. + :rtype: ``bool`` + """ + + path = self.get_object_cdn_url(obj) + + with LockLocalStorage(path): + try: + os.unlink(path) + except Exception: + return False + + # Check and delete all the empty parent folders + path = os.path.dirname(path) + container_url = obj.container.get_cdn_url() + + # Delete the empty parent folders till the container's level + while path != container_url: + try: + os.rmdir(path) + except OSError: + exp = sys.exc_info()[1] + if exp.errno == errno.ENOTEMPTY: + break + raise exp + + path = os.path.dirname(path) + + return True + + def create_container(self, container_name): + """ + Create a new container. + + :type container_name: ``str`` + :param container_name: Container name. + + :return: :class:`Container` instance on success. + :rtype: :class:`Container` + """ + + self._check_container_name(container_name) + + path = os.path.join(self.base_path, container_name) + + try: + self._make_path(path, ignore_existing=False) + except OSError: + exp = sys.exc_info()[1] + if exp.errno == errno.EEXIST: + raise ContainerAlreadyExistsError( + value='Container with this name already exists. 
The name ' + 'must be unique among all the containers in the ' + 'system', + container_name=container_name, driver=self) + else: + raise LibcloudError( + 'Error creating container %s' % container_name, + driver=self) + except Exception: + raise LibcloudError( + 'Error creating container %s' % container_name, driver=self) + + return self._make_container(container_name) + + def delete_container(self, container): + """ + Delete a container. + + :type container: :class:`Container` + :param container: Container instance + + :return: True on success, False otherwise. + :rtype: ``bool`` + """ + + # Check if there are any objects inside this + for obj in self._get_objects(container): + raise ContainerIsNotEmptyError(value='Container is not empty', + container_name=container.name, + driver=self) + + path = self.get_container_cdn_url(container, check=True) + + with LockLocalStorage(path): + try: + shutil.rmtree(path) + except Exception: + return False + + return True diff -Nru libcloud-0.5.0/libcloud/storage/drivers/nimbus.py libcloud-0.15.1/libcloud/storage/drivers/nimbus.py --- libcloud-0.5.0/libcloud/storage/drivers/nimbus.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/storage/drivers/nimbus.py 2013-12-12 09:01:49.000000000 +0000 @@ -0,0 +1,114 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import time +import hashlib +import hmac + +try: + import simplejson as json +except ImportError: + import json # NOQA + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlencode + +from libcloud.common.base import ConnectionUserAndKey, JsonResponse +from libcloud.common.types import InvalidCredsError, LibcloudError +from libcloud.storage.base import Container, StorageDriver + + +class NimbusResponse(JsonResponse): + valid_response_codes = [httplib.OK, httplib.NOT_FOUND, httplib.CONFLICT, + httplib.BAD_REQUEST] + + def success(self): + return self.status in self.valid_response_codes + + def parse_error(self): + if self.status in [httplib.UNAUTHORIZED]: + raise InvalidCredsError(self.body) + raise LibcloudError('Unknown error. Status code: %d' % (self.status), + driver=self.driver) + + +class NimbusConnection(ConnectionUserAndKey): + host = 'nimbus.io' + responseCls = NimbusResponse + + def __init__(self, *args, **kwargs): + self.id = kwargs.pop('id') + super(NimbusConnection, self).__init__(*args, **kwargs) + + def pre_connect_hook(self, params, headers): + timestamp = str(int(time.time())) + signature = self._calculate_signature(user_id=self.user_id, + method=self.method, + params=params, + path=self.action, + timestamp=timestamp, + key=self.key) + headers['X-NIMBUS-IO-Timestamp'] = timestamp + headers['Authorization'] = 'NIMBUS.IO %s:%s' % (self.id, signature) + return params, headers + + def _calculate_signature(self, user_id, method, params, path, timestamp, + key): + if params: + uri_path = path + '?' 
+ urlencode(params) + else: + uri_path = path + + string_to_sign = [user_id, method, str(timestamp), uri_path] + string_to_sign = '\n'.join(string_to_sign) + + hmac_value = hmac.new(key, string_to_sign, hashlib.sha256) + return hmac_value.hexdigest() + + +class NimbusStorageDriver(StorageDriver): + name = 'Nimbus.io' + website = 'https://nimbus.io/' + connectionCls = NimbusConnection + + def __init__(self, *args, **kwargs): + self.user_id = kwargs['user_id'] + super(NimbusStorageDriver, self).__init__(*args, **kwargs) + + def iterate_containers(self): + response = self.connection.request('/customers/%s/collections' % + (self.connection.user_id)) + return self._to_containers(response.object) + + def create_container(self, container_name): + params = {'action': 'create', 'name': container_name} + response = self.connection.request('/customers/%s/collections' % + (self.connection.user_id), + params=params, + method='POST') + return self._to_container(response.object) + + def _to_containers(self, data): + for item in data: + yield self._to_container(item) + + def _to_container(self, data): + name = data[0] + extra = {'date_created': data[2]} + return Container(name=name, extra=extra, driver=self) + + def _ex_connection_class_kwargs(self): + result = {'id': self.user_id} + return result diff -Nru libcloud-0.5.0/libcloud/storage/drivers/ninefold.py libcloud-0.15.1/libcloud/storage/drivers/ninefold.py --- libcloud-0.5.0/libcloud/storage/drivers/ninefold.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/storage/drivers/ninefold.py 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,26 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.storage.providers import Provider +from libcloud.storage.drivers.atmos import AtmosDriver + + +class NinefoldStorageDriver(AtmosDriver): + host = 'api.ninefold.com' + path = '/storage/v1.0' + + type = Provider.NINEFOLD + name = 'Ninefold' + website = 'http://ninefold.com/' diff -Nru libcloud-0.5.0/libcloud/storage/drivers/s3.py libcloud-0.15.1/libcloud/storage/drivers/s3.py --- libcloud-0.5.0/libcloud/storage/drivers/s3.py 2011-05-21 15:42:52.000000000 +0000 +++ libcloud-0.15.1/libcloud/storage/drivers/s3.py 2014-06-11 14:27:59.000000000 +0000 @@ -14,20 +14,29 @@ # limitations under the License. 
import time -import httplib -import urllib import copy import base64 import hmac +import sys from hashlib import sha1 -from xml.etree.ElementTree import Element, SubElement, tostring -from libcloud.utils import fixxpath, findtext, in_development_warning -from libcloud.utils import read_in_chunks +try: + from lxml.etree import Element, SubElement +except ImportError: + from xml.etree.ElementTree import Element, SubElement + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlquote +from libcloud.utils.py3 import urlencode +from libcloud.utils.py3 import b +from libcloud.utils.py3 import tostring + +from libcloud.utils.xml import fixxpath, findtext +from libcloud.utils.files import read_in_chunks from libcloud.common.types import InvalidCredsError, LibcloudError from libcloud.common.base import ConnectionUserAndKey, RawResponse -from libcloud.common.aws import AWSBaseResponse +from libcloud.common.aws import AWSBaseResponse, AWSDriver, AWSTokenConnection from libcloud.storage.base import Object, Container, StorageDriver from libcloud.storage.types import ContainerIsNotEmptyError @@ -36,13 +45,13 @@ from libcloud.storage.types import ObjectDoesNotExistError from libcloud.storage.types import ObjectHashMismatchError -in_development_warning('libcloud.storage.drivers.s3') # How long before the token expires EXPIRATION_SECONDS = 15 * 60 S3_US_STANDARD_HOST = 's3.amazonaws.com' S3_US_WEST_HOST = 's3-us-west-1.amazonaws.com' +S3_US_WEST_OREGON_HOST = 's3-us-west-2.amazonaws.com' S3_EU_WEST_HOST = 's3-eu-west-1.amazonaws.com' S3_AP_SOUTHEAST_HOST = 's3-ap-southeast-1.amazonaws.com' S3_AP_NORTHEAST_HOST = 's3-ap-northeast-1.amazonaws.com' @@ -50,18 +59,25 @@ API_VERSION = '2006-03-01' NAMESPACE = 'http://s3.amazonaws.com/doc/%s/' % (API_VERSION) +# AWS multi-part chunks must be minimum 5MB +CHUNK_SIZE = 5 * 1024 * 1024 + +# Desired number of items in each response inside a paginated request in +# ex_iterate_multipart_uploads. 
+RESPONSES_PER_REQUEST = 100 -class S3Response(AWSBaseResponse): - valid_response_codes = [ httplib.NOT_FOUND, httplib.CONFLICT, - httplib.BAD_REQUEST ] +class S3Response(AWSBaseResponse): + namespace = None + valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT, + httplib.BAD_REQUEST] def success(self): i = int(self.status) return i >= 200 and i <= 299 or i in self.valid_response_codes def parse_error(self): - if self.status in [ httplib.UNAUTHORIZED, httplib.FORBIDDEN ]: + if self.status in [httplib.UNAUTHORIZED, httplib.FORBIDDEN]: raise InvalidCredsError(self.body) elif self.status == httplib.MOVED_PERMANENTLY: raise LibcloudError('This bucket is located in a different ' + @@ -70,12 +86,14 @@ raise LibcloudError('Unknown error. Status code: %d' % (self.status), driver=S3StorageDriver) + class S3RawResponse(S3Response, RawResponse): pass -class S3Connection(ConnectionUserAndKey): + +class BaseS3Connection(ConnectionUserAndKey): """ - Repersents a single connection to the EC2 Endpoint + Represents a single connection to the S3 Endpoint """ host = 's3.amazonaws.com' @@ -89,18 +107,16 @@ return params def pre_connect_hook(self, params, headers): - params['Signature'] = self._get_aws_auth_param(method=self.method, - headers=headers, - params=params, - expires=params['Expires'], - secret_key=self.key, - path=self.action) + params['Signature'] = self._get_aws_auth_param( + method=self.method, headers=headers, params=params, + expires=params['Expires'], secret_key=self.key, path=self.action) return params, headers def _get_aws_auth_param(self, method, headers, params, expires, secret_key, path='/'): """ - Signature = URL-Encode( Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) ) ); + Signature = URL-Encode( Base64( HMAC-SHA1( YourSecretAccessKeyID, + UTF-8-Encoding-Of( StringToSign ) ) ) ); StringToSign = HTTP-VERB + "\n" + Content-MD5 + "\n" + @@ -109,36 +125,37 @@ CanonicalizedAmzHeaders + CanonicalizedResource; """ - 
special_header_keys = [ 'content-md5', 'content-type', 'date' ] - special_header_values = { 'date': '' } + special_header_keys = ['content-md5', 'content-type', 'date'] + special_header_values = {'date': ''} amz_header_values = {} headers_copy = copy.deepcopy(headers) - for key, value in headers_copy.iteritems(): - if key.lower() in special_header_keys: - special_header_values[key.lower()] = value.lower().strip() - elif key.lower().startswith('x-amz-'): + for key, value in list(headers_copy.items()): + key_lower = key.lower() + if key_lower in special_header_keys: + special_header_values[key_lower] = value.strip() + elif key_lower.startswith('x-amz-'): amz_header_values[key.lower()] = value.strip() - if not special_header_values.has_key('content-md5'): + if 'content-md5' not in special_header_values: special_header_values['content-md5'] = '' - if not special_header_values.has_key('content-type'): + if 'content-type' not in special_header_values: special_header_values['content-type'] = '' if expires: special_header_values['date'] = str(expires) - keys_sorted = special_header_values.keys() + keys_sorted = list(special_header_values.keys()) keys_sorted.sort() - buf = [ method ] + buf = [method] for key in keys_sorted: value = special_header_values[key] buf.append(value) string_to_sign = '\n'.join(buf) - keys_sorted = amz_header_values.keys() + keys_sorted = list(amz_header_values.keys()) keys_sorted.sort() amz_header_string = [] @@ -148,23 +165,70 @@ amz_header_string = '\n'.join(amz_header_string) values_to_sign = [] - for value in [ string_to_sign, amz_header_string, path]: + for value in [string_to_sign, amz_header_string, path]: if value: values_to_sign.append(value) string_to_sign = '\n'.join(values_to_sign) b64_hmac = base64.b64encode( - hmac.new(secret_key, string_to_sign, digestmod=sha1).digest() + hmac.new(b(secret_key), b(string_to_sign), digestmod=sha1).digest() ) - return b64_hmac + return b64_hmac.decode('utf-8') + + +class 
S3Connection(AWSTokenConnection, BaseS3Connection): + """ + Represents a single connection to the S3 endpoint, with AWS-specific + features. + """ + pass + + +class S3MultipartUpload(object): + """ + Class representing an amazon s3 multipart upload + """ + + def __init__(self, key, id, created_at, initiator, owner): + """ + Class representing an amazon s3 multipart upload + + :param key: The object/key that was being uploaded + :type key: ``str`` + + :param id: The upload id assigned by amazon + :type id: ``str`` + + :param created_at: The date/time at which the upload was started + :type created_at: ``str`` + + :param initiator: The AWS owner/IAM user who initiated this + :type initiator: ``str`` + + :param owner: The AWS owner/IAM who will own this object + :type owner: ``str`` + """ + self.key = key + self.id = id + self.created_at = created_at + self.initiator = initiator + self.owner = owner + + def __repr__(self): + return ('<S3MultipartUpload: key=%s>' % (self.key)) + -class S3StorageDriver(StorageDriver): +class BaseS3StorageDriver(StorageDriver): name = 'Amazon S3 (standard)' - connectionCls = S3Connection + website = 'http://aws.amazon.com/s3/' + connectionCls = BaseS3Connection hash_type = 'md5' + supports_chunked_encoding = False + supports_s3_multipart_upload = True ex_location_name = '' + namespace = NAMESPACE - def list_containers(self): + def iterate_containers(self): response = self.connection.request('/') if response.status == httplib.OK: containers = self._to_containers(obj=response.object, @@ -174,36 +238,83 @@ raise LibcloudError('Unexpected status code: %s' % (response.status), driver=self) - def list_container_objects(self, container): - response = self.connection.request('/%s' % (container.name)) - if response.status == httplib.OK: - objects = self._to_objs(obj=response.object, - xpath='Contents', container=container) - return objects + def list_container_objects(self, container, ex_prefix=None): + """ + Return a list of objects for the given container.
- raise LibcloudError('Unexpected status code: %s' % (response.status), - driver=self) + :param container: Container instance. + :type container: :class:`Container` - def get_container(self, container_name): - # This is very inefficient, but afaik it's the only way to do it - containers = self.list_containers() + :param ex_prefix: Only return objects starting with ex_prefix + :type ex_prefix: ``str`` - try: - container = [ c for c in containers if c.name == container_name ][0] - except IndexError: - raise ContainerDoesNotExistError(value=None, driver=self, - container_name=container_name) + :return: A list of Object instances. + :rtype: ``list`` of :class:`Object` + """ + return list(self.iterate_container_objects(container, + ex_prefix=ex_prefix)) - return container + def iterate_container_objects(self, container, ex_prefix=None): + """ + Return a generator of objects for the given container. + + :param container: Container instance + :type container: :class:`Container` + + :param ex_prefix: Only return objects starting with ex_prefix + :type ex_prefix: ``str`` + + :return: A generator of Object instances. 
+ :rtype: ``generator`` of :class:`Object` + """ + params = {} + if ex_prefix: + params['prefix'] = ex_prefix + + last_key = None + exhausted = False + container_path = self._get_container_path(container) + + while not exhausted: + if last_key: + params['marker'] = last_key + + response = self.connection.request(container_path, + params=params) + + if response.status != httplib.OK: + raise LibcloudError('Unexpected status code: %s' % + (response.status), driver=self) + + objects = self._to_objs(obj=response.object, + xpath='Contents', container=container) + is_truncated = response.object.findtext(fixxpath( + xpath='IsTruncated', namespace=self.namespace)).lower() + exhausted = (is_truncated == 'false') + + last_key = None + for obj in objects: + last_key = obj.name + yield obj + + def get_container(self, container_name): + try: + response = self.connection.request('/%s' % container_name, + method='HEAD') + if response.status == httplib.NOT_FOUND: + raise ContainerDoesNotExistError(value=None, driver=self, + container_name=container_name) + except InvalidCredsError: + # This just means the user doesn't have IAM permissions to do a + # HEAD request but other requests might work. + pass + return Container(name=container_name, extra=None, driver=self) def get_object(self, container_name, object_name): - # TODO: Figure out what is going on when the object or container does not exist - # - it seems that Amazon just keeps the connection open and doesn't return a - # response. 
container = self.get_container(container_name=container_name) - response = self.connection.request('/%s/%s' % (container_name, - object_name), - method='HEAD') + object_path = self._get_object_path(container, object_name) + response = self.connection.request(object_path, method='HEAD') + if response.status == httplib.OK: obj = self._headers_to_object(object_name=object_name, container=container, @@ -213,11 +324,42 @@ raise ObjectDoesNotExistError(value=None, driver=self, object_name=object_name) + def _get_container_path(self, container): + """ + Return a container path + + :param container: Container instance + :type container: :class:`Container` + + :return: A path for this container. + :rtype: ``str`` + """ + return '/%s' % (container.name) + + def _get_object_path(self, container, object_name): + """ + Return an object's CDN path. + + :param container: Container instance + :type container: :class:`Container` + + :param object_name: Object name + :type object_name: :class:`str` + + :return: A path for this object. + :rtype: ``str`` + """ + container_url = self._get_container_path(container) + object_name_cleaned = self._clean_object_name(object_name) + object_path = '%s/%s' % (container_url, object_name_cleaned) + return object_path + def create_container(self, container_name): if self.ex_location_name: root = Element('CreateBucketConfiguration') child = SubElement(root, 'LocationConstraint') child.text = self.ex_location_name + data = tostring(root) else: data = '' @@ -230,10 +372,10 @@ container = Container(name=container_name, extra=None, driver=self) return container elif response.status == httplib.CONFLICT: - raise InvalidContainerNameError(value='Container with this name ' + - 'already exists. The name must be unique among ' - 'all the containers in the system', - container_name=container_name, driver=self) + raise InvalidContainerNameError( + value='Container with this name already exists. 
The name must ' + 'be unique among all the containers in the system', + container_name=container_name, driver=self) elif response.status == httplib.BAD_REQUEST: raise InvalidContainerNameError(value='Container name contains ' + 'invalid characters.', @@ -250,10 +392,9 @@ if response.status == httplib.NO_CONTENT: return True elif response.status == httplib.CONFLICT: - raise ContainerIsNotEmptyError(value='Container must be empty' + - ' before it can be deleted.', - container_name=container.name, - driver=self) + raise ContainerIsNotEmptyError( + value='Container must be empty before it can be deleted.', + container_name=container.name, driver=self) elif response.status == httplib.NOT_FOUND: raise ContainerDoesNotExistError(value=None, driver=self, @@ -263,40 +404,40 @@ def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True): - container_name = self._clean_object_name(obj.container.name) - object_name = self._clean_object_name(obj.name) + obj_path = self._get_object_path(obj.container, obj.name) - response = self.connection.request('/%s/%s' % (container_name, - object_name), - method='GET', - raw=True) + response = self.connection.request(obj_path, method='GET', raw=True) return self._get_object(obj=obj, callback=self._save_object, response=response, - callback_kwargs={'obj': obj, - 'response': response.response, - 'destination_path': destination_path, - 'overwrite_existing': overwrite_existing, - 'delete_on_failure': delete_on_failure}, + callback_kwargs={ + 'obj': obj, + 'response': response.response, + 'destination_path': destination_path, + 'overwrite_existing': overwrite_existing, + 'delete_on_failure': delete_on_failure}, success_status_code=httplib.OK) def download_object_as_stream(self, obj, chunk_size=None): - container_name = self._clean_object_name(obj.container.name) - object_name = self._clean_object_name(obj.name) - response = self.connection.request('/%s/%s' % (container_name, - object_name), - method='GET', 
raw=True) + obj_path = self._get_object_path(obj.container, obj.name) + response = self.connection.request(obj_path, method='GET', raw=True) return self._get_object(obj=obj, callback=read_in_chunks, response=response, - callback_kwargs={ 'iterator': response.response, - 'chunk_size': chunk_size}, + callback_kwargs={'iterator': response.response, + 'chunk_size': chunk_size}, success_status_code=httplib.OK) def upload_object(self, file_path, container, object_name, extra=None, verify_hash=True, ex_storage_class=None): + """ + @inherits: :class:`StorageDriver.upload_object` + + :param ex_storage_class: Storage class + :type ex_storage_class: ``str`` + """ upload_func = self._upload_file - upload_func_kwargs = { 'file_path': file_path } + upload_func_kwargs = {'file_path': file_path} return self._put_object(container=container, object_name=object_name, upload_func=upload_func, @@ -305,65 +446,384 @@ verify_hash=verify_hash, storage_class=ex_storage_class) + def _upload_multipart(self, response, data, iterator, container, + object_name, calculate_hash=True): + """ + Callback invoked for uploading data to S3 using Amazon's + multipart upload mechanism + + :param response: Response object from the initial POST request + :type response: :class:`S3RawResponse` + + :param data: Any data from the initial POST request + :type data: ``str`` + + :param iterator: The generator for fetching the upload data + :type iterator: ``generator`` + + :param container: The container owning the object to which data is + being uploaded + :type container: :class:`Container` + + :param object_name: The name of the object to which we are uploading + :type object_name: ``str`` + + :keyword calculate_hash: Indicates if we must calculate the data hash + :type calculate_hash: ``bool`` + + :return: A tuple of (status, checksum, bytes transferred) + :rtype: ``tuple`` + """ + + object_path = self._get_object_path(container, object_name) + + # Get the upload id from the response xml + response.body = 
response.response.read() + body = response.parse_body() + upload_id = body.find(fixxpath(xpath='UploadId', + namespace=self.namespace)).text + + try: + # Upload the data through the iterator + result = self._upload_from_iterator(iterator, object_path, + upload_id, calculate_hash) + (chunks, data_hash, bytes_transferred) = result + + # Commit the chunk info and complete the upload + etag = self._commit_multipart(object_path, upload_id, chunks) + except Exception: + exc = sys.exc_info()[1] + # Amazon provides a mechanism for aborting an upload. + self._abort_multipart(object_path, upload_id) + raise exc + + # Modify the response header of the first request. This is used + # by other functions once the callback is done + response.headers['etag'] = etag + + return (True, data_hash, bytes_transferred) + + def _upload_from_iterator(self, iterator, object_path, upload_id, + calculate_hash=True): + """ + Uploads data from an interator in fixed sized chunks to S3 + + :param iterator: The generator for fetching the upload data + :type iterator: ``generator`` + + :param object_path: The path of the object to which we are uploading + :type object_name: ``str`` + + :param upload_id: The upload id allocated for this multipart upload + :type upload_id: ``str`` + + :keyword calculate_hash: Indicates if we must calculate the data hash + :type calculate_hash: ``bool`` + + :return: A tuple of (chunk info, checksum, bytes transferred) + :rtype: ``tuple`` + """ + + data_hash = None + if calculate_hash: + data_hash = self._get_hash_function() + + bytes_transferred = 0 + count = 1 + chunks = [] + params = {'uploadId': upload_id} + + # Read the input data in chunk sizes suitable for AWS + for data in read_in_chunks(iterator, chunk_size=CHUNK_SIZE, + fill_size=True, yield_empty=True): + bytes_transferred += len(data) + + if calculate_hash: + data_hash.update(data) + + chunk_hash = self._get_hash_function() + chunk_hash.update(data) + chunk_hash = 
base64.b64encode(chunk_hash.digest()).decode('utf-8') + + # This provides an extra level of data check and is recommended + # by amazon + headers = {'Content-MD5': chunk_hash} + params['partNumber'] = count + + request_path = '?'.join((object_path, urlencode(params))) + + resp = self.connection.request(request_path, method='PUT', + data=data, headers=headers) + + if resp.status != httplib.OK: + raise LibcloudError('Error uploading chunk', driver=self) + + server_hash = resp.headers['etag'] + + # Keep this data for a later commit + chunks.append((count, server_hash)) + count += 1 + + if calculate_hash: + data_hash = data_hash.hexdigest() + + return (chunks, data_hash, bytes_transferred) + + def _commit_multipart(self, object_path, upload_id, chunks): + """ + Makes a final commit of the data. + + :param object_path: Server side object path. + :type object_path: ``str`` + + :param upload_id: ID of the multipart upload. + :type upload_id: ``str`` + + :param upload_id: A list of (chunk_number, chunk_hash) tuples. 
+ :type upload_id: ``list`` + """ + + root = Element('CompleteMultipartUpload') + + for (count, etag) in chunks: + part = SubElement(root, 'Part') + part_no = SubElement(part, 'PartNumber') + part_no.text = str(count) + + etag_id = SubElement(part, 'ETag') + etag_id.text = str(etag) + + data = tostring(root) + + params = {'uploadId': upload_id} + request_path = '?'.join((object_path, urlencode(params))) + response = self.connection.request(request_path, data=data, + method='POST') + + if response.status != httplib.OK: + element = response.object + code, message = response._parse_error_details(element=element) + msg = 'Error in multipart commit: %s (%s)' % (message, code) + raise LibcloudError(msg, driver=self) + + # Get the server's etag to be passed back to the caller + body = response.parse_body() + server_hash = body.find(fixxpath(xpath='ETag', + namespace=self.namespace)).text + return server_hash + + def _abort_multipart(self, object_path, upload_id): + """ + Aborts an already initiated multipart upload + + :param object_path: Server side object path. + :type object_path: ``str`` + + :param upload_id: ID of the multipart upload. + :type upload_id: ``str`` + """ + + params = {'uploadId': upload_id} + request_path = '?'.join((object_path, urlencode(params))) + resp = self.connection.request(request_path, method='DELETE') + + if resp.status != httplib.NO_CONTENT: + raise LibcloudError('Error in multipart abort. status_code=%d' % + (resp.status), driver=self) + def upload_object_via_stream(self, iterator, container, object_name, extra=None, ex_storage_class=None): - # Amazon S3 does not support chunked transfer encoding. - # Using multipart upload to "emulate" it would mean unnecessary - # buffering of data in memory. 
- raise NotImplementedError( - 'upload_object_via_stream not implemented for this driver') + """ + @inherits: :class:`StorageDriver.upload_object_via_stream` + + :param ex_storage_class: Storage class + :type ex_storage_class: ``str`` + """ + + method = 'PUT' + params = None + + # This driver is used by other S3 API compatible drivers also. + # Amazon provides a different (complex?) mechanism to do multipart + # uploads + if self.supports_s3_multipart_upload: + # Initiate the multipart request and get an upload id + upload_func = self._upload_multipart + upload_func_kwargs = {'iterator': iterator, + 'container': container, + 'object_name': object_name} + method = 'POST' + iterator = iter('') + params = 'uploads' + + elif self.supports_chunked_encoding: + upload_func = self._stream_data + upload_func_kwargs = {'iterator': iterator} + else: + # In this case, we have to load the entire object to + # memory and send it as normal data + upload_func = self._upload_data + upload_func_kwargs = {} + + return self._put_object(container=container, object_name=object_name, + upload_func=upload_func, + upload_func_kwargs=upload_func_kwargs, + extra=extra, method=method, query_args=params, + iterator=iterator, verify_hash=False, + storage_class=ex_storage_class) def delete_object(self, obj): - object_name = self._clean_object_name(name=obj.name) - response = self.connection.request('/%s/%s' % (obj.container.name, - object_name), - method='DELETE') + object_path = self._get_object_path(obj.container, obj.name) + response = self.connection.request(object_path, method='DELETE') if response.status == httplib.NO_CONTENT: return True elif response.status == httplib.NOT_FOUND: raise ObjectDoesNotExistError(value=None, driver=self, - object_name=obj.name) + object_name=obj.name) return False + def ex_iterate_multipart_uploads(self, container, prefix=None, + delimiter=None): + """ + Extension method for listing all in-progress S3 multipart uploads. 
+ + Each multipart upload which has not been committed or aborted is + considered in-progress. + + :param container: The container holding the uploads + :type container: :class:`Container` + + :keyword prefix: Print only uploads of objects with this prefix + :type prefix: ``str`` + + :keyword delimiter: The object/key names are grouped based on + being split by this delimiter + :type delimiter: ``str`` + + :return: A generator of S3MultipartUpload instances. + :rtype: ``generator`` of :class:`S3MultipartUpload` + """ + + if not self.supports_s3_multipart_upload: + raise LibcloudError('Feature not supported', driver=self) + + # Get the data for a specific container + request_path = '%s/?uploads' % (self._get_container_path(container)) + params = {'max-uploads': RESPONSES_PER_REQUEST} + + if prefix: + params['prefix'] = prefix + + if delimiter: + params['delimiter'] = delimiter + + finder = lambda node, text: node.findtext(fixxpath(xpath=text, + namespace=self.namespace)) + + while True: + response = self.connection.request(request_path, params=params) + + if response.status != httplib.OK: + raise LibcloudError('Error fetching multipart uploads. 
' + 'Got code: %s' % (response.status), + driver=self) + + body = response.parse_body() + for node in body.findall(fixxpath(xpath='Upload', + namespace=self.namespace)): + + initiator = node.find(fixxpath(xpath='Initiator', + namespace=self.namespace)) + owner = node.find(fixxpath(xpath='Owner', + namespace=self.namespace)) + + key = finder(node, 'Key') + upload_id = finder(node, 'UploadId') + created_at = finder(node, 'Initiated') + initiator = finder(initiator, 'DisplayName') + owner = finder(owner, 'DisplayName') + + yield S3MultipartUpload(key, upload_id, created_at, + initiator, owner) + + # Check if this is the last entry in the listing + is_truncated = body.findtext(fixxpath(xpath='IsTruncated', + namespace=self.namespace)) + + if is_truncated.lower() == 'false': + break + + # Provide params for the next request + upload_marker = body.findtext(fixxpath(xpath='NextUploadIdMarker', + namespace=self.namespace)) + key_marker = body.findtext(fixxpath(xpath='NextKeyMarker', + namespace=self.namespace)) + + params['key-marker'] = key_marker + params['upload-id-marker'] = upload_marker + + def ex_cleanup_all_multipart_uploads(self, container, prefix=None): + """ + Extension method for removing all partially completed S3 multipart + uploads. 
+ + :param container: The container holding the uploads + :type container: :class:`Container` + + :keyword prefix: Delete only uploads of objects with this prefix + :type prefix: ``str`` + """ + + # Iterate through the container and delete the upload ids + for upload in self.ex_iterate_multipart_uploads(container, prefix, + delimiter=None): + object_path = '/%s/%s' % (container.name, upload.key) + self._abort_multipart(object_path, upload.id) + def _clean_object_name(self, name): - name = urllib.quote(name) + name = urlquote(name) return name def _put_object(self, container, object_name, upload_func, - upload_func_kwargs, extra=None, file_path=None, - iterator=None, verify_hash=True, storage_class=None): + upload_func_kwargs, method='PUT', query_args=None, + extra=None, file_path=None, iterator=None, + verify_hash=True, storage_class=None): headers = {} extra = extra or {} storage_class = storage_class or 'standard' if storage_class not in ['standard', 'reduced_redundancy']: - raise ValueError('Invalid storage class value: %s' % (storage_class)) + raise ValueError( + 'Invalid storage class value: %s' % (storage_class)) headers['x-amz-storage-class'] = storage_class.upper() - container_name_cleaned = container.name - object_name_cleaned = self._clean_object_name(object_name) content_type = extra.get('content_type', None) meta_data = extra.get('meta_data', None) + acl = extra.get('acl', None) if meta_data: - for key, value in meta_data.iteritems(): + for key, value in list(meta_data.items()): key = 'x-amz-meta-%s' % (key) headers[key] = value - request_path = '/%s/%s' % (container_name_cleaned, object_name_cleaned) + if acl: + headers['x-amz-acl'] = acl + + request_path = self._get_object_path(container, object_name) + + if query_args: + request_path = '?'.join((request_path, query_args)) + # TODO: Let the underlying exceptions bubble up and capture the SIGPIPE # here. 
- # SIGPIPE is thrown if the provided container does not exist or the user - # does not have correct permission - result_dict = self._upload_object(object_name=object_name, - content_type=content_type, - upload_func=upload_func, - upload_func_kwargs=upload_func_kwargs, - request_path=request_path, - request_method='PUT', - headers=headers, file_path=file_path, - iterator=iterator) + # SIGPIPE is thrown if the provided container does not exist or the + # user does not have correct permission + result_dict = self._upload_object( + object_name=object_name, content_type=content_type, + upload_func=upload_func, upload_func_kwargs=upload_func_kwargs, + request_path=request_path, request_method=method, + headers=headers, file_path=file_path, iterator=iterator) response = result_dict['response'] bytes_transferred = result_dict['bytes_transferred'] @@ -378,43 +838,56 @@ elif response.status == httplib.OK: obj = Object( name=object_name, size=bytes_transferred, hash=server_hash, - extra=None, meta_data=meta_data, container=container, + extra={'acl': acl}, meta_data=meta_data, container=container, driver=self) return obj else: - raise LibcloudError('Unexpected status code, status_code=%s' % (response.status), - driver=self) + raise LibcloudError( + 'Unexpected status code, status_code=%s' % (response.status), + driver=self) def _to_containers(self, obj, xpath): - return [ self._to_container(element) for element in \ - obj.findall(fixxpath(xpath=xpath, namespace=NAMESPACE))] + for element in obj.findall(fixxpath(xpath=xpath, + namespace=self.namespace)): + yield self._to_container(element) def _to_objs(self, obj, xpath, container): - return [ self._to_obj(element, container) for element in \ - obj.findall(fixxpath(xpath=xpath, namespace=NAMESPACE))] + return [self._to_obj(element, container) for element in + obj.findall(fixxpath(xpath=xpath, namespace=self.namespace))] def _to_container(self, element): extra = { 'creation_date': findtext(element=element, xpath='CreationDate', 
- namespace=NAMESPACE) + namespace=self.namespace) } - container = Container( - name=findtext(element=element, xpath='Name', - namespace=NAMESPACE), - extra=extra, - driver=self - ) + container = Container(name=findtext(element=element, xpath='Name', + namespace=self.namespace), + extra=extra, + driver=self + ) return container def _headers_to_object(self, object_name, container, headers): - meta_data = { 'content_type': headers['content-type'] } hash = headers['etag'].replace('"', '') + extra = {'content_type': headers['content-type'], + 'etag': headers['etag']} + meta_data = {} + + if 'last-modified' in headers: + extra['last_modified'] = headers['last-modified'] + + for key, value in headers.items(): + if not key.lower().startswith('x-amz-meta-'): + continue + + key = key.replace('x-amz-meta-', '') + meta_data[key] = value obj = Object(name=object_name, size=headers['content-length'], - hash=hash, extra=None, + hash=hash, extra=extra, meta_data=meta_data, container=container, driver=self) @@ -422,54 +895,80 @@ def _to_obj(self, element, container): owner_id = findtext(element=element, xpath='Owner/ID', - namespace=NAMESPACE) + namespace=self.namespace) owner_display_name = findtext(element=element, xpath='Owner/DisplayName', - namespace=NAMESPACE) - meta_data = { 'owner': { 'id': owner_id, - 'display_name':owner_display_name }} + namespace=self.namespace) + meta_data = {'owner': {'id': owner_id, + 'display_name': owner_display_name}} + last_modified = findtext(element=element, + xpath='LastModified', + namespace=self.namespace) + extra = {'last_modified': last_modified} obj = Object(name=findtext(element=element, xpath='Key', - namespace=NAMESPACE), + namespace=self.namespace), size=int(findtext(element=element, xpath='Size', - namespace=NAMESPACE)), + namespace=self.namespace)), hash=findtext(element=element, xpath='ETag', - namespace=NAMESPACE).replace('"', ''), - extra=None, + namespace=self.namespace).replace('"', ''), + extra=extra, meta_data=meta_data, 
container=container, driver=self - ) + ) return obj + +class S3StorageDriver(AWSDriver, BaseS3StorageDriver): + connectionCls = S3Connection + + class S3USWestConnection(S3Connection): host = S3_US_WEST_HOST + class S3USWestStorageDriver(S3StorageDriver): name = 'Amazon S3 (us-west-1)' connectionCls = S3USWestConnection ex_location_name = 'us-west-1' + +class S3USWestOregonConnection(S3Connection): + host = S3_US_WEST_OREGON_HOST + + +class S3USWestOregonStorageDriver(S3StorageDriver): + name = 'Amazon S3 (us-west-2)' + connectionCls = S3USWestOregonConnection + ex_location_name = 'us-west-2' + + class S3EUWestConnection(S3Connection): host = S3_EU_WEST_HOST + class S3EUWestStorageDriver(S3StorageDriver): name = 'Amazon S3 (eu-west-1)' connectionCls = S3EUWestConnection ex_location_name = 'EU' + class S3APSEConnection(S3Connection): host = S3_AP_SOUTHEAST_HOST + class S3APSEStorageDriver(S3StorageDriver): name = 'Amazon S3 (ap-southeast-1)' connectionCls = S3APSEConnection ex_location_name = 'ap-southeast-1' + class S3APNEConnection(S3Connection): host = S3_AP_NORTHEAST_HOST + class S3APNEStorageDriver(S3StorageDriver): name = 'Amazon S3 (ap-northeast-1)' connectionCls = S3APNEConnection diff -Nru libcloud-0.5.0/libcloud/storage/__init__.py libcloud-0.15.1/libcloud/storage/__init__.py --- libcloud-0.5.0/libcloud/storage/__init__.py 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/libcloud/storage/__init__.py 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,3 @@ +""" +Module for working with Storage +""" diff -Nru libcloud-0.5.0/libcloud/storage/providers.py libcloud-0.15.1/libcloud/storage/providers.py --- libcloud-0.5.0/libcloud/storage/providers.py 2011-04-18 12:58:19.000000000 +0000 +++ libcloud-0.15.1/libcloud/storage/providers.py 2014-05-26 15:42:51.000000000 +0000 @@ -13,27 +13,55 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from libcloud.utils import get_driver as get_provider_driver +from libcloud.utils.misc import get_driver as get_provider_driver +from libcloud.utils.misc import set_driver as set_provider_driver from libcloud.storage.types import Provider DRIVERS = { Provider.DUMMY: - ('libcloud.storage.drivers.dummy', 'DummyStorageDriver'), - Provider.CLOUDFILES_US: - ('libcloud.storage.drivers.cloudfiles', 'CloudFilesUSStorageDriver'), - Provider.CLOUDFILES_UK: - ('libcloud.storage.drivers.cloudfiles', 'CloudFilesUKStorageDriver'), + ('libcloud.storage.drivers.dummy', 'DummyStorageDriver'), + Provider.CLOUDFILES: + ('libcloud.storage.drivers.cloudfiles', 'CloudFilesStorageDriver'), + Provider.OPENSTACK_SWIFT: + ('libcloud.storage.drivers.cloudfiles', 'OpenStackSwiftStorageDriver'), Provider.S3: - ('libcloud.storage.drivers.s3', 'S3StorageDriver'), + ('libcloud.storage.drivers.s3', 'S3StorageDriver'), Provider.S3_US_WEST: - ('libcloud.storage.drivers.s3', 'S3USWestStorageDriver'), + ('libcloud.storage.drivers.s3', 'S3USWestStorageDriver'), + Provider.S3_US_WEST_OREGON: + ('libcloud.storage.drivers.s3', 'S3USWestOregonStorageDriver'), Provider.S3_EU_WEST: - ('libcloud.storage.drivers.s3', 'S3EUWestStorageDriver'), + ('libcloud.storage.drivers.s3', 'S3EUWestStorageDriver'), Provider.S3_AP_SOUTHEAST: - ('libcloud.storage.drivers.s3', 'S3APSEStorageDriver'), + ('libcloud.storage.drivers.s3', 'S3APSEStorageDriver'), Provider.S3_AP_NORTHEAST: - ('libcloud.storage.drivers.s3', 'S3APNEStorageDriver'), + ('libcloud.storage.drivers.s3', 'S3APNEStorageDriver'), + Provider.NINEFOLD: + ('libcloud.storage.drivers.ninefold', 'NinefoldStorageDriver'), + Provider.GOOGLE_STORAGE: + ('libcloud.storage.drivers.google_storage', 'GoogleStorageDriver'), + Provider.NIMBUS: + ('libcloud.storage.drivers.nimbus', 'NimbusStorageDriver'), + Provider.LOCAL: + ('libcloud.storage.drivers.local', 'LocalStorageDriver'), + Provider.AZURE_BLOBS: + ('libcloud.storage.drivers.azure_blobs', 'AzureBlobsStorageDriver'), 
+ Provider.KTUCLOUD: + ('libcloud.storage.drivers.ktucloud', 'KTUCloudStorageDriver'), + + # Deprecated + Provider.CLOUDFILES_US: + ('libcloud.storage.drivers.cloudfiles', 'CloudFilesUSStorageDriver'), + Provider.CLOUDFILES_UK: + ('libcloud.storage.drivers.cloudfiles', 'CloudFilesUKStorageDriver'), + Provider.CLOUDFILES_SWIFT: + ('libcloud.storage.drivers.cloudfiles', 'OpenStackSwiftStorageDriver') } + def get_driver(provider): return get_provider_driver(DRIVERS, provider) + + +def set_driver(provider, module, klass): + return set_provider_driver(DRIVERS, provider, module, klass) diff -Nru libcloud-0.5.0/libcloud/storage/types.py libcloud-0.15.1/libcloud/storage/types.py --- libcloud-0.5.0/libcloud/storage/types.py 2011-05-21 11:27:19.000000000 +0000 +++ libcloud-0.15.1/libcloud/storage/types.py 2014-05-26 15:42:51.000000000 +0000 @@ -25,27 +25,45 @@ 'ObjectHashMismatchError', 'InvalidContainerNameError'] + class Provider(object): """ Defines for each of the supported providers - @cvar DUMMY: Example provider - @cvar CLOUDFILES_US: CloudFiles US - @cvar CLOUDFILES_UK: CloudFiles UK - @cvar S3: Amazon S3 US - @cvar S3_US_WEST: Amazon S3 US West (Northern California) - @cvar S3_EU_WEST: Amazon S3 EU West (Ireland) - @cvar S3_AP_SOUTHEAST_HOST: Amazon S3 Asia South East (Singapore) - @cvar S3_AP_NORTHEAST_HOST: Amazon S3 Asia South East (Tokyo) + :cvar DUMMY: Example provider + :cvar CLOUDFILES: CloudFiles + :cvar S3: Amazon S3 US + :cvar S3_US_WEST: Amazon S3 US West (Northern California) + :cvar S3_EU_WEST: Amazon S3 EU West (Ireland) + :cvar S3_AP_SOUTHEAST_HOST: Amazon S3 Asia South East (Singapore) + :cvar S3_AP_NORTHEAST_HOST: Amazon S3 Asia South East (Tokyo) + :cvar NINEFOLD: Ninefold + :cvar GOOGLE_STORAGE Google Storage + :cvar S3_US_WEST_OREGON: Amazon S3 US West 2 (Oregon) + :cvar NIMBUS: Nimbus.io driver + :cvar LOCAL: Local storage driver """ - DUMMY = 0 - CLOUDFILES_US = 1 - CLOUDFILES_UK = 2 - S3 = 3 - S3_US_WEST = 4 - S3_EU_WEST = 5 - S3_AP_SOUTHEAST 
= 6 - S3_AP_NORTHEAST = 7 + DUMMY = 'dummy' + S3 = 's3' + S3_US_WEST = 's3_us_west' + S3_EU_WEST = 's3_eu_west' + S3_AP_SOUTHEAST = 's3_ap_southeast' + S3_AP_NORTHEAST = 's3_ap_northeast' + NINEFOLD = 'ninefold' + GOOGLE_STORAGE = 'google_storage' + S3_US_WEST_OREGON = 's3_us_west_oregon' + NIMBUS = 'nimbus' + LOCAL = 'local' + OPENSTACK_SWIFT = 'openstack_swift' + CLOUDFILES = 'cloudfiles' + AZURE_BLOBS = 'azure_blobs' + KTUCLOUD = 'ktucloud' + + # Deperecated + CLOUDFILES_US = 'cloudfiles_us' + CLOUDFILES_UK = 'cloudfiles_uk' + CLOUDFILES_SWIFT = 'cloudfiles_swift' + class ContainerError(LibcloudError): error_type = 'ContainerError' @@ -59,6 +77,7 @@ (self.error_type, repr(self.driver), self.container_name, self.value)) + class ObjectError(LibcloudError): error_type = 'ContainerError' @@ -67,23 +86,34 @@ super(ObjectError, self).__init__(value=value, driver=driver) def __str__(self): - return '<%s in %s, value=%s, object = %s>' % (self.error_type, repr(self.driver), - self.value, self.object_name) + return self.__repr__() + + def __repr__(self): + return '<%s in %s, value=%s, object = %s>' % (self.error_type, + repr(self.driver), + self.value, + self.object_name) + class ContainerAlreadyExistsError(ContainerError): error_type = 'ContainerAlreadyExistsError' + class ContainerDoesNotExistError(ContainerError): error_type = 'ContainerDoesNotExistError' + class ContainerIsNotEmptyError(ContainerError): error_type = 'ContainerIsNotEmptyError' + class ObjectDoesNotExistError(ObjectError): error_type = 'ObjectDoesNotExistError' + class ObjectHashMismatchError(ObjectError): error_type = 'ObjectHashMismatchError' + class InvalidContainerNameError(ContainerError): error_type = 'InvalidContainerNameError' diff -Nru libcloud-0.5.0/libcloud/test/common/__init__.py libcloud-0.15.1/libcloud/test/common/__init__.py --- libcloud-0.5.0/libcloud/test/common/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/common/__init__.py 2013-08-30 
12:21:18.000000000 +0000 @@ -0,0 +1,14 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff -Nru libcloud-0.5.0/libcloud/test/common/test_cloudstack.py libcloud-0.15.1/libcloud/test/common/test_cloudstack.py --- libcloud-0.5.0/libcloud/test/common/test_cloudstack.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/common/test_cloudstack.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,210 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import unittest + +try: + import simplejson as json +except ImportError: + import json + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlparse +from libcloud.utils.py3 import b +from libcloud.utils.py3 import parse_qsl + +from libcloud.common.cloudstack import CloudStackConnection +from libcloud.common.types import MalformedResponseError + +from libcloud.test import MockHttpTestCase + + +async_delay = 0 + + +class CloudStackMockDriver(object): + host = 'nonexistent.' + path = '/path' + async_poll_frequency = 0 + + name = 'fake' + + async_delay = 0 + + +class CloudStackCommonTest(unittest.TestCase): + def setUp(self): + CloudStackConnection.conn_classes = (None, CloudStackMockHttp) + self.connection = CloudStackConnection('apikey', 'secret', + host=CloudStackMockDriver.host) + self.connection.poll_interval = 0.0 + self.driver = self.connection.driver = CloudStackMockDriver() + + def test_sync_request_bad_response(self): + self.driver.path = '/bad/response' + try: + self.connection._sync_request('fake') + except Exception: + e = sys.exc_info()[1] + self.assertTrue(isinstance(e, MalformedResponseError)) + return + self.assertTrue(False) + + def test_sync_request(self): + self.driver.path = '/sync' + self.connection._sync_request('fake') + + def test_async_request_successful(self): + self.driver.path = '/async/success' + result = self.connection._async_request('fake') + self.assertEqual(result, {'fake': 'result'}) + + def test_async_request_unsuccessful(self): + self.driver.path = '/async/fail' + try: + self.connection._async_request('fake') + except Exception: + e = sys.exc_info()[1] + self.assertEqual(CloudStackMockHttp.ERROR_TEXT, str(e)) + return + self.assertFalse(True) + + def test_async_request_delayed(self): + global async_delay + self.driver.path = '/async/delayed' + async_delay = 2 + self.connection._async_request('fake') + self.assertEqual(async_delay, 0) + + def test_signature_algorithm(self): + cases = [ + ( + { + 
'command': 'listVirtualMachines' + }, 'z/a9Y7J52u48VpqIgiwaGUMCso0=' + ), ( + { + 'command': 'deployVirtualMachine', + 'name': 'fred', + 'displayname': 'George', + 'serviceofferingid': 5, + 'templateid': 17, + 'zoneid': 23, + 'networkids': 42 + }, 'gHTo7mYmadZ+zluKHzlEKb1i/QU=' + ), ( + { + 'command': 'deployVirtualMachine', + 'name': 'fred', + 'displayname': 'George+Ringo', + 'serviceofferingid': 5, + 'templateid': 17, + 'zoneid': 23, + 'networkids': 42 + }, 'tAgfrreI1ZvWlWLClD3gu4+aKv4=' + ) + ] + + connection = CloudStackConnection('fnord', 'abracadabra') + for case in cases: + params = connection.add_default_params(case[0]) + self.assertEqual(connection._make_signature(params), b(case[1])) + + +class CloudStackMockHttp(MockHttpTestCase): + + ERROR_TEXT = 'ERROR TEXT' + + def _response(self, status, result, response): + return (status, json.dumps(result), result, response) + + def _check_request(self, url): + url = urlparse.urlparse(url) + query = dict(parse_qsl(url.query)) + + self.assertTrue('apiKey' in query) + self.assertTrue('command' in query) + self.assertTrue('response' in query) + self.assertTrue('signature' in query) + + self.assertTrue(query['response'] == 'json') + + return query + + def _bad_response(self, method, url, body, headers): + self._check_request(url) + result = {'success': True} + return self._response(httplib.OK, result, httplib.responses[httplib.OK]) + + def _sync(self, method, url, body, headers): + query = self._check_request(url) + result = {query['command'].lower() + 'response': {}} + return self._response(httplib.OK, result, httplib.responses[httplib.OK]) + + def _async_success(self, method, url, body, headers): + query = self._check_request(url) + if query['command'].lower() == 'queryasyncjobresult': + self.assertEqual(query['jobid'], '42') + result = { + query['command'].lower() + 'response': { + 'jobstatus': 1, + 'jobresult': {'fake': 'result'} + } + } + else: + result = {query['command'].lower() + 'response': {'jobid': '42'}} + 
return self._response(httplib.OK, result, httplib.responses[httplib.OK]) + + def _async_fail(self, method, url, body, headers): + query = self._check_request(url) + if query['command'].lower() == 'queryasyncjobresult': + self.assertEqual(query['jobid'], '42') + result = { + query['command'].lower() + 'response': { + 'jobstatus': 2, + 'jobresult': {'errortext': self.ERROR_TEXT} + } + } + else: + result = {query['command'].lower() + 'response': {'jobid': '42'}} + return self._response(httplib.OK, result, httplib.responses[httplib.OK]) + + def _async_delayed(self, method, url, body, headers): + global async_delay + + query = self._check_request(url) + if query['command'].lower() == 'queryasyncjobresult': + self.assertEqual(query['jobid'], '42') + if async_delay == 0: + result = { + query['command'].lower() + 'response': { + 'jobstatus': 1, + 'jobresult': {'fake': 'result'} + } + } + else: + result = { + query['command'].lower() + 'response': { + 'jobstatus': 0, + } + } + async_delay -= 1 + else: + result = {query['command'].lower() + 'response': {'jobid': '42'}} + return self._response(httplib.OK, result, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/common/test_gandi.py libcloud-0.15.1/libcloud/test/common/test_gandi.py --- libcloud-0.5.0/libcloud/test/common/test_gandi.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/common/test_gandi.py 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,30 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils.py3 import xmlrpclib +from libcloud.test import MockHttp + + +class BaseGandiMockHttp(MockHttp): + + def _get_method_name(self, type, use_param, qs, path): + return "_xmlrpc" + + def _xmlrpc(self, method, url, body, headers): + params, methodName = xmlrpclib.loads(body) + meth_name = '_xmlrpc__' + methodName.replace('.', '_') + if self.type: + meth_name = '%s_%s' % (meth_name, self.type) + return getattr(self, meth_name)(method, url, body, headers) diff -Nru libcloud-0.5.0/libcloud/test/common/test_google.py libcloud-0.15.1/libcloud/test/common/test_google.py --- libcloud-0.5.0/libcloud/test/common/test_google.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/common/test_google.py 2014-06-11 14:28:05.000000000 +0000 @@ -0,0 +1,244 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Tests for Google Connection classes. +""" +import datetime +import sys +import unittest + +try: + import simplejson as json +except ImportError: + import json + +from libcloud.utils.py3 import httplib + +from libcloud.test import MockHttp, LibcloudTestCase +from libcloud.common.google import (GoogleAuthError, + GoogleBaseAuthConnection, + GoogleInstalledAppAuthConnection, + GoogleServiceAcctAuthConnection, + GoogleBaseConnection) +from libcloud.test.secrets import GCE_PARAMS + +# Skip some tests if PyCrypto is unavailable +try: + from Crypto.Hash import SHA256 +except ImportError: + SHA256 = None + + +class MockJsonResponse(object): + def __init__(self, body): + self.object = body + + +class GoogleBaseAuthConnectionTest(LibcloudTestCase): + """ + Tests for GoogleBaseAuthConnection + """ + GoogleBaseAuthConnection._now = lambda x: datetime.datetime(2013, 6, 26, + 19, 0, 0) + + def setUp(self): + GoogleBaseAuthConnection.conn_classes = (GoogleAuthMockHttp, + GoogleAuthMockHttp) + self.mock_scopes = ['foo', 'bar'] + kwargs = {'scopes': self.mock_scopes} + self.conn = GoogleInstalledAppAuthConnection(*GCE_PARAMS, + **kwargs) + + def test_scopes(self): + self.assertEqual(self.conn.scopes, 'foo bar') + + def test_add_default_headers(self): + old_headers = {} + expected_headers = { + 'Content-Type': 'application/x-www-form-urlencoded', + 'Host': 'accounts.google.com'} + new_headers = self.conn.add_default_headers(old_headers) + self.assertEqual(new_headers, expected_headers) + + def test_token_request(self): + request_body = {'code': 'asdf', 'client_id': self.conn.user_id, + 'client_secret': self.conn.key, + 'redirect_uri': self.conn.redirect_uri, + 'grant_type': 'authorization_code'} + new_token = self.conn._token_request(request_body) + self.assertEqual(new_token['access_token'], 'installedapp') + self.assertEqual(new_token['expire_time'], '2013-06-26T20:00:00Z') + + +class GoogleInstalledAppAuthConnectionTest(LibcloudTestCase): + """ + Tests for 
GoogleInstalledAppAuthConnection + """ + GoogleInstalledAppAuthConnection.get_code = lambda x: '1234' + + def setUp(self): + GoogleInstalledAppAuthConnection.conn_classes = (GoogleAuthMockHttp, + GoogleAuthMockHttp) + self.mock_scopes = ['https://www.googleapis.com/auth/foo'] + kwargs = {'scopes': self.mock_scopes} + self.conn = GoogleInstalledAppAuthConnection(*GCE_PARAMS, + **kwargs) + + def test_refresh_token(self): + # This token info doesn't have a refresh token, so a new token will be + # requested + token_info1 = {'access_token': 'tokentoken', 'token_type': 'Bearer', + 'expires_in': 3600} + new_token1 = self.conn.refresh_token(token_info1) + self.assertEqual(new_token1['access_token'], 'installedapp') + + # This token info has a refresh token, so it will be able to be + # refreshed. + token_info2 = {'access_token': 'tokentoken', 'token_type': 'Bearer', + 'expires_in': 3600, 'refresh_token': 'refreshrefresh'} + new_token2 = self.conn.refresh_token(token_info2) + self.assertEqual(new_token2['access_token'], 'refreshrefresh') + + # Both sets should have refresh info + self.assertTrue('refresh_token' in new_token1) + self.assertTrue('refresh_token' in new_token2) + + +class GoogleBaseConnectionTest(LibcloudTestCase): + """ + Tests for GoogleBaseConnection + """ + GoogleBaseConnection._get_token_info_from_file = lambda x: None + GoogleBaseConnection._write_token_info_to_file = lambda x: None + GoogleInstalledAppAuthConnection.get_code = lambda x: '1234' + GoogleServiceAcctAuthConnection.get_new_token = \ + lambda x: x._token_request({}) + GoogleBaseConnection._now = lambda x: datetime.datetime(2013, 6, 26, + 19, 0, 0) + + def setUp(self): + GoogleBaseAuthConnection.conn_classes = (GoogleAuthMockHttp, + GoogleAuthMockHttp) + self.mock_scopes = ['https://www.googleapis.com/auth/foo'] + kwargs = {'scopes': self.mock_scopes, 'auth_type': 'IA'} + self.conn = GoogleBaseConnection(*GCE_PARAMS, **kwargs) + + def test_auth_type(self): + self.assertRaises(GoogleAuthError, 
GoogleBaseConnection, *GCE_PARAMS, + **{'auth_type': 'XX'}) + + kwargs = {'scopes': self.mock_scopes} + + if SHA256: + kwargs['auth_type'] = 'SA' + conn1 = GoogleBaseConnection(*GCE_PARAMS, **kwargs) + self.assertTrue(isinstance(conn1.auth_conn, + GoogleServiceAcctAuthConnection)) + + kwargs['auth_type'] = 'IA' + conn2 = GoogleBaseConnection(*GCE_PARAMS, **kwargs) + self.assertTrue(isinstance(conn2.auth_conn, + GoogleInstalledAppAuthConnection)) + + def test_add_default_headers(self): + old_headers = {} + new_expected_headers = {'Content-Type': 'application/json', + 'Host': 'www.googleapis.com'} + new_headers = self.conn.add_default_headers(old_headers) + self.assertEqual(new_headers, new_expected_headers) + + def test_pre_connect_hook(self): + old_params = {} + old_headers = {} + new_expected_params = {} + new_expected_headers = {'Authorization': 'Bearer installedapp'} + new_params, new_headers = self.conn.pre_connect_hook(old_params, + old_headers) + self.assertEqual(new_params, new_expected_params) + self.assertEqual(new_headers, new_expected_headers) + + def test_encode_data(self): + data = {'key': 'value'} + json_data = '{"key": "value"}' + encoded_data = self.conn.encode_data(data) + self.assertEqual(encoded_data, json_data) + + def test_has_completed(self): + body1 = {"endTime": "2013-06-26T10:05:07.630-07:00", + "id": "3681664092089171723", + "kind": "compute#operation", + "status": "DONE", + "targetId": "16211908079305042870"} + body2 = {"endTime": "2013-06-26T10:05:07.630-07:00", + "id": "3681664092089171723", + "kind": "compute#operation", + "status": "RUNNING", + "targetId": "16211908079305042870"} + response1 = MockJsonResponse(body1) + response2 = MockJsonResponse(body2) + self.assertTrue(self.conn.has_completed(response1)) + self.assertFalse(self.conn.has_completed(response2)) + + def test_get_poll_request_kwargs(self): + body = {"endTime": "2013-06-26T10:05:07.630-07:00", + "id": "3681664092089171723", + "kind": "compute#operation", + "selfLink": 
"https://www.googleapis.com/operations-test"} + response = MockJsonResponse(body) + expected_kwargs = {'action': + 'https://www.googleapis.com/operations-test'} + kwargs = self.conn.get_poll_request_kwargs(response, None, {}) + self.assertEqual(kwargs, expected_kwargs) + + def test_morph_action_hook(self): + self.conn.request_path = '/compute/apiver/project/project-name' + action1 = ('https://www.googleapis.com/compute/apiver/project' + '/project-name/instances') + action2 = '/instances' + expected_request = '/compute/apiver/project/project-name/instances' + request1 = self.conn.morph_action_hook(action1) + request2 = self.conn.morph_action_hook(action2) + self.assertEqual(request1, expected_request) + self.assertEqual(request2, expected_request) + + +class GoogleAuthMockHttp(MockHttp): + """ + Mock HTTP Class for Google Auth Connections. + """ + json_hdr = {'content-type': 'application/json; charset=UTF-8'} + + def _o_oauth2_token(self, method, url, body, headers): + token_info = {'access_token': 'tokentoken', + 'token_type': 'Bearer', + 'expires_in': 3600} + refresh_token = {'access_token': 'refreshrefresh', + 'token_type': 'Bearer', + 'expires_in': 3600} + ia_token = {'access_token': 'installedapp', + 'token_type': 'Bearer', + 'expires_in': 3600, + 'refresh_token': 'refreshrefresh'} + if 'code' in body: + body = json.dumps(ia_token) + elif 'refresh_token' in body: + body = json.dumps(refresh_token) + else: + body = json.dumps(token_info) + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/common/test_openstack.py libcloud-0.15.1/libcloud/test/common/test_openstack.py --- libcloud-0.5.0/libcloud/test/common/test_openstack.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/common/test_openstack.py 2013-09-09 15:24:52.000000000 +0000 @@ -0,0 +1,49 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# 
contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest + +from mock import Mock + +from libcloud.common.openstack import OpenStackBaseConnection +from libcloud.utils.py3 import PY25 + + +class OpenStackBaseConnectionTest(unittest.TestCase): + + def setUp(self): + self.timeout = 10 + OpenStackBaseConnection.conn_classes = (None, Mock()) + self.connection = OpenStackBaseConnection('foo', 'bar', + timeout=self.timeout, + ex_force_auth_url='https://127.0.0.1') + self.connection.driver = Mock() + self.connection.driver.name = 'OpenStackDriver' + + def test_base_connection_timeout(self): + self.connection.connect() + self.assertEqual(self.connection.timeout, self.timeout) + if PY25: + self.connection.conn_classes[1].assert_called_with(host='127.0.0.1', + port=443) + else: + self.connection.conn_classes[1].assert_called_with(host='127.0.0.1', + port=443, + timeout=10) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/dcs.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/dcs.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/dcs.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/dcs.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,78 @@ + + + + + + + + + 2 
+ barcelona + barcelona + + + + + + 3 + 1 + VIRTUAL_FACTORY + http://10.60.12.7:80/virtualfactory + + + + + + 4 + 1 + VIRTUAL_SYSTEM_MONITOR + http://10.60.12.7:80/vsm + + + + + + 5 + 1 + APPLIANCE_MANAGER + http://10.60.12.7:80/am + + + + + + 6 + 1 + NODE_COLLECTOR + http://10.60.12.7:80/nodecollector + + + + + + 7 + 1 + STORAGE_SYSTEM_MONITOR + http://10.60.12.7:80/ssm + + + + + 8 + 1 + DHCP_SERVICE + omapi://10.60.12.7:7911 + + + + + + 9 + 1 + BPM_SERVICE + http://10.60.12.7:80/bpm-async + + + Abiquo + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_template_11.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_template_11.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_template_11.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_template_11.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,28 @@ + + + + + + + + + + + + + 11 + m0n0wall-vhd + m0n0wall image in VHD format ready for XenServer and HyperV + 1/rs/abiport9000/ovf/269/m0n0wall-1.3b18-i386-flat.vmdk-VHD_SPARSE.vhd + VHD_SPARSE + 10490880 + 1 + 128 + 27262976 + false + 0 + 2013-01-10T20:25:12-05:00 + SYSTEM + false + http://icons.abiquo.com/monowall.jpg + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_templates.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_templates.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_templates.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_templates.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,41 @@ + + + + + + + + + + + + + 11m0n0wall-vhdm0n0wall image in VHD format ready for XenServer and HyperV1/rs/abiport9000/ovf/269/m0n0wall-1.3b18-i386-flat.vmdk-VHD_SPARSE.vhdVHD_SPARSE10490880112827262976false02013-01-10T20:25:12-05:00SYSTEMfalsehttp://icons.abiquo.com/monowall.jpg + + + + + + 
+ + + + + + 19 + RHEL6 Build Bot + RHEL6 Build Bot + 1/rs/abiport9000/ovf/73/build-bot-rhel6-disk1.vmdk + VMDK_STREAM_OPTIMIZED + 351064576 + 1 + 1024 + 4294967296 + false + 0 + 2013-01-10T20:25:12-05:00 + SYSTEM + false + http://rs.bcn.abiquo.com:9000/public/icons/q.png + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,8 @@ + + + + + + + +2virtual image repo010.60.1.72:/opt/vm_repository0 diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/ent_1_dcreps.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/ent_1_dcreps.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/ent_1_dcreps.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/ent_1_dcreps.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + + + + + 2virtual image repo010.60.1.72:/opt/vm_repository0 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/ent_1.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/ent_1.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/ent_1.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/ent_1.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 1 + false + Abiquo + 0 + 0 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/login.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/login.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/login.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/login.xml 2013-08-30 
12:21:18.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + + true + ABIQUO + Standard user + + 2 + en_US + Standard + user + c69a39bd64ffb77ea7ee3369dce742f3 + User + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/not_found_error.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/not_found_error.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/not_found_error.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/not_found_error.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + + DC-0 + The requested datacenter does not exist + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/privilege_errors.html libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/privilege_errors.html --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/privilege_errors.html 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/privilege_errors.html 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,23 @@ + + + Apache Tomcat/6.0.35 - Error report + + + +

HTTP Status 403 - Access is denied

+
+

+ type + Status report

+

+ message + Access is denied +

+

+ description + Access to the specified resource (Access is denied) has been forbidden. +

+
+

Apache Tomcat/6.0.35

+ + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/unauthorized_user.html libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/unauthorized_user.html --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/unauthorized_user.html 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/unauthorized_user.html 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,23 @@ + + + Apache Tomcat/6.0.35 - Error report + + + +

HTTP Status 401 - Bad credentials

+
+

+ type + Status report

+

+ message + Bad credentials +

+

+ description + This request requires HTTP authentication (Bad credentials). +

+
+

Apache Tomcat/6.0.35

+ + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_needs_sync.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_needs_sync.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_needs_sync.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_needs_sync.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,18 @@ + + + + + + + + + + + + 0 + 0 + 5 + libcloud_test_group + 0 + NEEDS_SYNC + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_vm_3_not_allocated.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_vm_3_not_allocated.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_vm_3_not_allocated.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_vm_3_not_allocated.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,38 @@ + + + + + + + + + + + + + + + + + + + + + + + 1 + 27262976 + 0 + 3 + 1 + 1 + ABQ_914f8125-33d3-4fe3-a162-5d6f5bf32614 + 128 + NOT_ALLOCATED + 914f8125-33d3-4fe3-a162-5d6f5bf32614 + 0 + 3 + node-name + 0 + 0 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_vms.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_vms.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_vms.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5_vms.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + + + + + 1272629760311ABQ_914f8125-33d3-4fe3-a162-5d6f5bf32614128NOT_ALLOCATED914f8125-33d3-4fe3-a162-5d6f5bf3261403node-name00 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5.xml 1970-01-01 
00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_5.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,18 @@ + + + + + + + + + + + + 0 + 0 + 5 + libcloud_test_group + 0 + NOT_DEPLOYED + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy_task_failed.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy_task_failed.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy_task_failed.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy_task_failed.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,31 @@ + + + + + + + Undeploy task's power off on virtual machine with id 3 + 1da8c8b6-86f6-49ef-9d29-57dcc73b875a.103a1a6b-4de2-48d8-9a38-8698561020b8 + 1da8c8b6-86f6-49ef-9d29-57dcc73b875a + UNKNOWN + DONE + 1358012669 + POWER_OFF + + + Undeploy task's deconfigure on virtual machine with id 3 + 1da8c8b6-86f6-49ef-9d29-57dcc73b875a.e4250ca9-505d-4640-9ad2-fb101f9e9978 + 1da8c8b6-86f6-49ef-9d29-57dcc73b875a + UNKNOWN + DONE + 1358012669 + DECONFIGURE + + + 3 + FINISHED_UNSUCCESSFULLY + 1da8c8b6-86f6-49ef-9d29-57dcc73b875a + 1358012669 + UNDEPLOY + admin + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy_task.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy_task.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy_task.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy_task.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,31 @@ + + + + + + + Undeploy task's power off on virtual machine with id 3 + 1da8c8b6-86f6-49ef-9d29-57dcc73b875a.103a1a6b-4de2-48d8-9a38-8698561020b8 + 1da8c8b6-86f6-49ef-9d29-57dcc73b875a + UNKNOWN + DONE + 1358012669 + POWER_OFF + + + Undeploy task's deconfigure on virtual machine with id 3 + 
1da8c8b6-86f6-49ef-9d29-57dcc73b875a.e4250ca9-505d-4640-9ad2-fb101f9e9978 + 1da8c8b6-86f6-49ef-9d29-57dcc73b875a + UNKNOWN + DONE + 1358012669 + DECONFIGURE + + + 3 + FINISHED_SUCCESSFULLY + 1da8c8b6-86f6-49ef-9d29-57dcc73b875a + 1358012669 + UNDEPLOY + admin + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_undeploy.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_allocated.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_allocated.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_allocated.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_allocated.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + 1272629760311ABQ_914f8125-33d3-4fe3-a162-5d6f5bf32614128ALLOCATED914f8125-33d3-4fe3-a162-5d6f5bf3261459013node-name0080.12.23.43 diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deployed.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deployed.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deployed.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deployed.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + 1272629760311ABQ_914f8125-33d3-4fe3-a162-5d6f5bf32614128ON914f8125-33d3-4fe3-a162-5d6f5bf3261459013node-name0080.12.23.43 diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy_task_failed.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy_task_failed.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy_task_failed.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy_task_failed.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,31 @@ + + + + + + + Deploy task's configure on virtual machine with id 3 + b44fe278-6b0f-4dfb-be81-7c03006a93cb.a04623bd-7b2c-4c23-9441-aeaa39dd4893 + b44fe278-6b0f-4dfb-be81-7c03006a93cb + UNKNOWN + DONE + 1357872859 + CONFIGURE + + + Deploy task's power on on virtual machine with id 3 + b44fe278-6b0f-4dfb-be81-7c03006a93cb.2fdee19a-4fad-4040-bc94-7acfd6fedc48 + b44fe278-6b0f-4dfb-be81-7c03006a93cb + UNKNOWN + DONE + 1357872859 + POWER_ON + + + 3 + ABORTED + b44fe278-6b0f-4dfb-be81-7c03006a93cb + 1357872859 + DEPLOY + user + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy_task.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy_task.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy_task.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy_task.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,31 @@ + + + + + + + Deploy task's configure on virtual machine with id 3 + b44fe278-6b0f-4dfb-be81-7c03006a93cb.a04623bd-7b2c-4c23-9441-aeaa39dd4893 + b44fe278-6b0f-4dfb-be81-7c03006a93cb + UNKNOWN + DONE + 1357872859 + CONFIGURE + + + Deploy task's power on on virtual machine with id 3 + b44fe278-6b0f-4dfb-be81-7c03006a93cb.2fdee19a-4fad-4040-bc94-7acfd6fedc48 + b44fe278-6b0f-4dfb-be81-7c03006a93cb + UNKNOWN + DONE + 1357872859 + POWER_ON + + + 3 + FINISHED_SUCCESSFULLY + b44fe278-6b0f-4dfb-be81-7c03006a93cb + 1357872859 + DEPLOY + user + 
diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_deploy.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,3 @@ + + +You can keep track of the progress in the link diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_nics.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_nics.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_nics.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_nics.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + + 64192.168.0.252:54:00:b7:f7:850 + + + 36434.34.34.552:54:00:b7:f7:880 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_reset_task.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_reset_task.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_reset_task.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_reset_task.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,22 @@ + + + + + + + Reset task's reset on virtual machine with id 3 + a8c9818e-f389-45b7-be2c-3db3a9689940.5f42a7fc-82f3-4121-be26-da62eb8b9b92 + a8c9818e-f389-45b7-be2c-3db3a9689940 + ROLLBACK_DONE + FAILED + 1357873142 + RESET + + + 3 + FINISHED_SUCCESSFULLY + a8c9818e-f389-45b7-be2c-3db3a9689940 + 1357873142 + RESET + user + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_reset.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_reset.xml --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_reset.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_reset.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,5 @@ + + + + You can keep track of the progress in the link + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy_task_failed.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy_task_failed.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy_task_failed.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy_task_failed.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,22 @@ + + + + + + + Reset task's reset on virtual machine with id 3 + a8c9818e-f389-45b7-be2c-3db3a9689940.5f42a7fc-82f3-4121-be26-da62eb8b9b92 + a8c9818e-f389-45b7-be2c-3db3a9689940 + ROLLBACK_DONE + FAILED + 1357873142 + RESET + + + 3 + FINISHED_UNSUCCESSFULLY + a8c9818e-f389-45b7-be2c-3db3a9689940 + 1357873142 + RESET + user + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy_task.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy_task.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy_task.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy_task.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,22 @@ + + + + + + + Reset task's reset on virtual machine with id 3 + a8c9818e-f389-45b7-be2c-3db3a9689940.5f42a7fc-82f3-4121-be26-da62eb8b9b92 + a8c9818e-f389-45b7-be2c-3db3a9689940 + ROLLBACK_DONE + FAILED + 1357873142 + RESET + + + 3 + FINISHED_SUCCESSFULLY + a8c9818e-f389-45b7-be2c-3db3a9689940 + 1357873142 + RESET + user + diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3_undeploy.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,5 @@ + + + + You can keep track of the progress in the link + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_3.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + 1272629760311ABQ_914f8125-33d3-4fe3-a162-5d6f5bf32614128NOT_ALLOCATED914f8125-33d3-4fe3-a162-5d6f5bf3261459013node-name0080.12.23.43 diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_creation_ok.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_creation_ok.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_creation_ok.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vm_creation_ok.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,20 @@ + + + + + + + + + + + + + + + + + + + +1272629760311ABQ_914f8125-33d3-4fe3-a162-5d6f5bf32614128NOT_ALLOCATED914f8125-33d3-4fe3-a162-5d6f5bf326140 diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vms_allocated.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vms_allocated.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vms_allocated.xml 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vms_allocated.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + + + + + 1272629760311ABQ_914f8125-33d3-4fe3-a162-5d6f5bf32614128ALLOCATED914f8125-33d3-4fe3-a162-5d6f5bf3261403node-name00 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vms.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vms.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vms.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6_vms.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + + + + + 1272629760311ABQ_914f8125-33d3-4fe3-a162-5d6f5bf32614128NOT_ALLOCATED914f8125-33d3-4fe3-a162-5d6f5bf3261403node-name00 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_6.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,19 @@ + + + + + + + + + + + + 0 + 0 + 6 + libcloud + <connections/> + 0 + DEPLOYED + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_creation_ok.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_creation_ok.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_creation_ok.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapp_creation_ok.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,18 @@ + + + + + + + + + + + + 0 + 0 + 5 + libcloud_test_group + 0 + NOT_DEPLOYED + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapps.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapps.xml --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4_vapps.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4_vapps.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,25 @@ + + + + 2 + + + + + + + + + + 006libcloud0DEPLOYED + + + + + + + + + + 005libcloud_test_group0NOT_DEPLOYED + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdc_4.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdc_4.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + KVM + 4 + vdc_kvm + + + + +
192.168.0.0
+ + 192.168.0.1 + 2 + 24 + default_private_network + + + + INTERNAL +
+
diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdcs.xml libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdcs.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/abiquo/vdcs.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/abiquo/vdcs.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,49 @@ + + + + + + + + + + + + + + + + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + KVM + 4 + vdc_kvm + + + + +
192.168.0.0
+ + 192.168.0.1 + 2 + 24 + default_private_network + + + + INTERNAL +
+
+
diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/bluebox/api_block_products_json.json libcloud-0.15.1/libcloud/test/compute/fixtures/bluebox/api_block_products_json.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/bluebox/api_block_products_json.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/bluebox/api_block_products_json.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +[{"cost": 0.15, "id": "94fd37a7-2606-47f7-84d5-9000deda52ae", "description": "Block 1GB Virtual Server"}, {"cost": 0.25, "id": "b412f354-5056-4bf0-a42f-6ddd998aa092", "description": "Block 2GB Virtual Server"}, {"cost": 0.35, "id": "0cd183d3-0287-4b1a-8288-b3ea8302ed58", "description": "Block 4GB Virtual Server"}, {"cost": 0.45, "id": "b9b87a5b-2885-4a2e-b434-44a163ca6251", "description": "Block 8GB Virtual Server"}] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"text":"Block destroyed."} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json libcloud-0.15.1/libcloud/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json 2013-08-30 
12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"ips": [{"address": "67.214.214.212"}], "memory": 1073741824, "template": "centos", "id": "99df878c-6e5c-4945-a635-d94da9fd3146", "storage": 21474836480, "hostname": "apitest.c44905.c44905.blueboxgrid.com", "description": "1 GB RAM + 20 GB Disk", "cpu": 0.5, "status": "running", "product": {"cost": 0.15, "id": "94fd37a7-2606-47f7-84d5-9000deda52ae", "description": "Block 1GB Virtual Server"}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json libcloud-0.15.1/libcloud/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "status": "ok", "text": "Reboot initiated." 
} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/bluebox/api_blocks_json.json libcloud-0.15.1/libcloud/test/compute/fixtures/bluebox/api_blocks_json.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/bluebox/api_blocks_json.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/bluebox/api_blocks_json.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +[{"ips":[{"address":"67.214.214.212"}],"memory":1073741824,"id":"99df878c-6e5c-4945-a635-d94da9fd3146","storage":21474836480,"hostname":"foo.apitest.blueboxgrid.com","description":"1 GB RAM + 20 GB Disk","cpu":0.5,"status":"running"}] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/bluebox/api_blocks_json_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/bluebox/api_blocks_json_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/bluebox/api_blocks_json_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/bluebox/api_blocks_json_post.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"ips":[{"address":"67.214.214.212"}],"memory":1073741824,"id":"99df878c-6e5c-4945-a635-d94da9fd3146","storage":21474836480,"hostname":"foo.apitest.blueboxgrid.com","description":"1 GB RAM + 20 GB Disk","cpu":0.5,"status":"queued", "product": {"cost": 0.15, "id": "94fd37a7-2606-47f7-84d5-9000deda52ae", "description": "Block 1GB Virtual Server"}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/bluebox/api_block_templates_json.json libcloud-0.15.1/libcloud/test/compute/fixtures/bluebox/api_block_templates_json.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/bluebox/api_block_templates_json.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/bluebox/api_block_templates_json.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +[{"public": true, "id": "c66b8145-f768-45ef-9878-395bf8b1b7ff", "description": "CentOS 5 (Latest Release)", "created": "2009/04/20 
15:46:34 -0700"}, {"public": true, "id": "1fc24f51-6d7d-4fa9-9a6e-0d6f36b692e2", "description": "Ubuntu 8.10 64bit", "created": "2009/04/20 15:46:34 -0700"}, {"public": true, "id": "b6f152db-988c-4194-b292-d6dd2aa2dbab", "description": "Debian 5.0 64bit", "created": "2009/04/20 15:46:34 -0700"}, {"public": true, "id": "4b697e48-282b-4140-8cf8-142e2a2711ee", "description": "Ubuntu 8.04 LTS 64bit", "created": "2009/07/31 15:58:20 -0700"}, {"public": true, "id": "a6a141bf-592a-4fa6-b130-4c14f69e82d0", "description": "Ubuntu 8.04 LTS 32Bit", "created": "2009/04/20 15:46:34 -0700"}, {"public": true, "id": "b181033f-aea7-4e6c-8bb4-11169775c0f8", "description": "Ubuntu 9.04 64bit", "created": "2010/01/26 11:31:19 -0800"}, {"public": true, "id": "b5371c5a-9da2-43ee-a745-99a4723f624c", "description": "ArchLinux 2009.08 64bit", "created": "2010/02/13 18:07:01 -0800"}, {"public": true, "id": "a00baa8f-b5d0-4815-8238-b471c4c4bf72", "description": "Ubuntu 9.10 64bit", "created": "2010/02/17 22:06:21 -0800"}, {"public": true, "id": "03807e08-a13d-44e4-b011-ebec7ef2c928", "description": "Ubuntu 10.04 LTS 64bit", "created": "2010/05/04 14:43:30 -0700"}, {"public": true, "id": "8b60e6de-7cbc-4c8e-b7df-5e2f9c4ffd6b", "description": "Ubuntu 10.04 LTS 32bit", "created": "2010/05/04 14:43:30 -0700"}] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/create_cloud_ip.json libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/create_cloud_ip.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/create_cloud_ip.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/create_cloud_ip.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,19 @@ +{ + "account": { + "id": "acc-tqs4c", + "name": "bbctest", + "resource_type": "account", + "status": "active", + "url": "https://api.gb1.brightbox.com/1.0/account" + }, + "id": "cip-jsjc5", + "interface": null, + "load_balancer": null, + "public_ip": "109.107.37.234", + 
"resource_type": "cloud_ip", + "reverse_dns": "cip-109-107-37-234.gb1.brightbox.com", + "server": null, + "server_group": null, + "status": "unmapped", + "url": "https://api.gb1.brightbox.com/1.0/cloud_ips/cip-jsjc5" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/create_server_gb1_a.json libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/create_server_gb1_a.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/create_server_gb1_a.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/create_server_gb1_a.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,76 @@ +{ + "account": { + "id": "acc-tqs4c", + "name": "bbctest", + "resource_type": "account", + "status": "active", + "url": "https://api.gb1.brightbox.com/1.0/account" + }, + "cloud_ips": [], + "console_token": null, + "console_token_expires": null, + "console_url": null, + "created_at": "2012-04-12T10:58:49Z", + "deleted_at": null, + "fqdn": "srv-p61uj.gb1.brightbox.com", + "hostname": "srv-p61uj", + "id": "srv-p61uj", + "image": { + "arch": "x86_64", + "created_at": "2012-04-11T01:36:19Z", + "description": "Standard server with cloud-init", + "id": "img-n4yek", + "name": "Ubuntu Precise 12.04 LTS server", + "owner": "acc-tqs4c", + "resource_type": "image", + "source": "precise-amd64-17903.gz", + "status": "deprecated", + "url": "https://api.gb1.brightbox.com/1.0/images/img-n4yek", + "username": "ubuntu" + }, + "interfaces": [ + { + "id": "int-ctud9", + "ipv4_address": "10.240.156.30", + "ipv6_address": "2a02:1348:14c:2707:24:19ff:fef0:9c1e", + "mac_address": "02:24:19:f0:9c:1e", + "resource_type": "interface", + "url": "https://api.gb1.brightbox.com/1.0/interfaces/int-ctud9" + } + ], + "name": "Test Node", + "resource_type": "server", + "server_groups": [ + { + "created_at": "2011-08-24T08:41:56Z", + "default": true, + "description": "All new servers are added to this group unless specified otherwise.", + "id": "grp-irgkb", + 
"name": "default", + "resource_type": "server_group", + "url": "https://api.gb1.brightbox.com/1.0/server_groups/grp-irgkb" + } + ], + "server_type": { + "cores": 2, + "disk_size": 20480, + "handle": "nano", + "id": "typ-4nssg", + "name": "Brightbox Nano Instance", + "ram": 512, + "resource_type": "server_type", + "status": "available", + "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-4nssg" + }, + "snapshots": [], + "started_at": null, + "status": "creating", + "url": "https://api.gb1.brightbox.com/1.0/servers/srv-p61uj", + "user_data": null, + "zone": { + "handle": "gb1-a", + "id": "zon-6mxqw", + "resource_type": "zone", + "url": "https://api.gb1.brightbox.com/1.0/zones/zon-6mxqw" + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/create_server_gb1_b.json libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/create_server_gb1_b.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/create_server_gb1_b.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/create_server_gb1_b.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,76 @@ +{ + "account": { + "id": "acc-tqs4c", + "name": "bbctest", + "resource_type": "account", + "status": "active", + "url": "https://api.gb1.brightbox.com/1.0/account" + }, + "cloud_ips": [], + "console_token": null, + "console_token_expires": null, + "console_url": null, + "created_at": "2012-04-12T11:01:51Z", + "deleted_at": null, + "fqdn": "srv-nnumd.gb1.brightbox.com", + "hostname": "srv-nnumd", + "id": "srv-nnumd", + "image": { + "arch": "x86_64", + "created_at": "2012-04-11T01:36:19Z", + "description": "Standard server with cloud-init", + "id": "img-n4yek", + "name": "Ubuntu Precise 12.04 LTS server", + "owner": "acc-tqs4c", + "resource_type": "image", + "source": "precise-amd64-17903.gz", + "status": "deprecated", + "url": "https://api.gb1.brightbox.com/1.0/images/img-n4yek", + "username": "ubuntu" + }, + "interfaces": [ + { + "id": 
"int-2chhk", + "ipv4_address": "10.232.142.194", + "ipv6_address": "2a02:1348:14d:23b0:24:19ff:fee8:8ec2", + "mac_address": "02:24:19:e8:8e:c2", + "resource_type": "interface", + "url": "https://api.gb1.brightbox.com/1.0/interfaces/int-2chhk" + } + ], + "name": "Test Node", + "resource_type": "server", + "server_groups": [ + { + "created_at": "2011-08-24T08:41:56Z", + "default": true, + "description": "All new servers are added to this group unless specified otherwise.", + "id": "grp-irgkb", + "name": "default", + "resource_type": "server_group", + "url": "https://api.gb1.brightbox.com/1.0/server_groups/grp-irgkb" + } + ], + "server_type": { + "cores": 2, + "disk_size": 20480, + "handle": "nano", + "id": "typ-4nssg", + "name": "Brightbox Nano Instance", + "ram": 512, + "resource_type": "server_type", + "status": "available", + "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-4nssg" + }, + "snapshots": [], + "started_at": null, + "status": "creating", + "url": "https://api.gb1.brightbox.com/1.0/servers/srv-nnumd", + "user_data": null, + "zone": { + "handle": "gb1-b", + "id": "zon-remk1", + "resource_type": "zone", + "url": "https://api.gb1.brightbox.com/1.0/zones/zon-remk1" + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/create_server.json libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/create_server.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/create_server.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/create_server.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,62 @@ +{"id": "srv-3a97e", + "url": "servers/(server_id)", + "name": "My web server", + "status": "active", + "hostname": "srv-3a97e.gb1.brightbox.com", + "created_at": "", + "deleted_at": "", + "started_at": "", + "account": + {"id": "acc-3jd8s", + "url": "accounts/(account_id)", + "name": "Brightbox Systems Ltd.", + "status": "verified", + "ram_limit": 20480, + "ram_used": 2048, + 
"limits_cloudips": 5}, + "image": + {"id": "img-9vxqi", + "url": "images/(image_id)", + "name": "Brightbox Lucid 32", + "status": "available", + "description": "Jeremy's debian ec2 image", + "source": "jeremy_debian-32_ec2", + "source_type": "upload", + "arch": "32-bit", + "created_at": "", + "owner": "acc-bright"}, + "server_type": + {"id": "typ-a97e6", + "url": "server_types/(server_type_id)", + "handle": "nano", + "name": "Brightbox Nano", + "status": "", + "cores": 2, + "ram": 2048, + "disk_size": ""}, + "zone": + {"id": "zon-8ja0a", + "url": "zones/(zone_id)", + "handle": "gb1-a"}, + "snapshots": + [{"id": "img-9vxqi", + "url": "images/(image_id)", + "name": "Brightbox Lucid 32", + "status": "available", + "description": "Jeremy's debian ec2 image", + "source": "jeremy_debian-32_ec2", + "source_type": "upload", + "arch": "32-bit", + "created_at": "", + "owner": "acc-bright"}], + "cloud_ips": + [{"id": "cip-ja8ub", + "url": "cloud_ips/(cloud_ip_id)", + "public_ip": "109.107.42.129", + "status": "mapped", + "reverse_dns": "cip-109-107-42-129.gb1.brightbox.com"}], + "interfaces": + [{"id": "int-mc3a9", + "url": "interfaces/(interface_id)", + "mac_address": "02:24:19:6e:18:36", + "ipv4_address": "10.110.24.54"}]} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/list_cloud_ips.json libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/list_cloud_ips.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/list_cloud_ips.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/list_cloud_ips.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,96 @@ +[ + { + "account": { + "id": "acc-tqs4c", + "name": "bbctest", + "resource_type": "account", + "status": "active", + "url": "https://api.gb1.brightbox.com/1.0/account" + }, + "id": "cip-tlrp3", + "interface": null, + "load_balancer": null, + "public_ip": "109.107.35.16", + "resource_type": "cloud_ip", + "reverse_dns": 
"cip-109-107-35-16.gb1.brightbox.com", + "server": null, + "server_group": null, + "status": "unmapped", + "url": "https://api.gb1.brightbox.com/1.0/cloud_ips/cip-tlrp3" + }, + { + "account": { + "id": "acc-tqs4c", + "name": "bbctest", + "resource_type": "account", + "status": "active", + "url": "https://api.gb1.brightbox.com/1.0/account" + }, + "id": "cip-w8vbt", + "interface": null, + "load_balancer": null, + "public_ip": "109.107.35.76", + "resource_type": "cloud_ip", + "reverse_dns": "cip-109-107-35-76.gb1.brightbox.com", + "server": null, + "server_group": null, + "status": "unmapped", + "url": "https://api.gb1.brightbox.com/1.0/cloud_ips/cip-w8vbt" + }, + { + "account": { + "id": "acc-tqs4c", + "name": "bbctest", + "resource_type": "account", + "status": "active", + "url": "https://api.gb1.brightbox.com/1.0/account" + }, + "id": "cip-uswab", + "interface": { + "id": "int-ztqbx", + "ipv4_address": "10.240.228.234", + "ipv6_address": "2a02:1348:14c:393a:24:19ff:fef0:e4ea", + "mac_address": "02:24:19:f0:e4:ea", + "resource_type": "interface", + "url": "https://api.gb1.brightbox.com/1.0/interfaces/int-ztqbx" + }, + "load_balancer": null, + "public_ip": "109.107.35.105", + "resource_type": "cloud_ip", + "reverse_dns": "cip-109-107-35-105.gb1.brightbox.com", + "server": { + "created_at": "2012-01-30T14:42:28Z", + "deleted_at": null, + "fqdn": "srv-742vn.gb1.brightbox.com", + "hostname": "srv-742vn", + "id": "srv-742vn", + "name": "Kernel builder", + "resource_type": "server", + "started_at": "2012-03-28T15:26:43Z", + "status": "active", + "url": "https://api.gb1.brightbox.com/1.0/servers/srv-742vn" + }, + "server_group": null, + "status": "mapped", + "url": "https://api.gb1.brightbox.com/1.0/cloud_ips/cip-uswab" + }, + { + "account": { + "id": "acc-tqs4c", + "name": "bbctest", + "resource_type": "account", + "status": "active", + "url": "https://api.gb1.brightbox.com/1.0/account" + }, + "id": "cip-ui4n1", + "interface": null, + "load_balancer": null, + "public_ip": 
"109.107.37.135", + "resource_type": "cloud_ip", + "reverse_dns": "cip-109-107-37-135.gb1.brightbox.com", + "server": null, + "server_group": null, + "status": "unmapped", + "url": "https://api.gb1.brightbox.com/1.0/cloud_ips/cip-ui4n1" + } +] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/list_images.json libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/list_images.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/list_images.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/list_images.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,89 @@ +[ + { + "ancestor": { + "arch": "i686", + "created_at": "2010-10-04T19:03:37Z", + "description": "Creates a blank disk", + "id": "img-6lybc", + "name": "Blank Image", + "owner": "acc-tqs4c", + "resource_type": "image", + "source": "blank_10G", + "status": "deleted", + "url": "https://api.gb1.brightbox.com/1.0/images/img-6lybc", + "username": null + }, + "arch": "i686", + "compatibility_mode": false, + "created_at": "2010-10-02T19:03:37Z", + "description": "login: root using stored ssh key", + "disk_size": 1086, + "id": "img-99q79", + "min_ram": null, + "name": "CentOS 5.5 server", + "official": true, + "owner": "acc-tqs4c", + "public": true, + "resource_type": "image", + "source": "srv-s4mfq", + "source_type": "upload", + "status": "available", + "url": "https://api.gb1.brightbox.com/1.0/images/img-99q79", + "username": null, + "virtual_size": 10244 + }, + { + "ancestor": { + "arch": "i686", + "created_at": "2010-10-04T19:03:37Z", + "description": "Creates a blank disk", + "id": "img-6lybc", + "name": "Blank Image", + "owner": "acc-tqs4c", + "resource_type": "image", + "source": "blank_10G", + "status": "deleted", + "url": "https://api.gb1.brightbox.com/1.0/images/img-6lybc", + "username": null + }, + "arch": "x86_64", + "compatibility_mode": false, + "created_at": "2010-10-03T19:03:37Z", + "description": "login: root using stored 
ssh key", + "disk_size": 1133, + "id": "img-pnqnc", + "min_ram": null, + "name": "CentOS 5.5 server", + "official": true, + "owner": "acc-tqs4c", + "public": true, + "resource_type": "image", + "source": "srv-53fez", + "source_type": "upload", + "status": "available", + "url": "https://api.gb1.brightbox.com/1.0/images/img-pnqnc", + "username": null, + "virtual_size": 10240 + }, + { + "ancestor": null, + "arch": "i686", + "compatibility_mode": false, + "created_at": "2012-01-22T05:36:24Z", + "description": "Standard server with cloud-init", + "disk_size": 671, + "id": "img-joo06", + "min_ram": null, + "name": "Ubuntu Oneiric 11.10 server", + "official": false, + "owner": "acc-tqs4c", + "public": true, + "resource_type": "image", + "source": "oneiric-i386-20178.gz", + "source_type": "upload", + "status": "deprecated", + "url": "https://api.gb1.brightbox.com/1.0/images/img-joo06", + "username": "ubuntu", + "virtual_size": 1025 + } +] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/list_servers.json libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/list_servers.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/list_servers.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/list_servers.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,155 @@ + +[ + { + "account": { + "id": "acc-tqs4c", + "name": "bbctest", + "resource_type": "account", + "status": "active", + "url": "https://api.gb1.brightbox.com/1.0/account" + }, + "cloud_ips": [ + { + "id": "cip-tlrp3", + "public_ip": "109.107.35.16", + "resource_type": "cloud_ip", + "reverse_dns": "cip-109-107-35-16.gb1.brightbox.com", + "status": "mapped", + "url": "https://api.gb1.brightbox.com/1.0/cloud_ips/cip-tlrp3" + } + ], + "created_at": "2010-10-14T10:02:38Z", + "deleted_at": null, + "fqdn": "srv-xvpn7.gb1.brightbox.com", + "hostname": "srv-xvpn7", + "id": "srv-xvpn7", + "image": { + "arch": "i686", + "created_at": 
"2010-10-11T15:23:51Z", + "description": "", + "id": "img-arm8f", + "name": "Snapshot of srv-vf2a4 11 Oct 15:23", + "owner": "acc-tqs4c", + "resource_type": "image", + "source": "srv-vf2a4", + "status": "deleted", + "url": "https://api.gb1.brightbox.com/1.0/images/img-arm8f", + "username": null + }, + "interfaces": [ + { + "id": "int-519up", + "ipv4_address": "10.74.210.210", + "mac_address": "02:24:19:4a:d2:d2", + "resource_type": "interface", + "url": "https://api.gb1.brightbox.com/1.0/interfaces/int-519up" + } + ], + "name": "Ubuntu Image Builder Box", + "resource_type": "server", + "server_groups": [ + { + "created_at": "2011-08-24T08:41:56Z", + "default": true, + "description": "All new servers are added to this group unless specified otherwise.", + "id": "grp-irgkb", + "name": "default", + "resource_type": "server_group", + "url": "https://api.gb1.brightbox.com/1.0/server_groups/grp-irgkb" + } + ], + "server_type": { + "cores": 4, + "disk_size": 81920, + "handle": "small", + "id": "typ-urtky", + "name": "Brightbox Small Instance", + "ram": 2048, + "resource_type": "server_type", + "status": "available", + "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-urtky" + }, + "snapshots": [], + "started_at": "2012-03-28T15:25:56Z", + "status": "active", + "url": "https://api.gb1.brightbox.com/1.0/servers/srv-xvpn7", + "zone": { + "handle": "gb1-a", + "id": "zon-6mxqw", + "resource_type": "zone", + "url": "https://api.gb1.brightbox.com/1.0/zones/zon-6mxqw" + } + }, + { + "account": { + "id": "acc-tqs4c", + "name": "bbctest", + "resource_type": "account", + "status": "active", + "url": "https://api.gb1.brightbox.com/1.0/account" + }, + "cloud_ips": [], + "created_at": "2012-01-30T14:42:28Z", + "deleted_at": null, + "fqdn": "srv-742vn.gb1.brightbox.com", + "hostname": "srv-742vn", + "id": "srv-742vn", + "image": { + "arch": "x86_64", + "created_at": "2012-01-30T13:25:09Z", + "description": "", + "id": "img-j93gd", + "name": "Snapshot of srv-k0pug 30 Jan 13:25", 
+ "owner": "acc-tqs4c", + "resource_type": "image", + "source": "srv-k0pug", + "status": "available", + "url": "https://api.gb1.brightbox.com/1.0/images/img-j93gd", + "username": null + }, + "interfaces": [ + { + "id": "int-ztqbx", + "ipv4_address": "10.240.228.234", + "ipv6_address": "2a02:1348:14c:393a:24:19ff:fef0:e4ea", + "mac_address": "02:24:19:f0:e4:ea", + "resource_type": "interface", + "url": "https://api.gb1.brightbox.com/1.0/interfaces/int-ztqbx" + } + ], + "name": "Kernel builder", + "resource_type": "server", + "server_groups": [ + { + "created_at": "2011-08-24T08:41:56Z", + "default": true, + "description": "All new servers are added to this group unless specified otherwise.", + "id": "grp-irgkb", + "name": "default", + "resource_type": "server_group", + "url": "https://api.gb1.brightbox.com/1.0/server_groups/grp-irgkb" + } + ], + "server_type": { + "cores": 8, + "disk_size": 163840, + "handle": "medium", + "id": "typ-qdiwq", + "name": "Brightbox Medium Instance", + "ram": 4096, + "resource_type": "server_type", + "status": "available", + "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-qdiwq" + }, + "snapshots": [], + "started_at": "2012-03-28T15:26:43Z", + "status": "active", + "url": "https://api.gb1.brightbox.com/1.0/servers/srv-742vn", + "zone": { + "handle": "gb1-a", + "id": "zon-6mxqw", + "resource_type": "zone", + "url": "https://api.gb1.brightbox.com/1.0/zones/zon-6mxqw" + } + } +] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/list_server_types.json libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/list_server_types.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/list_server_types.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/list_server_types.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,79 @@ +[ + { + "cores": 2, + "disk_size": 20480, + "handle": "nano", + "id": "typ-4nssg", + "name": "Brightbox Nano Instance", + "ram": 512, + 
"resource_type": "server_type", + "status": "available", + "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-4nssg" + }, + { + "cores": 8, + "disk_size": 163840, + "handle": "medium", + "id": "typ-qdiwq", + "name": "Brightbox Medium Instance", + "ram": 4096, + "resource_type": "server_type", + "status": "available", + "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-qdiwq" + }, + { + "cores": 4, + "disk_size": 81920, + "handle": "small", + "id": "typ-urtky", + "name": "Brightbox Small Instance", + "ram": 2048, + "resource_type": "server_type", + "status": "available", + "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-urtky" + }, + { + "cores": 8, + "disk_size": 327680, + "handle": "large", + "id": "typ-mlbt7", + "name": "Brightbox Large Instance", + "ram": 8192, + "resource_type": "server_type", + "status": "available", + "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-mlbt7" + }, + { + "cores": 4, + "disk_size": 40960, + "handle": "mini", + "id": "typ-iqisj", + "name": "Brightbox Mini Instance", + "ram": 1024, + "resource_type": "server_type", + "status": "available", + "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-iqisj" + }, + { + "cores": 8, + "disk_size": 655360, + "handle": "xl", + "id": "typ-wdicw", + "name": "Brightbox XL Instance", + "ram": 16384, + "resource_type": "server_type", + "status": "available", + "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-wdicw" + }, + { + "cores": 8, + "disk_size": 1310720, + "handle": "xxl", + "id": "typ-lr76m", + "name": "Brightbox XXL Instance", + "ram": 32768, + "resource_type": "server_type", + "status": "available", + "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-lr76m" + } +] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/list_zones.json libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/list_zones.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/list_zones.json 1970-01-01 00:00:00.000000000 +0000 
+++ libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/list_zones.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,14 @@ +[ + { + "handle": "gb1-a", + "id": "zon-6mxqw", + "resource_type": "zone", + "url": "https://api.gb1.brightbox.com/1.0/zones/zon-6mxqw" + }, + { + "handle": "gb1-b", + "id": "zon-remk1", + "resource_type": "zone", + "url": "https://api.gb1.brightbox.com/1.0/zones/zon-remk1" + } +] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/token.json libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/token.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/brightbox/token.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/brightbox/token.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"access_token":"k1bjflpsaj8wnrbrwzad0eqo36nxiha", "expires_in": 3600} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_cloudspace_find.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_cloudspace_find.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_cloudspace_find.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_cloudspace_find.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,18 @@ + + + + + + +jobguid + + +result + +3e4f8bd5-718c-457e-86f4-024c560d7c28 + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_lan_find.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_lan_find.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_lan_find.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_lan_find.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,18 @@ + + + + + + +jobguid + + +result + +8d6a8b91-22fc-4be4-863b-11dc4456b315 + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_createFromTemplate.xml 
libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_createFromTemplate.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_createFromTemplate.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_createFromTemplate.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + +jobguid +e7b75dd9-f4b0-4c11-8b3f-748306964ad7 + + +result +96b2af78-88a0-48a6-a5bd-258e1d00c0b9 + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_delete.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_delete.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_delete.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_delete.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + +jobguid +ac93fbb6-6b5d-4248-a627-efba9f4d76c7 + + +result +1 + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_find_physical.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_find_physical.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_find_physical.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_find_physical.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,18 @@ + + + + + + +jobguid + + +result + +2aea45ee-3ea5-4b4f-88f0-7d4d48bed643 + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_find_templates.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_find_templates.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_find_templates.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_find_templates.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,18 @@ + + + + + + 
+jobguid + + +result + +0c4da918-9f88-4049-a09c-8ab69142736a + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_find_virtualdesktop.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_find_virtualdesktop.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_find_virtualdesktop.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_find_virtualdesktop.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + +jobguid + + +result + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_find_virtualserver.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_find_virtualserver.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_find_virtualserver.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_find_virtualserver.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,24 @@ + + + + + + +jobguid + + +result + +dea11e50-1b53-4046-8589-cf52eb7b0d25 +c52e4a42-72fe-4f34-bb80-c57d237fcbf9 +64f325ef-28ac-4907-bd37-572a13178edd +9a6b3101-b4ac-4ecb-b114-67d89994ac9b +1dd57d0d-0e23-471d-9f34-b673c7c18bc3 +d3c98151-f064-45fc-a90a-23c481723895 +01dedf71-0c37-441e-9687-085f8bb116ea + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_01dedf71-0c37-441e-9687-085f8bb116ea.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_01dedf71-0c37-441e-9687-085f8bb116ea.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_01dedf71-0c37-441e-9687-085f8bb116ea.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_01dedf71-0c37-441e-9687-085f8bb116ea.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,104 @@ + + + + + + 
+jobguid + + +result + + + +backup +0 + + +replicationstatus + + +machinerole + + +isbackup +0 + + +guid +01dedf71-0c37-441e-9687-085f8bb116ea + + +machinetype +VIRTUALSERVER + + +hostname + + +vmachinemanagerid + + +system +0 + + +template +0 + + +memory +512 + + +agentguid + + +status +CONFIGURED + + +nrcpu +1 + + +bootstatus +FROMDISK + + +description +AUTOTEST_94a50c23-715d-4fbc-8cdf-c3f436e04ec0 + + +importancefactor +5 + + +replicationrole + + +parentmachineguid +2aea45ee-3ea5-4b4f-88f0-7d4d48bed643 + + +name +AUTOTEST_94a50c23-715d-4fbc-8cdf-c3f436e04ec0 + + +hypervisor +VMWARE_ESX + + +replicationtype + + +os +ubuntu1010 + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_0c4da918-9f88-4049-a09c-8ab69142736a.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_0c4da918-9f88-4049-a09c-8ab69142736a.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_0c4da918-9f88-4049-a09c-8ab69142736a.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_0c4da918-9f88-4049-a09c-8ab69142736a.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,105 @@ + + + + + + +jobguid + + +result + + + +backup +0 + + +replicationstatus + + +machinerole + + +isbackup +0 + + +guid +0c4da918-9f88-4049-a09c-8ab69142736a + + +machinetype +VIRTUALSERVER + + +hostname + + +vmachinemanagerid +2 + + +system +0 + + +template +1 + + +memory +512 + + +agentguid + + +status +IMAGEONLY + + +nrcpu +1 + + +bootstatus +FROMDISK + + +description +template_ubuntu_esx + + +importancefactor +5 + + +replicationrole + + +parentmachineguid +411f22e3-2e04-431b-adc7-2a952d2cadc6 + + +name +ubuntu_esx + + +hypervisor +VMWARE_ESX + + +replicationtype + + +os +ubuntu1010 + + + + + + + + diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_1dd57d0d-0e23-471d-9f34-b673c7c18bc3.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_1dd57d0d-0e23-471d-9f34-b673c7c18bc3.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_1dd57d0d-0e23-471d-9f34-b673c7c18bc3.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_1dd57d0d-0e23-471d-9f34-b673c7c18bc3.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,104 @@ + + + + + + +jobguid + + +result + + + +backup +1 + + +replicationstatus + + +machinerole + + +isbackup +0 + + +guid +1dd57d0d-0e23-471d-9f34-b673c7c18bc3 + + +machinetype +VIRTUALSERVER + + +hostname + + +vmachinemanagerid +3 + + +system +0 + + +template +0 + + +memory +512 + + +agentguid + + +status +RUNNING + + +nrcpu +1 + + +bootstatus +FROMDISK + + +description + + +importancefactor +5 + + +replicationrole + + +parentmachineguid +2aea45ee-3ea5-4b4f-88f0-7d4d48bed643 + + +name +testvm_running + + +hypervisor +VMWARE_ESX + + +replicationtype + + +os +ubuntu1010 + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_2aea45ee-3ea5-4b4f-88f0-7d4d48bed643.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_2aea45ee-3ea5-4b4f-88f0-7d4d48bed643.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_2aea45ee-3ea5-4b4f-88f0-7d4d48bed643.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_2aea45ee-3ea5-4b4f-88f0-7d4d48bed643.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,104 @@ + + + + + + +jobguid + + +result + + + +backup +0 + + +replicationstatus + + +machinerole +COMPUTENODE + + +isbackup +0 + + +guid +2aea45ee-3ea5-4b4f-88f0-7d4d48bed643 + + +machinetype +PHYSICAL + + 
+hostname +VMWARE-ESX-10-101-163-1 + + +vmachinemanagerid + + +system +0 + + +template +0 + + +memory +262111 + + +agentguid + + +status +RUNNING + + +nrcpu +24 + + +bootstatus +FROMDISK + + +description + + +importancefactor +5 + + +replicationrole + + +parentmachineguid + + +name +VMWARE-ESX-10-101-163-1 + + +hypervisor +VMWARE_ESX + + +replicationtype + + +os +esxi5 + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_64f325ef-28ac-4907-bd37-572a13178edd.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_64f325ef-28ac-4907-bd37-572a13178edd.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_64f325ef-28ac-4907-bd37-572a13178edd.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_64f325ef-28ac-4907-bd37-572a13178edd.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,104 @@ + + + + + + +jobguid + + +result + + + +backup +0 + + +replicationstatus + + +machinerole + + +isbackup +0 + + +guid +64f325ef-28ac-4907-bd37-572a13178edd + + +machinetype +VIRTUALSERVER + + +hostname + + +vmachinemanagerid +9 + + +system +0 + + +template +0 + + +memory +512 + + +agentguid + + +status +RUNNING + + +nrcpu +1 + + +bootstatus +FROMDISK + + +description + + +importancefactor +5 + + +replicationrole + + +parentmachineguid +2aea45ee-3ea5-4b4f-88f0-7d4d48bed643 + + +name +testvm_clone + + +hypervisor +VMWARE_ESX + + +replicationtype + + +os +ubuntu1010 + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_96b2af78-88a0-48a6-a5bd-258e1d00c0b9.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_96b2af78-88a0-48a6-a5bd-258e1d00c0b9.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_96b2af78-88a0-48a6-a5bd-258e1d00c0b9.xml 1970-01-01 
00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_96b2af78-88a0-48a6-a5bd-258e1d00c0b9.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,104 @@ + + + + + + +jobguid + + +result + + + +backup +0 + + +replicationstatus + + +machinerole + + +isbackup +0 + + +guid +96b2af78-88a0-48a6-a5bd-258e1d00c0b9 + + +machinetype +VIRTUALSERVER + + +hostname + + +vmachinemanagerid + + +system +0 + + +template +0 + + +memory +512 + + +agentguid + + +status +CREATED + + +nrcpu +1 + + +bootstatus +FROMDISK + + +description +node-name + + +importancefactor +5 + + +replicationrole + + +parentmachineguid +2aea45ee-3ea5-4b4f-88f0-7d4d48bed643 + + +name +node-name + + +hypervisor +VMWARE_ESX + + +replicationtype + + +os +ubuntu1010 + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_9a6b3101-b4ac-4ecb-b114-67d89994ac9b.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_9a6b3101-b4ac-4ecb-b114-67d89994ac9b.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_9a6b3101-b4ac-4ecb-b114-67d89994ac9b.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_9a6b3101-b4ac-4ecb-b114-67d89994ac9b.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,104 @@ + + + + + + +jobguid + + +result + + + +backup +1 + + +replicationstatus + + +machinerole + + +isbackup +0 + + +guid +9a6b3101-b4ac-4ecb-b114-67d89994ac9b + + +machinetype +VIRTUALSERVER + + +hostname + + +vmachinemanagerid +7 + + +system +0 + + +template +0 + + +memory +512 + + +agentguid + + +status +STOPPING + + +nrcpu +1 + + +bootstatus +FROMDISK + + +description + + +importancefactor +5 + + +replicationrole + + +parentmachineguid +2aea45ee-3ea5-4b4f-88f0-7d4d48bed643 + + +name +testvm_stopped + + +hypervisor +VMWARE_ESX + + +replicationtype + + +os +ubuntu1010 + + + + + + + + 
diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_c52e4a42-72fe-4f34-bb80-c57d237fcbf9.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_c52e4a42-72fe-4f34-bb80-c57d237fcbf9.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_c52e4a42-72fe-4f34-bb80-c57d237fcbf9.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_c52e4a42-72fe-4f34-bb80-c57d237fcbf9.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,103 @@ + + + + + + +jobguid + + +result + + + +backup +1 + + +replicationstatus + + +machinerole + + +isbackup +0 + + +guid +c52e4a42-72fe-4f34-bb80-c57d237fcbf9 + + +machinetype +VIRTUALSERVER + + +hostname + + +vmachinemanagerid + + +system +0 + + +template +0 + + +memory +512 + + +agentguid + + +status +CREATED + + +nrcpu +1 + + +bootstatus +FROMDISK + + +description + + +importancefactor +5 + + +replicationrole + + +parentmachineguid +2aea45ee-3ea5-4b4f-88f0-7d4d48bed643 + + +name +testvm_created + + +hypervisor +VMWARE_ESX + + +replicationtype + + +os +ubuntu1010 + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_d3c98151-f064-45fc-a90a-23c481723895.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_d3c98151-f064-45fc-a90a-23c481723895.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_d3c98151-f064-45fc-a90a-23c481723895.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_d3c98151-f064-45fc-a90a-23c481723895.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,104 @@ + + + + + + +jobguid + + +result + + + +backup +0 + + +replicationstatus + + +machinerole + + +isbackup +0 + + +guid +d3c98151-f064-45fc-a90a-23c481723895 + + +machinetype +VIRTUALSERVER + + 
+hostname + + +vmachinemanagerid + + +system +0 + + +template +0 + + +memory +512 + + +agentguid + + +status +CONFIGURED + + +nrcpu +1 + + +bootstatus +FROMDISK + + +description +AUTOTEST_ec71742b-bd74-4908-8327-717c349e0d79 + + +importancefactor +5 + + +replicationrole + + +parentmachineguid +2aea45ee-3ea5-4b4f-88f0-7d4d48bed643 + + +name +AUTOTEST_ec71742b-bd74-4908-8327-717c349e0d79 + + +hypervisor +VMWARE_ESX + + +replicationtype + + +os +ubuntu1010 + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_dea11e50-1b53-4046-8589-cf52eb7b0d25.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_dea11e50-1b53-4046-8589-cf52eb7b0d25.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_dea11e50-1b53-4046-8589-cf52eb7b0d25.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_list_machineguid_dea11e50-1b53-4046-8589-cf52eb7b0d25.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,104 @@ + + + + + + +jobguid + + +result + + + +backup +1 + + +replicationstatus + + +machinerole + + +isbackup +0 + + +guid +dea11e50-1b53-4046-8589-cf52eb7b0d25 + + +machinetype +VIRTUALSERVER + + +hostname + + +vmachinemanagerid +5 + + +system +0 + + +template +0 + + +memory +512 + + +agentguid + + +status +PAUSED + + +nrcpu +1 + + +bootstatus +FROMDISK + + +description + + +importancefactor +5 + + +replicationrole + + +parentmachineguid +2aea45ee-3ea5-4b4f-88f0-7d4d48bed643 + + +name +testvm_paused + + +hypervisor +VMWARE_ESX + + +replicationtype + + +os +ubuntu1010 + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_listSnapshots.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_listSnapshots.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_listSnapshots.xml 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_listSnapshots.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,82 @@ + + + + + + +jobguid + + +result + + + +description + + +backuplabel +somelabel + + +timestamp +2013-07-25 13:20:52.093959 + + +consistent +t + + +name +node-name-2013-07-25 13:20:52.093931 + + +snapshottype +PAUSED + + +guid +4685d9b5-80e8-44d8-a094-f6f799f90954 + + +parentmachineguid +26348bf8-2e2f-41e8-83c3-11fb5d14c9d2 + + + + +description + + +backuplabel + + +timestamp +2013-07-25 13:19:39.437168 + + +consistent +t + + +name +node-name-2013-07-25 13:19:39.437142 + + +snapshottype +PAUSED + + +guid +5c00bd9e-6c18-4bee-83bf-5ff14426868a + + +parentmachineguid +96b2af78-88a0-48a6-a5bd-258e1d00c0b9 + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_reboot.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_reboot.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_reboot.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_reboot.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + +jobguid +ab93fcb6-6b5d-4248-a627-efba9f4d76c7 + + +result +1 + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_rollback.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_rollback.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_rollback.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_rollback.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + +jobguid +ababacb6-7a5d-4248-a627-efba9f4d76c7 + + +result +1 + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_snapshot.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_snapshot.xml --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_snapshot.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_snapshot.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + +jobguid +16556e13-a2f9-492c-b094-dc62bc7d2cc9 + + +result +5c00bd9e-6c18-4bee-83bf-5ff14426868a + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_start.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_start.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_start.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_start.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + +jobguid +ab93fcb6-7a5d-4248-a627-efba9f4d76c7 + + +result +1 + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_stop.xml libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_stop.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudframes/_machine_stop.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudframes/_machine_stop.xml 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + +jobguid +ab93fcb6-7a5d-4248-a627-efba9f4d76c7 + + +result +1 + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma/drives_clone.txt libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma/drives_clone.txt --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma/drives_clone.txt 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma/drives_clone.txt 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,19 @@ +status active +use dbserver,general +name SQL Server Standard 2008 R2 - Windows Server Standard 2008 R2 - 64bit English pub clone +bits 64 +url http://www.microsoft.com/sqlserver/2008/en/us/ +read:bytes 4096 +description 
Please refer to the install notes for a full guide to initial configuration. +write:bytes 21474840576 +drive a814def5-1789-49a0-bf88-7abe7bb1682a +install_notes ***You must update the default Administrator password for Windows Server Standard 2008 and the Super Administrator password (sa) for SQL Server Standard 2008***\n\nPre-installed Windows Server 2008 Standard R2 64bit English on 15/07/2010\n=========================================================================\n\n1. Minimum Hardware Requirements\n--------------------------------\n\nThe recommended minimum hardware requirements for the use of SQL Server Standard 2008 R2 with Windows Server Standard 2008 R2 as publishes by Microsoft can be found through the following link:\n\nhttp://msdn.microsoft.com/en-us/library/ms143506.aspx\n\n\n2. Update your administrator password\n-------------------------------------\n\nThe default administrator password is set to: CloudSigma1\n\nPlease CHANGE this IMMEDIATELY after first logging on.\n\na) Open the "Control Panel" and select "User Accounts"\n\nb) Select "Change your Windows Password"\n\nc) The Administrator user icon will be shown on the right, select again "Change your Password", and on this screen update your details accordingly\n\n\n3. Expanding your drive\n-----------------------\n\nThe system is fully installed, but you will need to extend the\ndisk partition to cover the whole of your drive. To do this:\n\na) Open the "Computer Management" tool from "Administrative Tools" on the "Start" menu.\n\nb) Select "Storage" then "Disk Management" in the left hand pane\n\nc) Right-click on the 12.90 GB NTFS primary partition, and select "Extend Volume"\n\nd) Enter the amount of disk space that you want to increase the Volume size by (the default will be the maximum available)\n\nYou will need to repeat this procedure if you ever resize this drive in the future.\n\n\n4. 
Enabling Remote Access\n-------------------------\n\nAfter logging in to VNC for the first time to configure your new Windows server, we recommend that if you are logging in from a Windows Desktop that you enable Remote Desktop for better access performance. To do this, follow these instructions:\n\na) Select "Start" | "Applications" | "Control Panel"\n\nb) Select "System and Security"\n\nc) Under "System" select "Allow Remote Access"\n\nd) Select the corresponding connection according to your Security Configuration\n\n\n5. Pinging Service\n------------------\n\nThe Pinging service has been turned on by default, if you wish to disable it please follow these instructions:\n\na) Select the "Windows Firewall with Advanced Security" tool from "Administrative Tools" on the "Start" menu.\n\nb) On the left hand pane, select "Inbound Rules"\n\nc) On the middle pane, select the rules "File and Printer Sharing (Echo Request - ICMPv4-In)" and "File and Printer Sharing (Echo Request - ICMPv6-In)"\n\nd) From the right-hand pane, select "Disable Rules"\n\n\nSQL Server 2008 R2 on 15/07/2010\n================================\n\n1. Change the Super Administrator Password (sa). \n--------------------------------------------------------------------\n\nThe default password has been set to "CloudSigma1"\n\na) Open "Microsoft SQL Server Management Studio"\n\nb) Connect to the Server Using "Windows Indentificaiton"\n\nc) From the Object Explorer select "Security" then "Longins"\n\nd) Right-click on sa and select "Properties"\n\ne) Enter the new password into "Password" and "Confirm Password" and press "OK"\n\n\n2. 
The following features were installed:\n-----------------------------------------------------\n\na) Main features\n\n-Database Engine Services\n-SQL Server Replication\n-Full-Text Search\n-Analysis Services\n-Reporting Services\n\nb) Shared Features\n\n-Business Intelligengce Development Studio\n-Client Tools Connectivity\n-Integration Services\n-Clinet Tools Backwards Compatibility\n-Clinet Tools SDK\n-SQL Server Books Online\n-Mangement Tools - Basic\n-Management Tools - Complete\n-SQL Client Connectivity SDK\n-Microsoft Sync Framework\n\n3 The following services were configured:\n--------------------------------------------------------\n\n\nService: SQL Server Agent\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Manual\n\nService: SQL Server Database Engine\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Automatic\n\nService: SQL Server Analysis Services\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Disabled\n\nService: SQL Server Reporting Services\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Disabled\n\nService: SQL Server Integration Services 10.1\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Disabled\n \nService: SQL Full-text filter Daemon Lanuch\nUser: NT AUTHORITY\LOCALSERVICE\nStartup Type: Disabled\n\nService: SQL Server Browser\nUser: NT AUTHORITY\LOCALSERVICE\nStartup Type: Disabled\n\nFor detailed server installation configuration refer to the following installation log files on the system:\nC:\Program Files\Microsoft SQL Server\100\Setup Bootstrap\Log\20100716_162426\Summary_WIN-K0F21FV1C1V_20100716_162426.txt\n +volume 000431a5-46d9-4a67-9c03-3c3402a41992 +host 00043e69-ac57-45b1-8692-75db24064fb9 +os windows +user 93b34fd9-7986-4b25-8bfd-98a50383605d +read:requests 1 +licenses msft_p73_04837 msft_tfa_00009 +type disk +write:requests 5242881 +size 21474836480 diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma/drives_info.txt libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma/drives_info.txt --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma/drives_info.txt 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma/drives_info.txt 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,39 @@ +status active +use general +name test node +bits 64 +url http://www.centos.org/ +read:bytes 4096 +description This is a pre-installed ready CentOS system that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. CentOS is an Enterprise-class Linux Distribution derived from sources freely provided to the public by a prominent North American Enterprise Linux vendor. CentOS conforms fully with the upstream vendors redistribution policy and aims to be 100% binary compatible. +write:bytes 21474840576 +os linux +drive 3d18db4b-f9bd-4313-b034-12ae181efa88 +install_notes ***You must update the default root/superuser password for CentOS 5.5 on first login.***\n\nPre-installed CentOS 5.5 64bit Linux on 24/07/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. 
Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: root\nThe default root/superuser password is set to: CloudSigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\nc) When prompted enter the password 'CloudSigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nd) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ne) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. 
We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 1 +free true +type disk +write:requests 5242881 +size 53687091200 + +status active +use general +name test node +bits 64 +url http://www.centos.org/ +read:bytes 4096 +description This is a pre-installed ready CentOS system that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. CentOS is an Enterprise-class Linux Distribution derived from sources freely provided to the public by a prominent North American Enterprise Linux vendor. CentOS conforms fully with the upstream vendors redistribution policy and aims to be 100% binary compatible. +write:bytes 21474840576 +os linux +drive 3d18db4b-f9bd-4313-b034-12ae181efa99 +install_notes ***You must update the default root/superuser password for CentOS 5.5 on first login.***\n\nPre-installed CentOS 5.5 64bit Linux on 24/07/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. 
\n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: root\nThe default root/superuser password is set to: CloudSigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\nc) When prompted enter the password 'CloudSigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nd) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ne) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. 
You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 1 +free true +type disk +write:requests 5242881 +size 103687091200 diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma/drives_single_info.txt libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma/drives_single_info.txt --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma/drives_single_info.txt 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma/drives_single_info.txt 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,19 @@ +status active +use general +name test node +bits 64 +url http://www.centos.org/ +read:bytes 4096 +description This is a pre-installed ready CentOS system that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. CentOS is an Enterprise-class Linux Distribution derived from sources freely provided to the public by a prominent North American Enterprise Linux vendor. CentOS conforms fully with the upstream vendors redistribution policy and aims to be 100% binary compatible. 
+write:bytes 21474840576 +os linux +drive d18119ce_7afa_474a_9242_e0384b160220 +install_notes ***You must update the default root/superuser password for CentOS 5.5 on first login.***\n\nPre-installed CentOS 5.5 64bit Linux on 24/07/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: root\nThe default root/superuser password is set to: CloudSigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\nc) When prompted enter the password 'CloudSigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nd) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ne) Please note your root/superuser password is different from your VNC password. 
You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. 
+encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 1 +free true +type disk +write:requests 5242881 +size 53687091200 diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma/drives_standard_info.txt libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma/drives_standard_info.txt --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma/drives_standard_info.txt 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma/drives_standard_info.txt 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,1735 @@ +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description Yoper is a multipurpose high performance operating system which has been carefully optimised for PC's with either 686 or higher processor types. The binaries that come with Yoper have been built from scratch using the original sources combined with the best features of major distros, measuring up to the demanding proliferation of network communications and more intensive digital multimedia, graphics and audio capabilities which are ushering in a new era of business productivity enabled by a new generation of sophisticated microprocessors, and business application tools. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. 
You will be able to complete the software installation via VNC and start using your new server. +volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 102401 +name Yoper 2010 Linux with XFCE Install CD +url http://yoper-linux.org/ +read:bytes 4096 +claim:type shared +drive 7e3e7628-d1e6-47c6-858d-7b54aac5c916 +write:bytes 419434496 +read:requests 1 +os linux + +type cdrom +size 2621440000 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description Ultimate Edition, first released in December 2006, is a fork of Ubuntu. The goal of the project is to create a complete, seamlessly integrated, visually stimulating, and easy-to-install operating system. Single-button upgrade is one of several special characteristics of this distribution. Other main features include custom desktop and theme with 3D effects, support for a wide range of networking options, including WiFi and Bluetooth, and integration of many extra applications and package repositories. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 640001 +name Ultimate 2.6 Linux 64bit Install CD +url http://ultimateedition.info/ +read:bytes 440279040 +claim:type shared +drive 526ed5cb-6fbe-46fb-a064-7707c844d774 +write:bytes 2621444096 +read:requests 107490 +os linux + +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description FreeBSD is a UN*X-like operating system for the i386, IA-64, PC-98, Alpha/AXP, and UltraSPARC platforms based on U.C. Berkeley's "4.4BSD-Lite" release, with some "4.4BSD-Lite2" enhancements. It is also based indirectly on William Jolitz's port of U.C. Berkeley's "Net/2" to the i386, known as "386BSD", though very little of the 386BSD code remains. FreeBSD is used by companies, Internet Service Providers, researchers, computer professionals, students and home users all over the world in their work, education and recreation. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 168961 +name FreeBSD 8.0 Linux 64bit Install CD +url http://www.freebsd.org/ +read:bytes 479866880 +claim:type shared +drive 95380e4c-4f69-432d-be2b-1965a282bdb9 +write:bytes 692064256 +read:requests 117155 +os other + +type cdrom +size 218103808 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description The Fedora 13 x86_64 (64bit) network installation CD +favourite true +install_notes The Fedora 13 network installaton cd will install, through the network, the latest Fedora packages; since it includes the "updates" repo.\n\nThe minimal install option offers great ground to build on top of a very nice base. This configuration is recommended for most servers.\n\nBuild your own and share them wth us! +volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 53249 +name Fedora 13 Linux x86 64bit netinst Install CD +url http://fedoraproject.org/ +read:bytes 1444963840 +claim:type shared +drive 14b1e97f-5bba-4cf1-aec4-7b7b573826c2 +write:bytes 218107904 +read:requests 352119 +os linux + +type cdrom +size 452984832 +use security +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description The Untangle Lite package offers a collection of free, open-source software applications to run on the Untangle Server. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. 
You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server.\n +volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 110593 +name Untangle 7.4 Linux 64bit Install CD +url http://www.untangle.com/ +read:bytes 4096 +claim:type shared +drive 06c39099-9f75-40f4-b2e1-6012c87f3579 +write:bytes 452988928 +read:requests 1 +os linux + +type cdrom +size 138412032 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type preinstalled +status active +description Puppy Linux is extraordinarily small, yet quite full featured. Puppy boots into a 64MB ramdisk, and that's it, the whole caboodle runs in RAM. Unlike live CD distributions that have to keep pulling stuff off the CD, Puppy in its entirety loads into RAM. This means that all applications start in the blink of an eye and respond to user input instantly. Puppy Linux has the ability to boot off a flash card or any USB memory device, CDROM, Zip disk or LS/120/240 Superdisk, floppy disks, internal hard drive. It can even use a multisession formatted CD-R/DVD-R to save everything back to the CD/DVD with no hard drive required at all! +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. 
You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 33793 +name Puppy 5.1 Linux Install CD +url www.puppylinux.org +read:bytes 276828160 +claim:type shared +drive 60111502-6ff3-43e1-9485-5be775f81657 +write:bytes 138416128 +read:requests 67585 +os linux + +type cdrom +size 171966464 +use router,general,networking +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type preinstalled +status active +description Vyatta project is a Linux-based router and firewall offering a free community edition and two commercial editions with support. Vyatta has changed the networking world by developing the first commercially supported, open-source router & firewall solution. Vyatta combines the features, performance and reliability of an enterprise router & firewall with the cost savings, flexibility and security of open source. +favourite true +install_notes \nCD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 41985 +name Vyatta 6.1 Live CD +url www.yyatta.com +read:bytes 687869952 +claim:type shared +drive 8159ab9b-9703-48f6-a206-ac26efe8fdc2 +write:bytes 171970560 +read:requests 167937 +os linux + +type cdrom +size 721420288 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description ZenLive Linux is a live cd derived from Zenwalk distribution. Zenwalk Linux (formerly Minislack) is a Slackware-based Linux distribution with focus on Internet applications, multimedia and programming tools. ZenLive Linux LiveCD is a complete system with software for Internet browsing, mail, chat, multimedia and office, as well as for programming in C, Perl, Python and Ruby. The main objectives of Zenwalk Linux are to be simple and fast, provide one application per task, be a complete development and desktop environment and to be small enough to fit on a 400MB ISO image. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC. 
+volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 176129 +name Zenlive 6.4 Linux Install CD +url http://www.zenwalk.org/ +read:bytes 721424384 +claim:type shared +drive fcc2aa68-24ce-438e-8386-1d4e66336155 +write:bytes 721424384 +read:requests 176129 +os linux + +type cdrom +claimed 00059836-5512-4ce2-bf66-4daab2d994e4:guest:2e82c87e-61a1-443c-bc81-5c3167df5c11:ide:0:0 0014dbec-e818-4cf4-b467-d4f0dad10246:guest:3234b1fc-415f-4019-ada1-27781aea8750:ide:0:0 +size 4198498304 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description CentOS as a group is a community of open source contributors and users. Typical CentOS users are organisations and individuals that do not need strong commercial support in order to achieve successful operation. CentOS is 100% compatible rebuild of the Red Hat Enterprise Linux, in full compliance with Red Hat's redistribution requirements. CentOS is for people who need an enterprise class operating system stability without the cost of certification and support. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 1025025 +name CentOS 5.5 Linux 32bit Install DVD +url http://www.centos.org +read:bytes 16706375680 +claim:type shared +drive 6e0e2282-c29a-4d19-97e6-7ddb7cdf0dd2 +write:bytes 4198502400 +read:requests 4078705 +os linux + +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd,livecd +status active +description Ubuntu is a complete desktop Linux operating system, freely available with both community and professional support. The Ubuntu community is built on the ideas enshrined in the Ubuntu Manifesto: that software should be available free of charge, that software tools should be usable by people in their local language and despite any disabilities, and that people should have the freedom to customise and alter their software in whatever way they see fit."Ubuntu&quot; is an ancient African word, meaning"humanity to others&quot;. The Ubuntu distribution brings the spirit of Ubuntu to the software world. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 179201 +name Ubuntu 10.04 Linux 32bit Install CD +url http://www.ubuntu.com/ +read:bytes 1298436608 +claim:type shared +drive 0e305bb9-f512-4d4a-894c-4a733cae570f +write:bytes 734007296 +read:requests 295036 +os linux + +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd,livecd +status active +description Chakra is a user-friendly and powerful distribution and live CD based on Arch Linux. It features a graphical installer, automatic hardware detection and configuration, the latest KDE desktop, and a variety of tools and extras. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend using it as an installation CD and not just a live CD.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 204801 +name Chakra Alpha 5 Linux 64bit Install and Live CD +url http://chakra-project.org/ +read:bytes 185200640 +claim:type shared +drive c0856590-c2b1-4725-9448-bba7c74d35dc +write:bytes 838864896 +read:requests 45215 +os linux + +type cdrom +claimed 00043e69-ac57-45b1-8692-75db24064fb9:guest:4c014a4e-615e-489e-b22a-bf966bce83d7:ide:0:0 00016115-af87-452b-a3bf-3affc8a7d934:guest:0a9d4833-fc5f-4825-9626-5a3e6555d329:ide:0:0 0014dbec-e818-4cf4-b467-d4f0dad10246:guest:71d09667-fd6b-491a-949f-6a7ab9c70907:ide:0:0 0008d252-5102-43a0-82c6-18e8e2dd2bff:guest:c8264872-67a1-4452-a736-8dc6ef9eb07d:ide:0:0 00016115-af87-452b-a3bf-3affc8a7d934:guest:6efe92c3-0126-4ddb-9140-73706c804c3b:ide:0:0 000932a7-c74f-4de3-bfc4-227435f78998:guest:158c515f-1649-44f0-895c-f0de39575a1c:ide:0:0 00079b57-1b29-4a89-a8d0-1d648fc20804:guest:7d62f26e-2062-469e-846a-b926dffb00b1:ide:0:0 +size 4697620480 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description - +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 1146881 +name Debian Linux 5.0 Install CD +url http://www.debian.org/ +read:bytes 4612921344 +claim:type shared +drive 794a068d-228c-4758-81f0-e1bc955a6cce +write:bytes 4697624576 +read:requests 985768 +os linux + +type cdrom +size 2751463424 +use dev,general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type livecd +status active +description Scientific Linux is a recompiled Red Hat Enterprise Linux put together by various labs and universities around the world. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC.\n +volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 674125 +name Scientific Linux 5.5 64bit Live CD +url https://www.scientificlinux.org/ +read:bytes 10903552 +claim:type shared +drive 7aa74ca3-4c64-4b08-9972-eddeb38a650d +write:bytes 2761216000 +read:requests 2662 +os linux + +type cdrom +size 612368384 +use networking,other +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description Nexenta OS is a free and open source operating system combining the OpenSolaris kernel with GNU application userland. Nexenta OS runs on Intel/AMD 32-/64-bit hardware and is distributed as a single installable CD. 
Upgrades and binary packages not included on the CD can be installed from Nexenta OS repository using Advanced Packaging Tool. In addition, source based software components can be downloaded from network repositories available at Debian/GNU Linux and Ubuntu Linux. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 149357 +name NexentaStor 3.0.3 Linux 32bit/64bit Install CD +url http://www.nexenta.org/ +read:bytes 2822144 +claim:type shared +drive 2c3369a5-22eb-4462-8137-35a62b7a93cf +write:bytes 611766272 +read:requests 689 +os other + +type cdrom +size 301989888 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description Zenwalk Linux (formerly Minislack) is a Slackware-based GNU/Linux operating system with a goal of being slim and fast by using only one application per task and with focus on graphical desktop and multimedia usage. Zenwalk features the latest Linux technology along with a complete programming environment and libraries to provide an ideal platform for application programmers. Zenwalk's modular approach also provides a simple way to convert Zenwalk Linux into a finely-tuned modern server (e.g. 
LAMP, messaging, file sharing). +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 73342 +name Zenwalk Core 6.4 Install CD +url http://yoper-linux.org/ +read:bytes 1576960 +claim:type shared +drive 3d58f1c6-9ec4-4963-917e-9917d39e5003 +write:bytes 300408832 +read:requests 385 +os linux + +type cdrom +size 67108864 +use general,security +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type preinstalled +status active +description IPFire is a linux-distribution that focusses on easy setup, good handling and a high niveau of security. It is operable via an intuitive webinterface, which offers a lot of playground for beginners and even experienced administrators. IPFire is maintained by experienced developers, who are really concerned about security and regulary updates to keep it secure. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. 
Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 16385 +name IPFire 2.7 Core 40 Linux 32bit Install CD +url http://www.ipfire.org/ +read:bytes 4096 +claim:type shared +drive 231aa9af-f2ef-407c-9374-76a1215b94d3 +write:bytes 67112960 +read:requests 1 +os linux + +type cdrom +size 734003200 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description Legacy OS (formerly TEENpup Linux) is a distribution based on Puppy Linux. Although the original concept was to create a flavour of Puppy Linux with more applications and a more appealing desktop aimed at teenage users, Legacy OS has now grown to become a general purpose distribution. It comes with a large number of applications, browser plugins and media codecs as standard software. Despite these enhancements Legacy OS is still perfectly suitable for installation on older and low-resource computers, as well as modern hardware. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC. 
+volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 179201 +name Legacy OS Linux 32bit Install CD +url http://pupweb.org/wikka/TeenPup +read:bytes 4096 +claim:type shared +drive 39f24226-dc6c-40e2-abc8-e8f2da976671 +write:bytes 734007296 +read:requests 1 +os linux + +type cdrom +size 209715200 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description Yohsuke Ooi has announced the release of Momonga Linux 7, a Japanese community distribution loosely modelled on Fedora. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 51201 +name Momonga 7 Linux 64bit Net Install CD +url http://www.momonga-linux.org/ +read:bytes 4096 +claim:type shared +drive f424888b-e66e-43f4-99c1-2991a5b82894 +write:bytes 209719296 +read:requests 1 +os linux + +type cdrom +size 713031680 +use general,security,systemrecovery +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type livecd +status active +description CAINE (Computer Aided INvestigative Environment) is an Ubuntu-based GNU/Linux live distribution created as a project of Digital Forensics for Interdepartmental Centre for Research on Security (CRIS), supported by the University of Modena and Reggio Emilia in Italy. The CAINE forensic framework contains a collection of tools wrapped up into a user-friendly environment. It introduces novel features - it aims to fill the interoperability gap across different forensic tools, it provides a homogeneous GUI that guides digital investigators during the acquisition and analysis of electronic evidence, and it offers a semi-automatic process for the documentation and report compilation. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC. 
+volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 174081 +name Caine 2.0 Linux 32bit Live CD +url http://www.caine-live.net/ +read:bytes 4096 +claim:type shared +drive 9768a0d1-e90c-44eb-8da7-06bca057cb93 +write:bytes 713035776 +read:requests 1 +os linux + +type cdrom +size 708837376 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description Kongoni GNU/Linux is a Slackware-based, desktop-oriented GNU/Linux distribution and live CD. Its main features include a graphical installer, a Kongoni Integrated Setup System (KISS), and an easy-to-use Ports Installation GUI (PIG). The distribution's package management borrows its main concepts from BSD ports, with an intuitive graphical package installer that compiles and installs programs from source code on the user's system. Kongoni, which means gnu (also known as wildebeest) in Shona, includes only software that complies with Free Software Foundation's definition of software freedom. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 173057 +name Kongoni 1.12.3 Linux 32bit Live CD +url http://www.kongoni.org/ +read:bytes 4096 +claim:type shared +drive 6ac51b9d-a1db-44fc-b325-30bdefd0dd0a +write:bytes 708841472 +read:requests 1 +os linux + +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type livecd +status active +description Debris Linux is a minimalist, desktop-oriented distribution and live CD based on Ubuntu. It includes the GNOME desktop and a small set of popular desktop applications, such as GNOME Office, Firefox web browser, Pidgin instant messenger, and ufw firewall manager. Debris Linux ships with a custom kernel, a custom system installer called DebI, and a script that makes it easy to save and restore any customisations made while in live mode. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC. 
+volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Debris 2.0 Linux Live CD +url http://debrislinux.org/ +read:bytes 0 +claim:type shared +drive 258e1026-36bf-4368-ba7c-52836de4f757 +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 1887436800 +use systemrecovery,general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type livecd +status active +description Toorox is a Linux Live-DVD based on Gentoo that starts as a bootable media using KNOPPIX technology. While the system is booting, all necessary drivers will be included automatically (lshwd). Toorox is only using the memory and an existing swap partition at runtime, so your hard disks won't be touched by default. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. 
You will be able to complete the software installation via VNC and start using your new server.\n +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Toorox 05.2010 Linux 64bit Live CD +url http://toorox.de/ +read:bytes 0 +claim:type shared +drive 8fa3bc29-47e8-496a-89c6-02872a0d2642 +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 2516582400 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description FreeBSD is a UN*X-like operating system for the i386, IA-64, PC-98, Alpha/AXP, and UltraSPARC platforms based on U.C. Berkeley's"4.4BSD-Lite&quot; release, with some"4.4BSD-Lite2&quot; enhancements. It is also based indirectly on William Jolitz's port of U.C. Berkeley's"Net/2&quot; to the i386, known as"386BSD&quot;, though very little of the 386BSD code remains. FreeBSD is used by companies, Internet Service Providers, researchers, computer professionals, students and home users all over the world in their work, education and recreation. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name FreeBSD 7.3 Linux 64bit Install CD +url http://www.freebsd.org/ +read:bytes 13836288 +claim:type shared +drive 92444414-dc65-451d-9018-2b1ab8db4ceb +write:bytes 0 +read:requests 3378 +os other + +type cdrom +size 1073741824 +use systemrecovery,security +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd,livecd +status active +description KANOTIX is a Linux distribution based on the latest stable release of Debian GNU/Linux. It is built on top of a latest kernel which is carefully patched with fixes and drivers for most modern hardware. Although it can be used as a live CD, it also includes a graphical installer for hard disk installation. The user-friendly nature of the distribution is further enhanced by a custom-built control centre and specialist scripts. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend using it as an installation CD and not just a live CD.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Kanotix 4-2.6 Linux with KDE 64bit Install and Live CD +url http://www.kanotix.com/ +read:bytes 232169472 +claim:type shared +drive c7c33c07-5e28-42c8-9800-eb40e2aef287 +write:bytes 0 +read:requests 56682 +os linux + +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description TinyMe is a Unity Linux-based mini-distribution. It exists to ease installation of Unity Linux on older computers, to provide a minimal installation for developers, and to deliver a fast Linux installation for where only the bare essentials are needed. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name TinyMe 2010 Linux Install CD +url http://tinymelinux.com/ +read:bytes 0 +claim:type shared +drive 87b3f98c-c95c-454d-a002-bef63f5bbc1a +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description moonOS is a complete, Ubuntu-based distribution featuring the LXDE and Enlightenment 17 desktop managers and imaginative, original artwork. A project created and designed by Cambodian artist Chanrithy Thim, moonOS is intended as an operating system for any desktop, laptop or virtual machine. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name MoonOS 3 Linux 32bit Install CD +url http://www.moonos.org/ +read:bytes 0 +claim:type shared +drive d2651d5b-3760-41be-a8b0-6fe5ca208825 +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description - +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Yoper 2010 Linux with KDE3 Install CD +url http://yoper-linux.org/ +read:bytes 0 +claim:type shared +drive 50e0ca32-c04a-47e3-be37-1cd6f0ad9ff8 +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description Easy Peasy is an Ubuntu based operating system for netbooks. It's optimized for netbooks and favors the best software available by delivering Firefox with Flash and Java, Skype, Google Picasa, Songbird etc. out of the box. 
+favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name EasyPeasy 1.6 Linux Install CD +url http://www.geteasypeasy.com/ +read:bytes 195153920 +claim:type shared +drive daac6531-8f59-4c96-baa0-6545350d5a5e +write:bytes 0 +read:requests 47645 +os linux + +type cdrom +size 1572864000 +use email,general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description Calculate Linux is a Gentoo-based family of three distinguished distributions. Calculate Directory Server (CDS) is a solution that supports Windows and Linux clients via LDAP + SAMBA, providing proxy, mail and Jabbers servers with streamlined user management. Calculate Linux Desktop (CLD) is a workstation and client distribution with KDE, GNOME or Xfce desktop that includes a wizard to configure a connection to Calculate Directory Server. Calculate Linux Scratch (CLS) is live CD with a build framework for creating a custom distribution. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. 
Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Calculate 10.4 Linux 64bit Install CD +url http://www.calculate-linux.org/ +read:bytes 16932864 +claim:type shared +drive 20f5b0dd-5c63-40aa-97b8-5b34e5107a25 +write:bytes 0 +read:requests 4134 +os linux + +type cdrom +size 734003200 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description KahelOS is a Linux distribution based on Arch Linux. Its desktop edition comes with pre-configured GNOME as the default desktop environment, GNOME Office productivity suite, Epiphany web browser, GIMP image manipulation program, and other popular GTK+ and GNOME applications. Like Arch Linux, KahelOS maintains a rolling-release model of updating software packages using its parent's repositories. A server edition is also available. Both the desktop and server editions come in the form of installation CDs with text-based installers, but no live media. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. 
You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name KahelOS 05-01-2010 Linux 64bit Install CD +url http://www.kahelos.org/ +read:bytes 0 +claim:type shared +drive 1ddaedbf-ceb8-43b5-a587-e9e635d97f50 +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 524288000 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd,livecd +status active +description PCLinuxOS is a user-friendly, Mandriva-based Linux distribution with out-of-the-box support for many popular graphics and sound cards, as well as other peripheral devices. The bootable live CD provides an easy-to-use graphical installer and the distribution sports a wide range of popular applications for the typical desktop user, including browser plugins and full multimedia playback. The intuitive system configuration tools include Synaptic for package management, Addlocale to add support to many languages, Getopenoffice to install the latest OpenOffice.org, and Mylivecd to create a customised live CD. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend using it as an installation CD and not just a live CD.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. 
Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name PCLinuxOS 2010.1 Linux with KDE Install and Live CD +url http://www.pc-os.org/ +read:bytes 0 +claim:type shared +drive 3e0f427e-10eb-4277-bc3b-48f054908a09 +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 524288000 +use multimedia,general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type livecd +status active +description Peppermint was designed for enhanced mobility, efficiency and ease of use. While other operating systems are taking 10 minutes to load, you are already connected, communicating and getting things done. And, unlike other operating systems, Peppermint is ready to use out of the box. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. 
You will be able to start using your new server via VNC.\n +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Peppermint 1.0 Linux Live CD +url http://peppermintos.com/ +read:bytes 0 +claim:type shared +drive 92ffa2f6-f663-49d9-98ec-dc0b474369c4 +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 419430400 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type livecd +status active +description PureOS and PureOSlight are GNU/Linux live CDs based on Debian's testing repository. These are desktop distributions that can be used as live media (CD or USB) or as full-featured operating systems installed on a hard disk. PureOS is a 700 MB live CD with KDE, Iceweasel, Icedove, OpenOffice.org, Songbird, VLC and K3B. PureOSlight is a small 300 MB live CD with Xfce, Iceweasel, Icedove, AbiWord, Gnumeric and Exaile. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. 
You will be able to start using your new server via VNC.\n +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name PureOS 2.0 Linux Live CD +url http://www.pureos.org/ +read:bytes 100663296 +claim:type shared +drive ed6421b5-41c2-4ba3-a3c9-7c330d36e5b3 +write:bytes 0 +read:requests 24576 +os linux + +type cdrom +size 104857600 +use dev,general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description ReactOSĀ® is a free, modern operating system based on the design of WindowsĀ® XP/2003. Written completely from scratch, it aims to follow the Windows-NTĀ® architecture designed by Microsoft from the hardware level right through to the application level. This is not a Linux based system, and shares none of the unix architecture. The main goal of the ReactOS project is to provide an operating system which is binary compatible with Windows. This will allow your Windows applications and drivers to run as they would on your Windows system. Additionally, the look and feel of the Windows operating system is used, such that people accustomed to the familiar user interface of WindowsĀ® would find using ReactOS straightforward. The ultimate goal of ReactOS is to allow you to remove WindowsĀ® and install ReactOS without the end user noticing the change. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. 
You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name ReactOS 0.3.11 Alpha Install CD +url http://www.reactos.org/ +read:bytes 0 +claim:type shared +drive 327fd7dd-a2ca-4437-b87e-7610fccc3202 +write:bytes 0 +read:requests 0 +os other + +type cdrom +size 1887436800 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description Ubuntu Studio is a variant of Ubuntu aimed at the GNU/Linux audio, video and graphic enthusiast as well as professional. The distribution provides a collection of open-source applications available for multimedia creation. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Ubuntu Studio 10.04 Linux 32bit Install CD +url http://www.ubuntu.com/ +read:bytes 499675136 +claim:type shared +drive c6a368d1-cae6-43d9-8af6-b42142aed4b9 +write:bytes 0 +read:requests 121991 +os linux + +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +bits 32 +drive_type livecd +status active +description Vector Linux is a small, fast, Intel based Linux operating system for PC style computers. The creators of Vector Linux had a single credo: keep it simple, keep it small and let the end user decide what their operating system is going to be. What has evolved from this concept is perhaps the best little Linux operating system available anywhere. For the casual computer user you have a lightening fast desktop with graphical programs to handle your daily activities from web surfing, sending and receiving email, chatting on ICQ or IRC to running an ftp server. The power user will be pleased because all the tools are there to compile their own programs, use the system as a server or perhaps the gateway for their home or office computer network. Administrators will be equally as pleased because the small size and memory requirements of the operating system can be deployed on older machines maybe long forgotten. 
+favourite true +free true +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Vector 6.0 Linux with KDE Live CD +url http://www.vectorlinux.com/ +read:bytes 0 +claim:type shared +drive 0aa0b75d-ce40-4877-9882-8a81443911fe +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 713031680 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description Greenie Linux is an Ubuntu-based distribution customised for Slovak and Czech users. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Greenie 7 Linux 32bit Install CD +url http://www.greenie.sk/ +read:bytes 0 +claim:type shared +drive bdddc973-e84f-4cbc-a2c9-a9fce73bc462 +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 67108864 +use networking,gateway +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd,livecd +status active +description pfSense is a free, open source customized distribution ofĀ FreeBSDĀ tailored for use as a firewall and router. 
In addition to being a powerful, flexible firewalling and routing platform, it includes a long list of related features and a package system allowing further expandability without adding bloat and potential security vulnerabilities to the base distribution. pfSense is a popular project with more than 1 million downloads since its inception, and proven in countless installations ranging from small home networks protecting a PC and an Xbox to large corporations, universities and other organizations protecting thousands of network devices.Ā  +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend using it as an installation CD and not just a live CD.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name PfSense 1.2.3 Linux 32bit Live and Install CD +url http://www.pfsense.org +read:bytes 68657152 +claim:type shared +drive db46ea0d-26f3-4cd0-8a55-54da2af10363 +write:bytes 0 +read:requests 16762 +os linux + +type cdrom +size 46137344 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description Mandrake Linux was created in 1998 with the goal of making Linux easier to use for everyone. 
At that time, Linux was already well-known as a powerful and stable operating system that demanded strong technical knowledge and extensive use of the "command line"; MandrakeSoft saw this as an opportunity to integrate the best graphical desktop environments and contribute its own graphical configuration utilities and quickly became famous for setting the standard in ease-of-use and functionality. Mandriva Linux, formerly known as Mandrakelinux, is a friendly Linux Operating System which specializes in ease-of-use for both servers and the home/office. It is freely available in many languages throughout the world. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Mandriva Spring 2010 Linux 64bit Net Install CD +url http://mandriva.com/ +read:bytes 19488768 +claim:type shared +drive 857456e4-e16c-4a6f-9bfc-f5be3e58bde5 +write:bytes 0 +read:requests 4758 +os linux + +type cdrom +size 1606418432 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description UHU-Linux is the leading distribution of Linux in Hungary. 
It is primarily intended for Hungarian users, thus special care is taken to support the Hungarian language as much as possible. Ease of installation and sane default settings both help new users of Linux and make veterans feel comfortable. Usability as the main goal involves having all the cutting-edge yet stable releases of Open Source packages, with dpkg as the package manager. Development is completely open and everyone is invited to join. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 392193 +name Uhu Linux 2.2 32bit Install CD +url http://uhulinux.hu/ +read:bytes 354873344 +claim:type shared +drive 9d99705b-818a-49f8-8c77-0cd4a42cdea6 +write:bytes 1606422528 +read:requests 86639 +os linux + +type cdrom +size 734003200 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description Chakra, a fast, user-friendly and extremely powerful Live CD and/or distrolet based on the award winning KDE Software Compilation and on the GNU/Linux distribution for connoisseurs: Arch Linux. Currently in alpha stage, it features a graphical installer, automatic hardware configuration, and of course some more tools and extras. 
+favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC. +volume 00106cda-0e17-40c8-a576-b516f0eb67bc +host 00109617-2c6b-424b-9cfa-5b572c17bafe +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 179201 +name Chakra 2.0 Linux Live CD +url http://www.chakra-project.org/ +read:bytes 4096 +claim:type shared +drive fdfa8104-05fb-4210-aba5-fe78c4e6ee8c +write:bytes 734007296 +read:requests 1 +os linux + +type cdrom +size 662700032 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description UberStudent ("uber" meaning "productive" in Latin) is an Ubuntu-based distribution on a DVD designed for learning and teaching academic computing at the higher education and advanced secondary levels. UberStudent comes with software for everyday computing tasks, plus a core set of programs and features designed to teach and make easier the tasks and habits common to high-performing students of all academic disciplines. Lifelong learners, as well as any sort of knowledge worker, will equally benefit. UberStudent is supported by a free Moodle virtual learning environment. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. 
Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0013fc75-b170-4d62-abaf-804b8fc466cc +host 001318df-35c6-439f-8e72-8d57c36ca86b +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 161793 +name UberStudent 1.0 Linux with LXDE 32bit Install CD +url http://www.uberstudent.org/ +read:bytes 4096 +claim:type shared +drive 854a9706-fb14-4868-80df-53d712f1531a +write:bytes 662704128 +read:requests 1 +os linux + +type disk +size 3221225472 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type preinstalled +status active +description This is a pre-installed ready Fedora system that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. The Fedora Project is an openly-developed project designed by Red Hat, open for general participation, led by a meritocracy, following a set of project objectives. The goal of The Fedora Project is to work with the Linux community to build a complete, general purpose operating system exclusively from open source software. Development will be done in a public forum. The project will produce time-based releases of Fedora about 2-3 times a year, with a public release schedule. The Red Hat engineering team will continue to participate in building Fedora and will invite and encourage more outside participation than in past releases. By using this more open process, we hope to provide an operating system more in line with the ideals of free software and more appealing to the open source community. 
+favourite true +install_notes ***You must update the default root/superuser password for Fedora 13 on first login.***\n\nPre-installed Fedora 13 64bit Linux on 24/07/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Complete the personalisation of your new server\n---------------------------------------------------------------------\nUpon first start-up you should be presented with the welcome screen after the initial boot process has completed. You will now have the opportunity to personalise your system installation. \na) Click the 'forward' button to get started.\n\nb) You will now see the license information for this system. Fedora 13 has an open source GNU license. Assuming this is acceptable you should click the 'forward' button again.\n\nc) You can now create your own user account, enter your name and set the password. Please note:\n\nTHIS IS NOT THE ADMINISTRATIVE ACCOUNT. YOU SHOULD RESET THE ROOT/SUPERUSER PASSWORD AS OUTLINED IN STEP 4 BELOW AFTER COMPLETING STEP 3.\n\nd) After clicking forward again you will have the opportunity to set the time servers that will set your servers time. 
You can just leave the default values unless you have some specific needs. Once you are happy please click the 'forward' button.\n\ne) Finally you have the option to submit your hardware profile to the Fedora Project to help with their development. This is entirely your personal choice. Either way once you are ready click the 'finish' button.\n\n4. Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: cloudsigma\nThe default root/superuser password is set to: cloudsigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\nsu root\n\nWhen prompted use the password 'cloudsigma' (don't type the quote marks). The prompt should change to show you now using the root/superuser account.\n\nc) Now type the command:\n\npasswd\n\nd) When prompted enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\ne) CoudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\nf) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n5. Expanding your drive\n--------------------------------\n\nThe system is fully installed and designed to expand to fill the initial size of the drive. After first login you should find that the system has used the full disk size as its partition.\n\n\n6. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n7. 
Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. +volume 0003ca60-6b03-4da9-a409-84d8d7afa738 +host 00031836-a624-4b22-bc7d-41ff8977087b +user 00000000-0000-0000-0000-000000000001 +autoexpanding true +write:requests 786433 +name Fedora 13 Linux 64bit Preinstalled System +url http://fedoraproject.org/ +read:bytes 40962080768 +claim:type shared +drive d18119ce-7afa-474a-9242-e0384b160220 +write:bytes 3221229568 +read:requests 10000508 +os linux + +type disk +size 4294967296 +use dbserver,webserver,email,general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type preinstalled +status active +favourite true +install_notes ***You must update the default root/superuser password for Debian 5.0 on first login.***\n\nPre-installed Debian 5.0 64bit Linux on 02/08/2010\n========================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n------------------------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. 
\n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Update your root/superuser password\n-------------------------------------------------------\n\nThe default accounta are: root and cloudsigma\nThe default passwords for both accounts is set to: cloudsigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\nc) When prompted enter the password 'cloudsigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nd) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ne) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. 
You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. +volume 0003ca60-6b03-4da9-a409-84d8d7afa738 +host 00031836-a624-4b22-bc7d-41ff8977087b +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 1048577 +name Debian 5.0 Preinstalled without X +url www.debian.org +read:bytes 35180666880 +claim:type shared +drive fd49670e-17e8-4b0e-b03e-d6a65c138445 +write:bytes 4294971392 +read:requests 8589030 +os linux + +type disk +size 21474836480 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type preinstalled +status active +description This is a pre-installed ready CentOS system that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. CentOS is an Enterprise-class Linux Distribution derived from sources freely provided to the public by a prominent North American Enterprise Linux vendor. CentOS conforms fully with the upstream vendors redistribution policy and aims to be 100% binary compatible. +favourite true +install_notes ***You must update the default root/superuser password for CentOS 5.5 on first login.***\n\nPre-installed CentOS 5.5 64bit Linux on 24/07/2010\n============================================================\n\n1. 
Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: root\nThe default root/superuser password is set to: CloudSigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\nc) When prompted enter the password 'CloudSigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nd) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ne) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. 
There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. +volume 000431a5-46d9-4a67-9c03-3c3402a41992 +host 00043e69-ac57-45b1-8692-75db24064fb9 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 5242881 +name CentOS 5.5 Linux 64bit Preinstalled System +url http://www.centos.org/ +read:bytes 251925499904 +claim:type shared +drive 1ea7dead-9d52-4e79-9a9b-435db7cc972c +write:bytes 21474840576 +read:requests 61505249 +os linux + +type disk +size 2684354560 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type preinstalled +status active +description This is a pre-installed ready Ubuntu system that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. 
Ubuntu Linux is a complete desktop Linux operating system, freely available with both community and professional support. The Ubuntu community is built on the ideas enshrined in the Ubuntu Manifesto: that software should be available free of charge, that software tools should be usable by people in their local language and despite any disabilities, and that people should have the freedom to customise and alter their software in whatever way they see fit. "Ubuntu" is an ancient African word, meaning "humanity to others". The Ubuntu Linux distribution brings the spirit of Ubuntu to the software world. +favourite true +install_notes ***You must update the default root/superuser password for Ubuntu 10.04 on first login.***\n\nPre-installed Ubuntu 10.04 64bit Linux on 24/07/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. 
Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: cloudsigma\nThe default root/superuser password is set to: cloudsigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\nsudo su\n\nWhen prompted use the password 'cloudsigma' (don't type the quote marks). The prompt should change to show you now using the root/superuser account.\n\nc) Now type the command:\n\npasswd\n\nd) When prompted enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\ne) CoudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\nf) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed and designed to expand to fill the initial size of the drive. After first login you should find that the system has used the full disk size as its partition.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. 
We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. +volume 00059deb-640a-464e-9509-6a3ec6cfd549 +host 00059836-5512-4ce2-bf66-4daab2d994e4 +user 00000000-0000-0000-0000-000000000001 +autoexpanding true +write:requests 655361 +name Ubuntu Linux 10.04 Desktop 64bit Preinstalled System +url http://www.ubuntu.com/ +read:bytes 24617140224 +claim:type shared +drive 99a75966-209f-41d5-817c-7a3916354540 +write:bytes 2684358656 +read:requests 6010044 +os linux + +type disk +size 8589934592 +use dbserver,webserver,email,general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type preinstalled +status active +favourite true +install_notes ***You must update the default root/superuser password for Ubuntu 10.04 on first login.***\n\nPre-installed Ubuntu 10.04 64bit Linux on 01/09/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. 
\n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: cloudsigma\nThe default root/superuser password is set to: cloudsigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\nsudo su\n\nWhen prompted use the password 'cloudsigma' (don't type the quote marks). The prompt should change to show you now using the root/superuser account.\n\nc) Now type the command:\n\npasswd\n\nd) When prompted enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\ne) CoudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\nf) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed and designed to expand to fill the initial size of the drive. After first login you should find that the system has used the full disk size as its partition.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. 
You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. +volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 2097153 +name Ubuntu 10.04 Server Edition Linux 64bit +url http://www.ubuntu.com/server +read:bytes 71391387648 +claim:type shared +drive 0b060e09-d98b-44cc-95a4-7e3a22ba1b53 +write:bytes 8589938688 +read:requests 17429538 +os linux + +type disk +size 21474836480 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type preinstalled +status active +description This is a pre-installed ready CentOS system including AppFirst monitoring software that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. CentOS is an Enterprise-class Linux Distribution derived from sources freely provided to the public by a prominent North American Enterprise Linux vendor. CentOS conforms fully with the upstream vendors redistribution policy and aims to be 100% binary compatible. 
+favourite true +install_notes ***You must update the default root/superuser password for CentOS 5.5 on first login.***\n\nPre-installed CentOS 5.5 64bit Linux with AppFirst Monitoring on 24/07/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: root\nThe default root/superuser password is set to: CloudSigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\ne) When prompted enter the password 'CloudSigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nf) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ng) Please note your root/superuser password is different from your VNC password. 
You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs.\n\n\n6. AppFirst\n-------------------------\nThis disk image includes AppFirst monitoring software already installed. This software is able to provide in-depth server and application performance feedback. In order to take advantage of this software you need to have an AppFirst account.\n\nFull details of AppFirst's services including a 14-day free trial are available at http://www.appfirst.com . 
+volume 00106cda-0e17-40c8-a576-b516f0eb67bc +host 00109617-2c6b-424b-9cfa-5b572c17bafe +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 5242881 +name CentOS 5.5 Linux 64bit Preinstalled System with AppFirst Monitoring +read:bytes 838707331072 +claim:type shared +drive c157e1eb-aa9c-4dd7-80b8-6fd4a238f2a9 +write:bytes 21474840576 +read:requests 204762532 +os linux + +type disk +size 8589934592 +use dbserver,webserver,email,general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type preinstalled +status active +favourite true +install_notes ***You must update the default root/superuser password for Debian 5.0 on first login.***\n\nPre-installed Debian 5.0 64bit Linux on 02/08/2010\n========================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n------------------------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. 
Update your root/superuser password\n-------------------------------------------------------\n\nThe default accounta are: root and cloudsigma\nThe default passwords for both accounts is set to: cloudsigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\nc) When prompted enter the password 'cloudsigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nd) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ne) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. 
We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. +volume 001118bb-dbdb-4ab0-b7db-d4cceb160098 +host 00115b1d-6fe9-40b2-a013-426a6a584ff7 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 2097153 +name Debian 5.0 Preinstalled +url www.debian.org +read:bytes 71179878400 +claim:type shared +drive 9b732c4e-32a3-4369-b5f7-9a0325195baa +write:bytes 8589938688 +read:requests 17377900 +os linux + +type cdrom +claimed 0002c6df-a1d2-4d1d-96f0-f95405a28183:guest:7055acf3-8d9a-4a99-a24f-dda1aaf37944:ide:0:0 00115b1d-6fe9-40b2-a013-426a6a584ff7:guest:0a486768-08c1-419d-ad9c-1c8143df3496:ide:0:0 +size 2248146944 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +bits 64 +email drives@elastichosts.com +drive_type installcd +status active +description - +favourite false +free false +volume 0007aee7-bd5b-4551-9d8f-a958051235a9 +host 00079b57-1b29-4a89-a8d0-1d648fc20804 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Windows Web Server 2008 Trial Install CD +url http://www.microsoft.com +read:bytes 55097581056 +claim:type shared +drive 7aead6d3-c3e6-4940-85c7-f5ee61f6ef2b +write:bytes 0 +read:requests 22364695 +os windows + +type cdrom +claimed 0014dbec-e818-4cf4-b467-d4f0dad10246:guest:6100e29c-708d-4a5b-821b-6a9faa3ba013:ide:0:1 00031836-a624-4b22-bc7d-41ff8977087b:guest:fcde7569-e034-452c-9909-7c485f5d168f:ide:0:0 00154bbf-adf4-475d-9fe6-71c1e987d1bd:guest:1ac4641e-aa67-47f2-a77d-e9c5982d68b2:ide:0:0 0012c12d-72b1-4dfc-ae0f-aeab09881545:guest:300989f8-da5c-42a6-91f8-97e87b85b748:ide:0:1 
00016115-af87-452b-a3bf-3affc8a7d934:guest:f679b4ba-a4de-4254-90d1-27396aac8712:ide:0:0 00016115-af87-452b-a3bf-3affc8a7d934:guest:65e59c8b-579b-4977-b60c-b3b7eb404026:ide:0:0 00016115-af87-452b-a3bf-3affc8a7d934:guest:76eaf2fe-554a-4d3f-a3ef-a1214e878793:ide:0:0 00154bbf-adf4-475d-9fe6-71c1e987d1bd:guest:030cfdda-9c6c-4622-a68c-2e3588fbe828:ide:0:0 00109617-2c6b-424b-9cfa-5b572c17bafe:guest:64a5375a-31cc-414f-9e14-006b5c39b51f:ide:0:0 00059836-5512-4ce2-bf66-4daab2d994e4:guest:83da4fb5-037f-4985-a0f6-f696fa7ff727:ide:0:0 0014dbec-e818-4cf4-b467-d4f0dad10246:guest:90f4a2d3-9b76-4444-a1b2-72bbd06fe3e2:ide:0:0 0002c6df-a1d2-4d1d-96f0-f95405a28183:guest:cbb4ecc9-654f-4410-aeb4-b9ca602faa01:ide:0:0 0008d252-5102-43a0-82c6-18e8e2dd2bff:guest:e7ea14b2-aaa0-48b4-b1ac-7c8351c2edf4:ide:0:0 001318df-35c6-439f-8e72-8d57c36ca86b:guest:67f96fa3-8d41-4f8b-8199-4111617d3150:ide:0:1 000663ee-9fb6-4461-90f6-01327a4aff07:guest:245dd0b0-18eb-4e24-b219-9549bafdea87:ide:0:0 000663ee-9fb6-4461-90f6-01327a4aff07:guest:b52e106f-f14c-4312-8597-bcfedf4b0e70:ide:0:0 +size 2663383040 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free false +bits 64 +email drives@elastichosts.com +drive_type installcd +status active +description - +favourite false +install_notes pass:123456 +volume 0007aee7-bd5b-4551-9d8f-a958051235a9 +host 00079b57-1b29-4a89-a8d0-1d648fc20804 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Windows Server 2008 Trial Install CD +url http://www.microsoft.com/windowsserver2008/en/us/default.aspx +read:bytes 78315713024 +claim:type shared +drive f89af28e-ff00-4fc9-a7ed-22e7fa5a88db +write:bytes 0 +read:requests 32289210 +os windows + +status active +name Gentoo Install Minimal amd64 20100408 +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 437561856 +write:bytes 119558144 +claim:type shared +drive 73162606-78ca-4b0a-8f7a-70aa70563d90 +free none +volume 
00018aab-c080-4ed3-b52f-459933d34ec9 +host 00016115-af87-452b-a3bf-3affc8a7d934 +os linux +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 79760 +claimed 00154bbf-adf4-475d-9fe6-71c1e987d1bd:guest:d74c8d2b-a169-486c-adbd-89ca50dccafa:ide:0:1 +type cdrom +write:requests 29189 +size 209715200 + +status active +name Peppermint Ice Linux 32bit Install CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 1986560 +description Peppermint OS is an Ubuntu-based Linux distribution that aims to be lightning fast and easy on system resources. By employing Mozilla's Prism technology Peppermint integrates seamlessly with Cloud and web-based applications. The distribution's other features include automatic updates, easy step-by-step installation, sleek and user-friendly interface, and increased mobility by integrating directly with Cloud-based applications. +write:bytes 437698560 +claim:type shared +drive f9d92afc-27ff-4139-84c7-ac6655e6f6f1 +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 00018aab-c080-4ed3-b52f-459933d34ec9 +host 00016115-af87-452b-a3bf-3affc8a7d934 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 485 +free none +type cdrom +write:requests 106860 +size 436207616 + +status active +name Super Gamer Linux 32bit and 64bit Install CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 137039872 +description SuperGamer is a live DVD based on VectorLinux, intended to showcase gaming on Linux. The distribution is optimised for a gaming computer environment, with some tweaks to help speed up running from the live DVD. Extra games are added along with some demo editions of proprietary games. All games are native Linux games, but users wishing to run Windows games may install WINE or a related emulator, such as Cedega. +write:bytes 8446324736 +claim:type shared +drive d72701b2-01b9-4ac3-9afa-d0afdb6bcf2f +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 00018aab-c080-4ed3-b52f-459933d34ec9 +host 00016115-af87-452b-a3bf-3affc8a7d934 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 33457 +free none +type cdrom +write:requests 2062091 +size 8413773824 + +status active +name ZeroShell 1.3 Linux Install CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 856064 +description Zeroshell is a Linux distribution for servers and embedded devices aimed at providing the main network services a LAN requires. It is available in the form of Live CD or Compact Flash image and you can configure and administer it using your web browser. +write:bytes 153247744 +claim:type shared +drive 44358ce4-0f30-4e48-86d1-e93330961a8a +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC.\n +volume 00023324-4c49-4567-a017-c85c8a6b8313 +host 0002c6df-a1d2-4d1d-96f0-f95405a28183 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 209 +free none +type cdrom +write:requests 37414 +size 155189248 + +status active +name Astaro Security Gateway Firewall Server 8.0 Linux Install CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 365871104 +description Astaro offers an integrated software solution that provides superior performance in an all-in-one firewall. Its hardened operating system, stateful packet inspection, content filtering (virus & surf protection), application proxies and IPsec based VPN provides a powerful solution to today's security issues. 
It is designed to maximize your networks security without compromising its performance enabling telecommuters, branch offices, customers and suppliers to safely share critical business information. Our proprietary user interface, WebAdmin allows ease of use and manageability of all open source firewall components, as well as the Up2Date service via the Internet. It is easy to install with all components on one CD achieving simple implementation and integration to existing network environments. +write:bytes 369696768 +claim:type shared +drive 916b0e39-b234-407b-89ab-e8108f05726f +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 00023324-4c49-4567-a017-c85c8a6b8313 +host 0002c6df-a1d2-4d1d-96f0-f95405a28183 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 89324 +claimed 000096ce-ff07-413d-912a-aa1a33963802:guest:20911753-98a6-4951-af34-89e157452c84:ide:0:0 00115b1d-6fe9-40b2-a013-426a6a584ff7:guest:75a96f35-c3fd-492a-a48b-34dcd10987d6:ide:0:0 +free none +type cdrom +write:requests 90258 +size 369098752 + +status active +name Chakra 0.2.2 Linux 64bit Install and Live CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 5451776 +description Chakra GNU/Linux is a user-friendly and powerful distribution and live CD based on Arch Linux. 
It features a graphical installer, automatic hardware detection and configuration, the latest KDE desktop, and a variety of tools and extras. +write:bytes 724774912 +claim:type shared +drive 0e8c8ac2-f791-4764-a964-c6d2679ae49a +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend using it as an installation CD and not just a live CD.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0000acbf-fa0a-44a6-8018-2f106f96a45f +host 000096ce-ff07-413d-912a-aa1a33963802 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 1331 +free none +type cdrom +write:requests 176947 +size 721420288 + +status active +name Clonezilla Live 1.2.6 64bit +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 876544 +description Clonezilla Live is a Debian-based live CD containing Clonezilla, a partition and disk cloning software similar to Norton Ghost. It saves and restores only used blocks in hard drive. With Clonezilla, one can clone a 5 GB system to 40 clients in about 10 minutes. +write:bytes 134045696 +claim:type shared +drive cec8330f-59c7-4e20-9577-54df28d598e7 +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. 
You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC. +volume 0000acbf-fa0a-44a6-8018-2f106f96a45f +host 000096ce-ff07-413d-912a-aa1a33963802 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 214 +free none +type cdrom +write:requests 32726 +size 134217728 + +status active +name Absolute Linux 13.1.2 Install CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 93573632 +description Absolute Linux is a light-weight modification of Slackware Linux. It includes several utilities that make configuration and maintenance easier and it has many common desktop and Internet applications installed and configured with tight integration of menus, applications and MIME types. Absolute Linux uses IceWM and ROX for its window and file managers. +write:bytes 728211456 +claim:type shared +drive b745638c-87ff-4836-8623-e48e67286494 +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 000431a5-46d9-4a67-9c03-3c3402a41992 +host 00043e69-ac57-45b1-8692-75db24064fb9 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 39232 +free none +type cdrom +write:requests 177786 +size 725614592 + +status active +name Sabayon_Linux_5.4_amd64_K.iso +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 12877824 +write:bytes 2160496640 +claim:type shared +drive 75119285-7c20-43f4-9d3b-e6af3f1823e3 +free none +volume 000431a5-46d9-4a67-9c03-3c3402a41992 +host 00043e69-ac57-45b1-8692-75db24064fb9 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 3144 +type cdrom +write:requests 527465 +size 2151677952 + +status active +name FreeBSD 8.1 Linux 64bit Install CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 60035072 +description About FreeBSD FreeBSD is a UN*X-like operating system for the i386, IA-64, PC-98, Alpha/AXP, and UltraSPARC platforms based on U.C. Berkeley's "4.4BSD-Lite" release, with some "4.4BSD-Lite2" enhancements. It is also based indirectly on William Jolitz's port of U.C. Berkeley's "Net/2" to the i386, known as "386BSD", though very little of the 386BSD code remains. FreeBSD is used by companies, Internet Service Providers, researchers, computer professionals, students and home users all over the world in their work, education and recreation. +write:bytes 2315309056 +claim:type shared +drive fb940d5b-b9a0-4f9c-8cb7-94c3378d1676 +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. 
You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 00059deb-640a-464e-9509-6a3ec6cfd549 +host 00059836-5512-4ce2-bf66-4daab2d994e4 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 14657 +free none +type cdrom +write:requests 565261 +size 2306867200 + +status active +name BackTrack 4 Release 1 Linux Live CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 4008857600 +description A SLAX-based live CD with a comprehensive collection of security and forensics tools BackTrack 4 R1, a Linux-based penetration testing arsenal for security professionals. +write:bytes 2023919616 +claim:type shared +drive ef152c9c-1460-44f5-b192-8e0524909709 +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 00059deb-640a-464e-9509-6a3ec6cfd549 +host 00059836-5512-4ce2-bf66-4daab2d994e4 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 978725 +free none +type cdrom +write:requests 494121 +size 2017460224 + +status active +name Vector 6.0 Linux 32bit Install CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 3035136 +description VECTORLINUX is a small, fast, Intel Linux operating system based on one of the original Linux distributions, Slackware. The enormously popular Slackware is the true "Unix" of Linux distributions and is used by major corporations, universities and home users alike. It's popularity stems from the fact that it is a robust, versatile and almost unbreakable system. Slackware has been traditionally known to be about as user friendly as a coiled rattlesnake and that's where Vector Linux comes into play. We have produced a bloat free, easy to install, configure and maintain Slackware based system that is second to none. +write:bytes 729059328 +claim:type shared +drive c2a757b9-dfd0-432c-bb29-b380b4dd6fb6 +free none +volume 00059deb-640a-464e-9509-6a3ec6cfd549 +host 00059836-5512-4ce2-bf66-4daab2d994e4 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 741 +type cdrom +write:requests 177993 +size 729808896 + +status active +name PCBSD 8.1 Linux 64bit Install CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 15228928 +description PC-BSD has as its goals to be an easy-to-install-and-use desktop operating system, based on FreeBSD. To accomplish this, it currently has a graphical installation, which will enable even UNIX novices to easily install and get it running. It will also come with KDE pre-built, so that the desktop can be used immediately. Currently in development is a graphical software installation program, which will make installing pre-built software as easy as other popular operating systems. 
+write:bytes 3794726912 +claim:type shared +drive 802fbcab-2723-469c-b775-6fdeb21287da +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 3718 +free none +type cdrom +write:requests 926447 +size 3783262208 + +status active +name nst-2.13.0.x86_64.iso +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 7503872 +write:bytes 1436717056 +claim:type shared +drive 9d04c648-712d-4076-bd99-70088d85fe01 +free none +volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 1832 +type cdrom +write:requests 350761 +size 1430257664 + +status active +name Peppermint-Ice-10012010.iso +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 2613248 +write:bytes 452710400 +claim:type shared +drive 2e79eeee-b4ad-4dcf-a072-86dcede6af1b +free none +volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 638 +type cdrom +write:requests 110525 +size 452984832 + +status active +name Sabayon_Linux_5.4_amd64_K.iso +readers 
ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 14082048 +write:bytes 2161713152 +claim:type shared +drive 07e2a6df-8389-4130-a003-edacc19a9ee3 +free none +volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 3438 +type cdrom +write:requests 527762 +size 2151677952 + +type cdrom +claimed 00031836-a624-4b22-bc7d-41ff8977087b:guest:ffe02269-b653-47ad-ab21-a02805b24904:ide:0:0 000096ce-ff07-413d-912a-aa1a33963802:guest:1f378a18-1b59-40e7-8e9a-7f81d7eda6b8:ide:0:0 00079b57-1b29-4a89-a8d0-1d648fc20804:guest:8c13b69d-6d11-4151-975b-a2f084c7ada7:ide:0:0 00166b98-6431-40ad-94b0-244881ff87d5:guest:1705b116-aac2-449a-b0de-3dd4ab7e765f:ide:0:0 000932a7-c74f-4de3-bfc4-227435f78998:guest:30d887ee-ed96-4c32-a1a8-5ab49abd2a7e:ide:0:1 00154bbf-adf4-475d-9fe6-71c1e987d1bd:guest:bcea8695-baeb-476e-8089-475ce8948646:ide:0:1 0002c6df-a1d2-4d1d-96f0-f95405a28183:guest:df1368af-05a3-4ad5-8017-54be3ea70232:ide:0:0 00109617-2c6b-424b-9cfa-5b572c17bafe:guest:3569d646-7ae5-410f-b66e-64bba1381cba:ide:0:0 +size 2663383040 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +bits 64 +drive_type installcd +status active +description - +favourite false +free false +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Windows Server 2008 Trial Install CD +url http://www.microsoft.com/windowsserver2008/en/us/default.aspx +read:bytes 5261708288 +claim:type shared +drive 7e23b099-dd35-446b-8d90-2953643b664f +write:bytes 0 +read:requests 1883649 +os windows + +status active +name Unity Linux 64bit Install and Live CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 147034112 +description The community-oriented Unity Linux is a minimalist distribution and live CD based on Mandriva Linux. 
The project's main goal is to create a base operating system from which more complete, user-oriented distribution can easily be built - either by other distribution projects or by the users themselves. Unity Linux uses Openbox as the default window manager. Its package management is handled via Smart and RPM 5 which can download and install additional software packages from the project's online repository. +write:bytes 290488320 +claim:type shared +drive d235dada-407c-4105-b4ef-636eb7604404 +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 00106cda-0e17-40c8-a576-b516f0eb67bc +host 00109617-2c6b-424b-9cfa-5b572c17bafe +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 35897 +free none +type cdrom +write:requests 70920 +size 289406976 + +type disk +licenses msft_p73_04837 msft_tfa_00009 +size 21474836480 +use dbserver,general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free false +bits 64 +drive_type preinstalled +status active +description Please refer to the install notes for a full guide to initial configuration. 
+favourite false +install_notes ***You must update the default Administrator password for Windows Server Standard 2008 and the Super Administrator password (sa) for SQL Server Standard 2008***\n\nPre-installed Windows Server 2008 Standard R2 64bit English on 15/07/2010\n=========================================================================\n\n1. Minimum Hardware Requirements\n--------------------------------\n\nThe recommended minimum hardware requirements for the use of SQL Server Standard 2008 R2 with Windows Server Standard 2008 R2 as publishes by Microsoft can be found through the following link:\n\nhttp://msdn.microsoft.com/en-us/library/ms143506.aspx\n\n\n2. Update your administrator password\n-------------------------------------\n\nThe default administrator password is set to: CloudSigma1\n\nPlease CHANGE this IMMEDIATELY after first logging on.\n\na) Open the "Control Panel" and select "User Accounts"\n\nb) Select "Change your Windows Password"\n\nc) The Administrator user icon will be shown on the right, select again "Change your Password", and on this screen update your details accordingly\n\n\n3. Expanding your drive\n-----------------------\n\nThe system is fully installed, but you will need to extend the\ndisk partition to cover the whole of your drive. To do this:\n\na) Open the "Computer Management" tool from "Administrative Tools" on the "Start" menu.\n\nb) Select "Storage" then "Disk Management" in the left hand pane\n\nc) Right-click on the 12.90 GB NTFS primary partition, and select "Extend Volume"\n\nd) Enter the amount of disk space that you want to increase the Volume size by (the default will be the maximum available)\n\nYou will need to repeat this procedure if you ever resize this drive in the future.\n\n\n4. 
Enabling Remote Access\n-------------------------\n\nAfter logging in to VNC for the first time to configure your new Windows server, we recommend that if you are logging in from a Windows Desktop that you enable Remote Desktop for better access performance. To do this, follow these instructions:\n\na) Select "Start" | "Applications" | "Control Panel"\n\nb) Select "System and Security"\n\nc) Under "System" select "Allow Remote Access"\n\nd) Select the corresponding connection according to your Security Configuration\n\n\n5. Pinging Service\n------------------\n\nThe Pinging service has been turned on by default, if you wish to disable it please follow these instructions:\n\na) Select the "Windows Firewall with Advanced Security" tool from "Administrative Tools" on the "Start" menu.\n\nb) On the left hand pane, select "Inbound Rules"\n\nc) On the middle pane, select the rules "File and Printer Sharing (Echo Request - ICMPv4-In)" and "File and Printer Sharing (Echo Request - ICMPv6-In)"\n\nd) From the right-hand pane, select "Disable Rules"\n\n\nSQL Server 2008 R2 on 15/07/2010\n================================\n\n1. Change the Super Administrator Password (sa). \n--------------------------------------------------------------------\n\nThe default password has been set to "CloudSigma1"\n\na) Open "Microsoft SQL Server Management Studio"\n\nb) Connect to the Server Using "Windows Indentificaiton"\n\nc) From the Object Explorer select "Security" then "Longins"\n\nd) Right-click on sa and select "Properties"\n\ne) Enter the new password into "Password" and "Confirm Password" and press "OK"\n\n\n2. 
The following features were installed:\n-----------------------------------------------------\n\na) Main features\n\n-Database Engine Services\n-SQL Server Replication\n-Full-Text Search\n-Analysis Services\n-Reporting Services\n\nb) Shared Features\n\n-Business Intelligengce Development Studio\n-Client Tools Connectivity\n-Integration Services\n-Clinet Tools Backwards Compatibility\n-Clinet Tools SDK\n-SQL Server Books Online\n-Mangement Tools - Basic\n-Management Tools - Complete\n-SQL Client Connectivity SDK\n-Microsoft Sync Framework\n\n3 The following services were configured:\n--------------------------------------------------------\n\n\nService: SQL Server Agent\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Manual\n\nService: SQL Server Database Engine\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Automatic\n\nService: SQL Server Analysis Services\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Disabled\n\nService: SQL Server Reporting Services\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Disabled\n\nService: SQL Server Integration Services 10.1\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Disabled\n \nService: SQL Full-text filter Daemon Lanuch\nUser: NT AUTHORITY\LOCALSERVICE\nStartup Type: Disabled\n\nService: SQL Server Browser\nUser: NT AUTHORITY\LOCALSERVICE\nStartup Type: Disabled\n\nFor detailed server installation configuration refer to the following installation log files on the system:\nC:\Program Files\Microsoft SQL Server\100\Setup Bootstrap\Log\20100716_162426\Summary_WIN-K0F21FV1C1V_20100716_162426.txt\n +volume 00023324-4c49-4567-a017-c85c8a6b8313 +host 0002c6df-a1d2-4d1d-96f0-f95405a28183 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 5242881 +name SQL Server Standard 2008 R2 - Windows Server Standard 2008 R2 - 64bit English pub +url http://www.microsoft.com/sqlserver/2008/en/us/ +read:bytes 49172439040 +claim:type shared +drive 7b013f8c-dd4c-4701-b1ca-936506dc37ca +write:bytes 
21474840576 +read:requests 12004990 +os windows + +type disk +licenses msft_lwa_00135 +size 13958643712 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free false +bits 64 +drive_type preinstalled +status active +description Please refer to the install notes for a full guide to initial configuration. +favourite false +install_notes ***You must update the default Administrator password for Windows Web Server 2008***\n\nPre-installed Windows Server 2008 Web R2 64bit English on 24/07/2010\n============================================================\n\n1. Connecting to your server via VNC\n--------------------------------------------------\n\na) Having installed a compatible VNC client, open a VNC connection to your server.\n\nb) Enter your IP address and VNC password as displayed on your Server Summary Window.\n\nc) Start to configure your server.\n\n\n2. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements as publishes by Microsoft can be found through the following link:\n\nhttp://msdn.microsoft.com/en-us/windowsserver/cc196364.aspx\n\nWe recommend specifying a higher level of RAM for a better user experience.\n\n\n3. Update your administrator password\n----------------------------------------------------\n\nThe default administrator password is set to: CloudSigma1\n\nPlease CHANGE this IMMEDIATELY after first logging on.\n\na) Open the "Control Panel" from the "Start" menu and select "User Accounts"\n\nb) Select "Change your Windows Password"\n\nc) The Administrator user icon will be shown on the right, select again "Change your Password", and on this screen update your details accordingly\n\n\n4. Configuring your Networking\n------------------------------------------\n\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. 
Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs.\n\n\n5. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive. To do this:\n\na) Open the "Computer Management" tool from "Administrative Tools" on the "Start" menu.\n\nb) Select "Storage" then "Disk Management" in the left hand pane\n\nc) Right-click on the 12.90 GB NTFS primary partition, and select "Extend Volume"\n\nd) Enter the amount of disk space that you want to increase the Volume size by (the default will be the maximum available)\n\nYou will need to repeat this procedure if you ever resize this drive in the future.\n\n\n6. Enabling Remote Access\n--------------------------------------\n\nAfter logging in to VNC for the first time to configure your new Windows server, we recommend that if you are logging in from a Windows Desktop that you enable Remote Desktop for better access performance. To do this, follow these instructions:\n\na) Select "Start" | "Applications" | "Control Panel"\n\nb) Select "System and Security"\n\nc) Under "System" select "Allow Remote Access"\n\nd) Select the corresponding connection type according to your Security Configuration\n\n\n7. 
Pinging Service\n-------------------------\n\nThe Pinging service has been turned on by default, if you wish to disable it please follow these instructions:\n\na) Select the "Windows Firewall with Advanced Security" tool from "Administrative Tools" on the "Start" menu.\n\nb) On the left hand pane, select "Inbound Rules"\n\nc) On the middle pane, select the rules "File and Printer Sharing (Echo Request - ICMPv4-In)" and "File and Printer Sharing (Echo Request - ICMPv6-In)"\n\nd) From the right-hand pane, select "Disable Rules" +volume 00120946-d7a4-486e-867e-8348bebe0b95 +host 0012c12d-72b1-4dfc-ae0f-aeab09881545 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 3407873 +name Windows Server Web 2008 R2 64bit English +url http://www.microsoft.com/windowsserver2008/en/us/default.aspx +read:bytes 145252270080 +claim:type shared +drive 71697799-c611-41b9-93be-f79152aefbe5 +write:bytes 13958647808 +read:requests 35461980 +os windows + +type disk +licenses msft_p73_04837 +size 13958643712 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free false +bits 64 +drive_type preinstalled +status active +description Please refer to the install notes for a full guide to initial configuration. +favourite false +install_notes ***You must update the default Administrator password for Windows Server Standard 2008***\n\nPre-installed Windows Server 2008 Standard R2 64bit English on 24/07/2010\n============================================================\n\n1. Connecting to your server via VNC\n--------------------------------------------------\n\na) Having installed a compatible VNC client, open a VNC connection to your server.\n\nb) Enter your IP address and VNC password as displayed on your Server Summary Window.\n\nc) Start to configure your server.\n\n\n2. 
Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements as publishes by Microsoft can be found through the following link:\n\nhttp://msdn.microsoft.com/en-us/windowsserver/cc196364.aspx\n\nWe recommend specifying a higher level of RAM for a better user experience.\n\n\n3. Update your administrator password\n----------------------------------------------------\n\nThe default administrator password is set to: CloudSigma1\n\nPlease CHANGE this IMMEDIATELY after first logging on.\n\na) Open the "Control Panel" from the "Start" menu and select "User Accounts"\n\nb) Select "Change your Windows Password"\n\nc) The Administrator user icon will be shown on the right, select again "Change your Password", and on this screen update your details accordingly\n\n\n4. Configuring your Networking\n------------------------------------------\n\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs.\n\n\n5. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive. 
To do this:\n\na) Open the "Computer Management" tool from "Administrative Tools" on the "Start" menu.\n\nb) Select "Storage" then "Disk Management" in the left hand pane\n\nc) Right-click on the 12.90 GB NTFS primary partition, and select "Extend Volume"\n\nd) Enter the amount of disk space that you want to increase the Volume size by (the default will be the maximum available)\n\nYou will need to repeat this procedure if you ever resize this drive in the future.\n\n\n6. Enabling Remote Access\n--------------------------------------\n\nAfter logging in to VNC for the first time to configure your new Windows server, we recommend that if you are logging in from a Windows Desktop that you enable Remote Desktop for better access performance. To do this, follow these instructions:\n\na) Select "Start" | "Applications" | "Control Panel"\n\nb) Select "System and Security"\n\nc) Under "System" select "Allow Remote Access"\n\nd) Select the corresponding connection type according to your Security Configuration\n\n\n7. 
Pinging Service\n-------------------------\n\nThe Pinging service has been turned on by default, if you wish to disable it please follow these instructions:\n\na) Select the "Windows Firewall with Advanced Security" tool from "Administrative Tools" on the "Start" menu.\n\nb) On the left hand pane, select "Inbound Rules"\n\nc) On the middle pane, select the rules "File and Printer Sharing (Echo Request - ICMPv4-In)" and "File and Printer Sharing (Echo Request - ICMPv6-In)"\n\nd) From the right-hand pane, select "Disable Rules" +volume 0013fc75-b170-4d62-abaf-804b8fc466cc +host 001318df-35c6-439f-8e72-8d57c36ca86b +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 3407873 +name Windows Server Standard 2008 R2 64bit English +url http://www.microsoft.com/windowsserver2008/en/us/default.aspx +read:bytes 257073537024 +claim:type shared +drive 0611be3f-0607-4b3c-8bad-a0af392d928a +write:bytes 13958647808 +read:requests 62762094 +os windows diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma/resources_ip_create.txt libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma/resources_ip_create.txt --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma/resources_ip_create.txt 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma/resources_ip_create.txt 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,13 @@ +resource 1.2.3.4 +netmask 255.255.255.0 +nameserver 91.203.56.1 +user f2e19d5c-eaa1-44e5-94aa-dc194594bd7b +type ip +gateway 91.203.56.1 + +resource 1.2.3.5 +netmask 255.255.255.0 +nameserver 91.203.56.1 +user f2e19d5c-eaa1-44e5-94aa-dc194594bd7b +type ip +gateway 91.203.56.1 diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma/resources_ip_list.txt libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma/resources_ip_list.txt --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma/resources_ip_list.txt 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma/resources_ip_list.txt 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,3 @@ +ip 1.2.3.4 +ip 1.2.3.5 +ip 1.2.3.6 diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma/servers_create.txt libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma/servers_create.txt --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma/servers_create.txt 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma/servers_create.txt 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,26 @@ +ide:0:0:write:requests 466 +rx 760681 +vnc:password testpass +ide:0:0 f0202f1c-0b4f-4cfc-8ae3-e30951d09ef0 +ide:0:0:read:requests 7467 +ide:0:0:read:bytes 165395968 +vnc:ip 178.22.66.28 +tx:packets 32 +tx 2568 +boot ide:0:0 +smp 1 +started 1286568422 +nic:0:model virtio +status active +mem 640 +rx:packets 12662 +user 93b34fd9-7986-4b25-8bfd-98a50383605d +ide:0:0:media disk +name cloudsigma node +persistent true +nic:0:block tcp/22 tcp/23 tcp/25 tcp/43594 tcp/43595 +server 62fe7cde-4fb9-4c63-bd8c-e757930066a0 +nic:0:dhcp 1.2.3.4 +nic:1:dhcp 1.2.3.5 +ide:0:0:write:bytes 7358464 +cpu 1100 diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma/servers_info.txt libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma/servers_info.txt --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma/servers_info.txt 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma/servers_info.txt 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,26 @@ +ide:0:0:write:requests 466 +rx 760681 +vnc:password testpass +ide:0:0 f0202f1c-0b4f-4cfc-8ae3-e30951d09ef0 +ide:0:0:read:requests 7467 +ide:0:0:read:bytes 165395968 +vnc:ip 178.22.66.28 +tx:packets 32 +tx 2568 +boot ide:0:0 +smp 1 +started 1286568422 +nic:0:model virtio +status active +mem 640 +rx:packets 12662 +user 93b34fd9-7986-4b25-8bfd-98a50383605d +ide:0:0:media disk +name cloudsigma node +persistent true 
+nic:0:block tcp/22 tcp/23 tcp/25 tcp/43594 tcp/43595 +server 62fe7cde-4fb9-4c63-bd8c-e757930066a0 +nic:0:dhcp 1.2.3.4 +nic:1:dhcp 1.2.3.5 +ide:0:0:write:bytes 7358464 +cpu 1100 diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma/servers_set.txt libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma/servers_set.txt --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma/servers_set.txt 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma/servers_set.txt 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,26 @@ +ide:0:0:write:requests 466 +rx 760681 +vnc:password testpass +ide:0:0 f0202f1c-0b4f-4cfc-8ae3-e30951d09ef0 +ide:0:0:read:requests 7467 +ide:0:0:read:bytes 165395968 +vnc:ip 178.22.66.28 +tx:packets 32 +tx 2568 +boot ide:0:0 +smp 2 +started 1286568422 +nic:0:model virtio +status active +mem 640 +rx:packets 12662 +user 93b34fd9-7986-4b25-8bfd-98a50383605d +ide:0:0:media disk +name cloudsigma node +persistent true +nic:0:block tcp/22 tcp/23 tcp/25 tcp/43594 tcp/43595 +server 62fe7cde-4fb9-4c63-bd8c-e757930066a0 +nic:0:dhcp 1.2.3.4 +nic:1:dhcp 1.2.3.5 +ide:0:0:write:bytes 7358464 +cpu 1100 diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/balance.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/balance.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/balance.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/balance.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1 @@ +{"balance": "10.00", "currency": "USD"} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/capabilities.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/capabilities.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/capabilities.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/capabilities.json 2014-05-26 
15:42:51.000000000 +0000 @@ -0,0 +1,26 @@ +{ + "drives": { + "dssd": { + "max_size": 8858013190752, + "min_size": 536870912 + } + }, + "servers": { + "cpu": { + "max": 80000, + "min": 250 + }, + "cpu_per_smp": { + "max": 2200, + "min": 1000 + }, + "mem": { + "max": 137438953472, + "min": 268435456 + }, + "smp": { + "max": 40, + "min": 1 + } + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/create_subscription.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/create_subscription.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/create_subscription.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/create_subscription.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,23 @@ +{ + "objects": [ + { + "amount": "1", + "auto_renew": false, + "descendants": [], + "discount_amount": "0", + "discount_percent": "0", + "end_time": "2014-03-01T12:00:00+00:00", + "id": "228816", + "period": "1 month", + "price": "10.26666666666666666666666667", + "remaining": "1", + "resource": "vlan", + "resource_uri": "/api/2.0/subscriptions/228816/", + "start_time": "2014-01-31T17:06:19.388295+00:00", + "status": "active", + "subscribed_object": "2494079f-8376-40bf-9b37-34d633b8a7b7", + "uuid": "0dd25c5c-6c01-498f-b009-e07d76552a1a" + } + ], + "price": "10.26666666666666666666666667" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/currentusage.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/currentusage.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/currentusage.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/currentusage.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,88 @@ +{ + "balance": { + "balance": "378.74599035374868510600", + "currency": "USD" + }, + "usage": { + "cpu": { + "burst": 0, + "subscribed": 0, + "using": 0 + }, + "dssd": { + 
"burst": 13958643712, + "subscribed": 0, + "using": 13958643712 + }, + "ip": { + "burst": 0, + "subscribed": 0, + "using": 0 + }, + "mem": { + "burst": 0, + "subscribed": 0, + "using": 0 + }, + "msft_7jq_00341": { + "burst": 0, + "subscribed": 0, + "using": 0 + }, + "msft_7nq_00302": { + "burst": 0, + "subscribed": 0, + "using": 0 + }, + "msft_lwa_00135": { + "burst": 0, + "subscribed": 0, + "using": 0 + }, + "msft_p71_01031": { + "burst": 0, + "subscribed": 0, + "using": 0 + }, + "msft_p73_04837": { + "burst": 0, + "subscribed": 0, + "using": 0 + }, + "msft_p73_04837_core": { + "burst": 0, + "subscribed": 0, + "using": 0 + }, + "msft_tfa_00009": { + "burst": 0, + "subscribed": 0, + "using": 0 + }, + "msft_tfa_00523": { + "burst": 0, + "subscribed": 0, + "using": 0 + }, + "sms": { + "burst": 0, + "subscribed": 0, + "using": 0 + }, + "ssd": { + "burst": 0, + "subscribed": 0, + "using": 0 + }, + "tx": { + "burst": 0, + "subscribed": 5368709120, + "using": 0 + }, + "vlan": { + "burst": 0, + "subscribed": 0, + "using": 0 + } + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_avail_groups.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_avail_groups.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_avail_groups.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_avail_groups.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +[ + [ + "ad2b0b9c-8b66-45bc-a0f8-3a8514b78406", + "e464a01b-ad2a-4bed-a4dd-30d1687560fd", + "2246e488-a1b9-4da2-af30-0b6c73a1529c", + "51a6b22f-2884-48d9-87f8-c85cb6f43c99", + "a67c932d-6766-470b-b1c5-17856e4a5b4e", + "3af58efd-8442-466f-80bf-48c5a2ee84b6", + "ab35089c-0a89-435f-aedd-eaa05fae0ef1", + "9972280b-3d74-4b0d-85de-caa0ef0117a6", + "c47bca85-0199-438c-9ae4-d308357cf22d", + "3e166706-188c-4f38-b8d5-7fc10a5019a1", + "ba47e1e9-1848-48bd-8786-9cc45744214c" + ] +] diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_clone.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_clone.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_clone.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_clone.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,29 @@ +{ + "objects": [ + { + "affinities": [], + "allow_multimount": false, + "jobs": [], + "licenses": [], + "media": "disk", + "meta": {}, + "mounted_on": [], + "name": "cloned drive", + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + }, + "resource_uri": "/api/2.0/drives/b02311e2-a83c-4c12-af10-b30d51c86913/", + "runtime": { + "snapshots_allocated_size": 0, + "storage_type": null + }, + "size": 2097152000, + "snapshots": [], + "status": "creating", + "storage_type": null, + "tags": [], + "uuid": "b02311e2-a83c-4c12-af10-b30d51c86913" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_create.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_create.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_create.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_create.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,29 @@ +{ + "objects": [ + { + "affinities": [], + "allow_multimount": false, + "jobs": [], + "licenses": [], + "media": "disk", + "meta": {}, + "mounted_on": [], + "name": "test drive 5", + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + }, + "resource_uri": "/api/2.0/drives/b02311e2-a83c-4c12-af10-b30d51c86913/", + "runtime": { + "snapshots_allocated_size": 0, + "storage_type": null + }, + "size": 2097152000, + "snapshots": [], + 
"status": "creating", + "storage_type": null, + "tags": [], + "uuid": "b02311e2-a83c-4c12-af10-b30d51c86913" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_detail.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_detail.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_detail.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_detail.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,154 @@ +{ + "meta": { + "limit": 20, + "offset": 0, + "total_count": 4 + }, + "objects": [ + { + "affinities": [], + "allow_multimount": false, + "jobs": [], + "licenses": [], + "media": "disk", + "meta": { + "arch": "64", + "category": "general", + "description": "Debian 6.0.7 - amd64 Pre-Installed English with SSH and VirtIO support. Last update on 2013/09/28.", + "favourite": "False", + "image_type": "preinst", + "install_notes": "Intall notes:\t1. Clone the Image. \\n The image needs to be cloned and then attached to the server. \\n \\n 2. Connecting to your server via VNC. \\n a) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n b) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n c) Start to configure your server. \\n \\n 3. Minimum Hardware Requirements. \\n The recommended minimum hardware requirements as publishes by Debian.org: 1GB RAM and 1GHrz CPU \\n \\n 4. Update your administrator password. \\n Please be aware that on startup you will be asked to enter the current password \"password123\" and set a secure password. \\n \\n 5. Setup your Debian. \\n By default the timezone and the language are set to Switzerland. \\n \\n 6. Configuring your Networking. \\n a) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. 
Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings. \\n b) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users. It is important that you secure access to your server according to your needs. \\n", + "os": "linux", + "paid": "False", + "url": "http://www.debian.org/" + }, + "mounted_on": [], + "name": "test node 2-drive", + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + }, + "resource_uri": "/api/2.0/drives/9d1d2cf3-08c1-462f-8485-f4b073560809/", + "runtime": { + "snapshots_allocated_size": 0, + "storage_type": "dssd" + }, + "size": 13958643712, + "snapshots": [], + "status": "unmounted", + "storage_type": "dssd", + "tags": [], + "uuid": "9d1d2cf3-08c1-462f-8485-f4b073560809" + }, + { + "affinities": [], + "allow_multimount": false, + "jobs": [], + "licenses": [], + "media": "disk", + "meta": { + "arch": "64", + "category": "general", + "description": "Debian 6.0.7 - amd64 Pre-Installed English with SSH and VirtIO support. Last update on 2013/09/28.", + "favourite": "False", + "image_type": "preinst", + "install_notes": "Intall notes:\t1. Clone the Image. \\n The image needs to be cloned and then attached to the server. \\n \\n 2. Connecting to your server via VNC. \\n a) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n b) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n c) Start to configure your server. \\n \\n 3. Minimum Hardware Requirements. \\n The recommended minimum hardware requirements as publishes by Debian.org: 1GB RAM and 1GHrz CPU \\n \\n 4. Update your administrator password. 
\\n Please be aware that on startup you will be asked to enter the current password \"password123\" and set a secure password. \\n \\n 5. Setup your Debian. \\n By default the timezone and the language are set to Switzerland. \\n \\n 6. Configuring your Networking. \\n a) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings. \\n b) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users. It is important that you secure access to your server according to your needs. \\n", + "os": "linux", + "paid": "False", + "url": "http://www.debian.org/" + }, + "mounted_on": [], + "name": "test node 3-drive", + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + }, + "resource_uri": "/api/2.0/drives/c9799969-0016-4298-a72c-93cabc067c6e/", + "runtime": { + "snapshots_allocated_size": 0, + "storage_type": "dssd" + }, + "size": 13958643712, + "snapshots": [], + "status": "unmounted", + "storage_type": "dssd", + "tags": [], + "uuid": "c9799969-0016-4298-a72c-93cabc067c6e" + }, + { + "affinities": [], + "allow_multimount": false, + "jobs": [], + "licenses": [], + "media": "disk", + "meta": { + "arch": "64", + "category": "general", + "description": "Debian 6.0.7 - amd64 Pre-Installed English with SSH and VirtIO support. Last update on 2013/09/28.", + "favourite": "False", + "image_type": "preinst", + "install_notes": "Intall notes:\t1. Clone the Image. \\n The image needs to be cloned and then attached to the server. \\n \\n 2. Connecting to your server via VNC. 
\\n a) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n b) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n c) Start to configure your server. \\n \\n 3. Minimum Hardware Requirements. \\n The recommended minimum hardware requirements as publishes by Debian.org: 1GB RAM and 1GHrz CPU \\n \\n 4. Update your administrator password. \\n Please be aware that on startup you will be asked to enter the current password \"password123\" and set a secure password. \\n \\n 5. Setup your Debian. \\n By default the timezone and the language are set to Switzerland. \\n \\n 6. Configuring your Networking. \\n a) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings. \\n b) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users. It is important that you secure access to your server according to your needs. 
\\n", + "os": "linux", + "paid": "False", + "url": "http://www.debian.org/" + }, + "mounted_on": [], + "name": "test node 2-drive", + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + }, + "resource_uri": "/api/2.0/drives/967c8bbd-ca32-42db-a9b8-95e270e0aae1/", + "runtime": { + "snapshots_allocated_size": 0, + "storage_type": "dssd" + }, + "size": 13958643712, + "snapshots": [], + "status": "unmounted", + "storage_type": "dssd", + "tags": [], + "uuid": "967c8bbd-ca32-42db-a9b8-95e270e0aae1" + }, + { + "affinities": [], + "allow_multimount": false, + "jobs": [], + "licenses": [], + "media": "disk", + "meta": { + "arch": "64", + "category": "general", + "description": "Debian 6.0.7 - amd64 Pre-Installed English with SSH and VirtIO support. Last update on 2013/09/28.", + "favourite": "False", + "image_type": "preinst", + "install_notes": "Intall notes:\t1. Clone the Image. \\n The image needs to be cloned and then attached to the server. \\n \\n 2. Connecting to your server via VNC. \\n a) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n b) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n c) Start to configure your server. \\n \\n 3. Minimum Hardware Requirements. \\n The recommended minimum hardware requirements as publishes by Debian.org: 1GB RAM and 1GHrz CPU \\n \\n 4. Update your administrator password. \\n Please be aware that on startup you will be asked to enter the current password \"password123\" and set a secure password. \\n \\n 5. Setup your Debian. \\n By default the timezone and the language are set to Switzerland. \\n \\n 6. Configuring your Networking. \\n a) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. 
Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings. \\n b) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users. It is important that you secure access to your server according to your needs. \\n", + "os": "linux", + "paid": "False", + "url": "http://www.debian.org/" + }, + "mounted_on": [ + { + "resource_uri": "/api/2.0/servers/e06cf7b3-ea46-4d38-87e0-3f918c4648d3/", + "uuid": "e06cf7b3-ea46-4d38-87e0-3f918c4648d3" + } + ], + "name": "test node 2-drive", + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + }, + "resource_uri": "/api/2.0/drives/3e166706-188c-4f38-b8d5-7fc10a5019a1/", + "runtime": { + "snapshots_allocated_size": 0, + "storage_type": "dssd" + }, + "size": 13958643712, + "snapshots": [], + "status": "mounted", + "storage_type": "dssd", + "tags": [], + "uuid": "3e166706-188c-4f38-b8d5-7fc10a5019a1" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_get.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_get.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_get.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_get.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,25 @@ +{ + "affinities": [], + "allow_multimount": false, + "jobs": [], + "licenses": [], + "media": "disk", + "meta": {}, + "mounted_on": [], + "name": "test drive 5", + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + }, + "resource_uri": "/api/2.0/drives/b02311e2-a83c-4c12-af10-b30d51c86913/", + "runtime": { + 
"snapshots_allocated_size": 0, + "storage_type": null + }, + "size": 2097152000, + "snapshots": [], + "status": "unmounted", + "storage_type": null, + "tags": [], + "uuid": "b02311e2-a83c-4c12-af10-b30d51c86913" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_resize.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_resize.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_resize.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/drives_resize.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,29 @@ +{ + "objects": [ + { + "affinities": [], + "allow_multimount": false, + "jobs": [], + "licenses": [], + "media": "disk", + "meta": {}, + "mounted_on": [], + "name": "test drive 5", + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + }, + "resource_uri": "/api/2.0/drives/b02311e2-a83c-4c12-af10-b30d51c86913/", + "runtime": { + "snapshots_allocated_size": 0, + "storage_type": null + }, + "size": 1164967936, + "snapshots": [], + "status": "creating", + "storage_type": null, + "tags": [], + "uuid": "b02311e2-a83c-4c12-af10-b30d51c86913" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_create_no_rules.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_create_no_rules.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_create_no_rules.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_create_no_rules.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "objects": [ + { + "meta": {}, + "name": "test policy 1", + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + }, + "resource_uri": 
"/api/2.0/fwpolicies/ae9e5982-33fd-4e89-a467-4480256ccdb6/", + "rules": [], + "servers": [], + "uuid": "ae9e5982-33fd-4e89-a467-4480256ccdb6" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_create_with_rules.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_create_with_rules.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_create_with_rules.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_create_with_rules.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,27 @@ +{ + "objects": [ + { + "meta": {}, + "name": "test policy 2", + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + }, + "resource_uri": "/api/2.0/fwpolicies/324819a5-7a5b-4231-957d-662a7429fb8c/", + "rules": [ + { + "action": "accept", + "comment": null, + "direction": "out", + "dst_ip": "127.0.0.1/32", + "dst_port": null, + "ip_proto": "tcp", + "src_ip": "127.0.0.1/32", + "src_port": null + } + ], + "servers": [], + "uuid": "324819a5-7a5b-4231-957d-662a7429fb8c" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_detail.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_detail.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_detail.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/fwpolicies_detail.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,84 @@ +{ + "meta": { + "limit": 0, + "offset": 0, + "total_count": 2 + }, + "objects": [ + { + "meta": {}, + "name": "test policy", + "owner": { + "resource_uri": "/api/2.0/user/5b4a69a3-8e78-4c45-a8ba-8b13f0895e23/", + "uuid": "5b4a69a3-8e78-4c45-a8ba-8b13f0895e23" + }, + "resource_uri": "/api/2.0/fwpolicies/0e339282-0cb5-41ac-a9db-727fb62ff2dc/", + 
"rules": [], + "servers": [], + "uuid": "0e339282-0cb5-41ac-a9db-727fb62ff2dc" + }, + { + "meta": {}, + "name": "My awesome policy", + "owner": { + "resource_uri": "/api/2.0/user/5b4a69a3-8e78-4c45-a8ba-8b13f0895e23/", + "uuid": "5b4a69a3-8e78-4c45-a8ba-8b13f0895e23" + }, + "resource_uri": "/api/2.0/fwpolicies/0c754845-f2ed-4347-8758-4315f7fa9c22/", + "rules": [ + { + "action": "drop", + "comment": "Drop traffic from the VM to IP address 23.0.0.0/32", + "direction": "out", + "dst_ip": "23.0.0.0/32", + "dst_port": null, + "ip_proto": "tcp", + "src_ip": null, + "src_port": null + }, + { + "action": "accept", + "comment": "Allow SSH traffic to the VM from our office in Dubai", + "direction": "in", + "dst_ip": null, + "dst_port": "22", + "ip_proto": "tcp", + "src_ip": "172.66.32.0/24", + "src_port": null + }, + { + "action": "drop", + "comment": "Drop all other SSH traffic to the VM", + "direction": "in", + "dst_ip": null, + "dst_port": "22", + "ip_proto": "tcp", + "src_ip": null, + "src_port": null + }, + { + "action": "drop", + "comment": "Drop all UDP traffic to the VM, not originating from 172.66.32.55", + "direction": "in", + "dst_ip": null, + "dst_port": null, + "ip_proto": "udp", + "src_ip": "!172.66.32.55/32", + "src_port": null + }, + { + "action": "drop", + "comment": "Drop any traffic, to the VM with destination port not between 1-1024", + "direction": "in", + "dst_ip": null, + "dst_port": "!1:1024", + "ip_proto": "tcp", + "src_ip": null, + "src_port": null + } + ], + "servers": [], + "uuid": "0c754845-f2ed-4347-8758-4315f7fa9c22" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/libdrives.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/libdrives.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/libdrives.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/libdrives.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,569 @@ +{ + 
"meta": { + "limit": 20, + "offset": 0, + "total_count": 89 + }, + "objects": [ + { + "affinities": [], + "allow_multimount": false, + "arch": "32", + "category": [ + "security" + ], + "description": "IPCop 2.0.2 - i486 - CD.\r\nThe IPCop Firewall is a Linux firewall distribution.", + "favourite": true, + "image_type": "install", + "install_notes": "1. Attach the CD.\\n Please be aware that the CD needs to be attached to the server to IDE. \\n \\n 2. Attach a Drive.\\n Please be aware that the minimum drive size where you are going to install the OS should be 5 GB. \\n \\n 3. Connecting to your server via VNC.\\n a) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n b) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n c) Start to configure your server. \\n \\n 4. Minimum Hardware Requirements.\\n The recommended minimum hardware requirements as publishes by ipcop.org are: 32MB RAM and 386MHz CPU \\n We recommend specifying a higher level of RAM for a better user experience.\\n", + "jobs": [], + "licenses": [], + "media": "cdrom", + "meta": {}, + "mounted_on": [], + "name": "IPCop 2.0.2", + "os": "linux", + "owner": null, + "paid": false, + "resource_uri": "/api/2.0/libdrives/6eca8d96-44bc-4637-af97-77ccd7ba4144/", + "size": 1000000000, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "http://www.ipcop.org/", + "uuid": "6eca8d96-44bc-4637-af97-77ccd7ba4144" + }, + { + "affinities": [], + "allow_multimount": false, + "arch": "64", + "category": [ + "general" + ], + "description": "FreeBSD-8.4-RELEASE-amd64-disc1", + "favourite": false, + "image_type": "install", + "install_notes": "1. Attach the CD.\\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \\n\r\n\\n\r\n2. Attach a Drive.\\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \\n\r\n\\n\r\n3. 
Connecting to your server via VNC.\\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \\n\r\n\\n\r\n4. Minimum Hardware Requirements.\\n\r\nThe recommended minimum hardware requirements as publishes by freebsd.org are: 0.5 GB RAM and 0.5\\n GHz CPU We recommend specifying a higher level of RAM for a better user experience.\\n\r\n", + "jobs": [], + "licenses": [], + "media": "cdrom", + "meta": {}, + "mounted_on": [], + "name": "FreeBSD 8.4", + "os": "other", + "owner": null, + "paid": false, + "resource_uri": "/api/2.0/libdrives/c143724d-4d40-4871-bc2c-5120b4263ab3/", + "size": 536870912, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "http://www.freebsd.org/", + "uuid": "c143724d-4d40-4871-bc2c-5120b4263ab3" + }, + { + "affinities": [], + "allow_multimount": false, + "arch": "32", + "category": [ + "general" + ], + "description": "Ubuntu 12.04 Desktop - 32bit - Install CD", + "favourite": true, + "image_type": "install", + "install_notes": "1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. \\n\r\nThe recommended minimum hardware requirements as publishes by Ubuntu.com are: 1GB RAM and 1GHrz CPU We recommend specifying a higher level of RAM for a better user experience. 
", + "jobs": [], + "licenses": [], + "media": "cdrom", + "meta": {}, + "mounted_on": [], + "name": "Ubuntu 12.04 Desktop", + "os": "linux", + "owner": null, + "paid": false, + "resource_uri": "/api/2.0/libdrives/703e693e-056d-4cd6-9531-36ec045fee7c/", + "size": 1000000000, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "http://www.ubuntu.com/", + "uuid": "703e693e-056d-4cd6-9531-36ec045fee7c" + }, + { + "affinities": [], + "allow_multimount": false, + "arch": "32", + "category": [ + "general" + ], + "description": "Ubuntu 12.04 Server - 32bit - Install CD", + "favourite": true, + "image_type": "install", + "install_notes": "1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. \\n\r\nThe recommended minimum hardware requirements as publishes by Ubuntu.com are: 0.5GB RAM and 0.5GHrz CPU We recommend specifying a higher level of RAM for a better user experience. 
\r\n", + "jobs": [], + "licenses": [], + "media": "cdrom", + "meta": {}, + "mounted_on": [], + "name": "Ubuntu 12.04 Server", + "os": "linux", + "owner": null, + "paid": false, + "resource_uri": "/api/2.0/libdrives/36b01118-55f4-454f-92ee-578eb6d99867/", + "size": 1000000000, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "http://www.ubuntu.com/", + "uuid": "36b01118-55f4-454f-92ee-578eb6d99867" + }, + { + "affinities": [], + "allow_multimount": false, + "arch": "64", + "category": [ + "general" + ], + "description": "Windows Server 2012 Standard - 64bit Install CD", + "favourite": false, + "image_type": "install", + "install_notes": "1. Attach the CD \\n Please be aware that the CD needs to be attached to the server to IDE. \\n \\n 2. Attach a Drive \\n Please be aware that the minimum drive size where you are going to install the OS should be 32 GB. \\n \\n 3. Connecting to your server via VNC \\n a) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n b) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n c) Start to configure your server. \\n \\n 4. Minimum Hardware Requirements \\n The recommended minimum hardware requirements as publishes by Microsoft can be found through the following link: http://msdn.microsoft.com/library/dn303418.aspx We recommend specifying a higher level of RAM for a better user experience. \\n \\n 5. Enter your license key. 
\\n", + "jobs": [], + "licenses": [], + "media": "cdrom", + "meta": {}, + "mounted_on": [], + "name": "Windows Server Standard 2012", + "os": "windows", + "owner": null, + "paid": true, + "resource_uri": "/api/2.0/libdrives/b4273b6d-b227-4966-9e6e-5d48cebfcaa5/", + "size": 3694116864, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "http://www.microsoft.com/", + "uuid": "b4273b6d-b227-4966-9e6e-5d48cebfcaa5" + }, + { + "affinities": [], + "allow_multimount": false, + "arch": "32", + "category": [ + "general" + ], + "description": "Knoppix 6 - 32bit - CD", + "favourite": false, + "image_type": "live", + "install_notes": "1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. \\n\r\nThe recommended minimum hardware requirements as publishes by Knoppix.net are: 512MB RAM and 512MHrz CPU We recommend specifying a higher level of RAM for a better user experience. 
", + "jobs": [], + "licenses": [], + "media": "cdrom", + "meta": {}, + "mounted_on": [], + "name": "Knoppix 6.4.3", + "os": "linux", + "owner": null, + "paid": false, + "resource_uri": "/api/2.0/libdrives/ed47dc6d-4efd-4c05-b2f8-ab32ccf6de3b/", + "size": 3670016000, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "http://knoppix.net/", + "uuid": "ed47dc6d-4efd-4c05-b2f8-ab32ccf6de3b" + }, + { + "affinities": [], + "allow_multimount": false, + "arch": "64", + "category": [ + "general" + ], + "description": "This image is produced by SixSq specifically to work with SlipStream.", + "favourite": false, + "image_type": "preinst", + "install_notes": "", + "jobs": [], + "licenses": [], + "media": "disk", + "meta": {}, + "mounted_on": [], + "name": "ubuntu-10.04-toMP", + "os": "linux", + "owner": null, + "paid": false, + "resource_uri": "/api/2.0/libdrives/5236b9ee-f735-42fd-a236-17558f9e12d3/", + "size": 3221225472, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "", + "uuid": "5236b9ee-f735-42fd-a236-17558f9e12d3" + }, + { + "affinities": [], + "allow_multimount": false, + "arch": "64", + "category": [ + "general" + ], + "description": "CentOS 6.4 Server - amd64 Pre-Installed English with SSH and VirtIO support. Last update on 2013/09/30.", + "favourite": false, + "image_type": "preinst", + "install_notes": "1. Clone the Image.\\n\r\nThe image needs to be cloned and then attached to the server.\\n\r\n\\n\r\n2. Connecting to your server via VNC.\\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI.\\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window.\\n\r\nc) Start to configure your server.\\n\r\n\\n\r\n3. Minimum Hardware Requirements.\\n\r\nThe recommended minimum hardware requirements as publishes by centos.org: 0.5GB RAM and 0.5GHz CPU\\n\r\n\\n\r\n4. 
Update your administrator password.\\n\r\nBy default and for security reasons \"root\" login is completely disabled (including for ssh)\\n\r\nUser \"cloudsigma\" with password \"cloudsigma\" is available for access.\\n\r\nPlease be aware that on the first login you will be asked to change the current password \"cloudsigma\" and set a secure password.\\n\r\nFor \"root\" please use the command \"sudo su\"\\n\r\n\\n\r\n5. Setup your CentOS.\\n\r\n\\n\r\n6. Configuring your Networking.\\n\r\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will assign the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\\n\r\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users. It is important that you secure access to your server according to your needs.\\n", + "jobs": [], + "licenses": [], + "media": "disk", + "meta": {}, + "mounted_on": [], + "name": "CentOS 6.4 Server", + "os": "linux", + "owner": null, + "paid": false, + "resource_uri": "/api/2.0/libdrives/cc08cd15-0c17-429f-bd56-15fefaca9d88/", + "size": 10737418240, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "http://www.centos.org/", + "uuid": "cc08cd15-0c17-429f-bd56-15fefaca9d88" + }, + { + "affinities": [], + "allow_multimount": false, + "arch": "64", + "category": [ + "general" + ], + "description": "Debian 6.0.7 - amd64 Pre-Installed English with SSH and VirtIO support. Last update on 2013/09/28.", + "favourite": false, + "image_type": "preinst", + "install_notes": "Intall notes:\t1. Clone the Image. \\n The image needs to be cloned and then attached to the server. \\n \\n 2. Connecting to your server via VNC. 
\\n a) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n b) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n c) Start to configure your server. \\n \\n 3. Minimum Hardware Requirements. \\n The recommended minimum hardware requirements as publishes by Debian.org: 1GB RAM and 1GHrz CPU \\n \\n 4. Update your administrator password. \\n Please be aware that on startup you will be asked to enter the current password \"password123\" and set a secure password. \\n \\n 5. Setup your Debian. \\n By default the timezone and the language are set to Switzerland. \\n \\n 6. Configuring your Networking. \\n a) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings. \\n b) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users. It is important that you secure access to your server according to your needs. \\n", + "jobs": [], + "licenses": [], + "media": "disk", + "meta": {}, + "mounted_on": [], + "name": "Debian 6.0.7 Desktop", + "os": "linux", + "owner": null, + "paid": false, + "resource_uri": "/api/2.0/libdrives/e848e216-76bb-4c1d-a376-54e4bdf54fe4/", + "size": 10737418240, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "http://www.debian.org/", + "uuid": "e848e216-76bb-4c1d-a376-54e4bdf54fe4" + }, + { + "affinities": [], + "allow_multimount": false, + "arch": "64", + "category": [ + "general" + ], + "description": "CentOS 6.3 - 64bit - DVD\r\n", + "favourite": false, + "image_type": "install", + "install_notes": "1. Attach the CD. 
\\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \\n\r\n \\n\r\n4. Minimum Hardware Requirements. \\n\r\nThe recommended minimum hardware requirements as publishes by centos.org are: 0.5 GB RAM and 0.5 GHz CPU We recommend specifying a higher level of RAM for a better user experience.", + "jobs": [], + "licenses": [], + "media": "cdrom", + "meta": {}, + "mounted_on": [], + "name": "CentOS 6.3 DVD", + "os": "linux", + "owner": null, + "paid": false, + "resource_uri": "/api/2.0/libdrives/65c99e46-296c-4d3f-ad1f-88dc06772624/", + "size": 4289396736, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "", + "uuid": "65c99e46-296c-4d3f-ad1f-88dc06772624" + }, + { + "affinities": [], + "allow_multimount": false, + "arch": "64", + "category": [ + "general" + ], + "description": "CentOS 6.4 - 64bit - DVD\r\n", + "favourite": false, + "image_type": "install", + "install_notes": "1. Attach the DVD. \\n\r\nPlease be aware that the DVD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. 
\\n\r\nThe recommended minimum hardware requirements as publishes by centos.org are: 0.5GB RAM and 0.5GHz CPU We recommend specifying a higher level of RAM for a better user experience.", + "jobs": [], + "licenses": [], + "media": "cdrom", + "meta": {}, + "mounted_on": [], + "name": "CentOS 6.4 DVD", + "os": "linux", + "owner": null, + "paid": false, + "resource_uri": "/api/2.0/libdrives/608e784a-5bff-4d25-afeb-bf7f998f56ef/", + "size": 4353392640, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "", + "uuid": "608e784a-5bff-4d25-afeb-bf7f998f56ef" + }, + { + "affinities": [], + "allow_multimount": false, + "arch": "64", + "category": [ + "general" + ], + "description": "Ubuntu 10.04.3 LTS Server Edition 64bit - CD", + "favourite": true, + "image_type": "install", + "install_notes": "\r\n1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. \\n\r\nThe recommended minimum hardware requirements as publishes by Ubuntu,com are: 0.5GB RAM and 0.5GHrz CPU We recommend specifying a higher level of RAM for a better user experience. 
", + "jobs": [], + "licenses": [], + "media": "cdrom", + "meta": {}, + "mounted_on": [], + "name": "Ubuntu 10.04.3 Server", + "os": "linux", + "owner": null, + "paid": false, + "resource_uri": "/api/2.0/libdrives/b69cbf27-d2a8-44f7-bc5a-3facc70021a8/", + "size": 1000000000, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "http://ubuntu.com/", + "uuid": "b69cbf27-d2a8-44f7-bc5a-3facc70021a8" + }, + { + "affinities": [], + "allow_multimount": false, + "arch": "64", + "category": [ + "general" + ], + "description": "Slackware-13.37 - 64bit - Install DVD", + "favourite": false, + "image_type": "install", + "install_notes": "1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. \\n\r\nThe recommended minimum hardware requirements as publishes by Slackware.com are: 128MB RAM and 512MHrz CPU We recommend specifying a higher level of RAM for a better user experience. 
", + "jobs": [], + "licenses": [], + "media": "cdrom", + "meta": {}, + "mounted_on": [], + "name": "Slackware 13.37", + "os": "linux", + "owner": null, + "paid": true, + "resource_uri": "/api/2.0/libdrives/e209e588-8c06-44ce-8d57-c10df32c5876/", + "size": 4613734400, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "", + "uuid": "e209e588-8c06-44ce-8d57-c10df32c5876" + }, + { + "affinities": [], + "allow_multimount": false, + "arch": "32", + "category": [ + "general" + ], + "description": "RedHat Enterprise 6.0 - 32bit - Install DVD", + "favourite": false, + "image_type": "install", + "install_notes": "1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. \\n\r\nThe recommended minimum hardware requirements as publishes by RedHat.com are: 2GB RAM and 2GHrz CPU We recommend specifying a higher level of RAM for a better user experience. ", + "jobs": [], + "licenses": [], + "media": "cdrom", + "meta": {}, + "mounted_on": [], + "name": "RedHat Enterprise 6.0", + "os": "linux", + "owner": null, + "paid": true, + "resource_uri": "/api/2.0/libdrives/a0638d80-bc5b-48a3-a7ba-dec2416239bf/", + "size": 2936012800, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "http://www.redhat.com/", + "uuid": "a0638d80-bc5b-48a3-a7ba-dec2416239bf" + }, + { + "affinities": [], + "allow_multimount": false, + "arch": "64", + "category": [ + "general" + ], + "description": "CentOS 6.3 SlipStream. 
This image is produced by SixSq specifically to work with SlipStream.", + "favourite": false, + "image_type": "preinst", + "install_notes": "CentOS 6.3 SlipStream", + "jobs": [], + "licenses": [], + "media": "disk", + "meta": {}, + "mounted_on": [], + "name": "CentOS 6.3 for SlipStream", + "os": "linux", + "owner": null, + "paid": false, + "resource_uri": "/api/2.0/libdrives/ac51c08f-d22b-4da8-9591-d343947f7455/", + "size": 7516192768, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "", + "uuid": "ac51c08f-d22b-4da8-9591-d343947f7455" + }, + { + "affinities": [], + "allow_multimount": false, + "arch": "64", + "category": [ + "general" + ], + "description": "Debian 6 - 64bit - CD", + "favourite": false, + "image_type": "install", + "install_notes": "1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. \\n\r\nThe recommended minimum hardware requirements as publishes by Debian.org are: 1GB RAM and 1GHrz CPU We recommend specifying a higher level of RAM for a better user experience. 
", + "jobs": [], + "licenses": [], + "media": "cdrom", + "meta": {}, + "mounted_on": [], + "name": "Debian 6", + "os": "linux", + "owner": null, + "paid": false, + "resource_uri": "/api/2.0/libdrives/98f810a3-b8f0-4441-89cd-02be4f2614d7/", + "size": 676331520, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "", + "uuid": "98f810a3-b8f0-4441-89cd-02be4f2614d7" + }, + { + "affinities": [], + "allow_multimount": false, + "arch": "64", + "category": [ + "general" + ], + "description": "Ubuntu 12.10 Server - 64bit - Install CD", + "favourite": true, + "image_type": "install", + "install_notes": "1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. \\n\r\nThe recommended minimum hardware requirements as publishes by Ubuntu.com are: 0.5GB RAM and 0.5GHrz CPU We recommend specifying a higher level of RAM for a better user experience. 
\r\n", + "jobs": [], + "licenses": [], + "media": "cdrom", + "meta": {}, + "mounted_on": [], + "name": "Ubuntu 12.10 Server", + "os": "linux", + "owner": null, + "paid": false, + "resource_uri": "/api/2.0/libdrives/6afbda4b-1027-4405-9ae9-c7d32f097d31/", + "size": 1000000000, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "http://www.ubuntu.com/", + "uuid": "6afbda4b-1027-4405-9ae9-c7d32f097d31" + }, + { + "affinities": [], + "allow_multimount": false, + "arch": "64", + "category": [ + "general" + ], + "description": "Debian 7.1 - 64bit - CD", + "favourite": false, + "image_type": "install", + "install_notes": "1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. 
\\n\r\nThe recommended minimum hardware requirements as publishes by Debian.org are: 1GB RAM and 1GHrz CPU We recommend specifying a higher level of RAM for a better user experience.", + "jobs": [], + "licenses": [], + "media": "cdrom", + "meta": {}, + "mounted_on": [], + "name": "Debian 7.1", + "os": "linux", + "owner": null, + "paid": false, + "resource_uri": "/api/2.0/libdrives/958bf26c-f25b-457d-aedb-a5cfb36bdeef/", + "size": 536870912, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "", + "uuid": "958bf26c-f25b-457d-aedb-a5cfb36bdeef" + }, + { + "affinities": [], + "allow_multimount": false, + "arch": "64", + "category": [ + "general" + ], + "description": "CentOS 6.2 - 64bit - DVD\r\n", + "favourite": false, + "image_type": "install", + "install_notes": "1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. 
\\n\r\nThe recommended minimum hardware requirements as publishes by centos.org are: 0.5 GB RAM and 0.5 GHz CPU We recommend specifying a higher level of RAM for a better user experience.\r\n", + "jobs": [], + "licenses": [], + "media": "cdrom", + "meta": {}, + "mounted_on": [], + "name": "CentOS 6.2 DVD", + "os": "linux", + "owner": null, + "paid": false, + "resource_uri": "/api/2.0/libdrives/d7cdd30f-2197-47ac-a878-c285c1e67426/", + "size": 4423139328, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "", + "uuid": "d7cdd30f-2197-47ac-a878-c285c1e67426" + }, + { + "affinities": [], + "allow_multimount": false, + "arch": "64", + "category": [ + "general" + ], + "description": "Debian 5.0 - 64bit - CD", + "favourite": true, + "image_type": "install", + "install_notes": "1. Attach the CD. \\n\r\nPlease be aware that the CD needs to be attached to the server to IDE. \r\n\\n\r\n \\n\r\n2. Attach a Drive. \\n\r\nPlease be aware that the minimum drive size where you are going to install the OS should be 5 GB. \r\n\\n\r\n \\n\r\n3. Connecting to your server via VNC. \\n\r\na) Having installed a compatible VNC client, open a VNC connection to your server through the UI. \\n\r\nb) Enter your VNC url and VNC password as displayed on your Server Properties Window. \\n\r\nc) Start to configure your server. \r\n\\n\r\n \\n\r\n4. Minimum Hardware Requirements. \\n\r\nThe recommended minimum hardware requirements as publishes by Debian.org are: 1GB RAM and 1GHrz CPU We recommend specifying a higher level of RAM for a better user experience. 
", + "jobs": [], + "licenses": [], + "media": "cdrom", + "meta": {}, + "mounted_on": [], + "name": "Debian 5.0", + "os": "linux", + "owner": null, + "paid": false, + "resource_uri": "/api/2.0/libdrives/794a068d-228c-4758-81f0-e1bc955a6cce/", + "size": 4697620480, + "status": "unmounted", + "storage_type": null, + "tags": [], + "url": "http://www.debian.org/", + "uuid": "794a068d-228c-4758-81f0-e1bc955a6cce" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/pricing.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/pricing.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/pricing.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/pricing.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,2889 @@ +{ + "current": { + "cpu": 5, + "dssd": 1, + "ip": 1, + "mem": 7, + "msft_7jq_00341": 1, + "msft_7nq_00302": 1, + "msft_lwa_00135": 1, + "msft_p71_01031": 1, + "msft_p73_04837": 1, + "msft_p73_04837_core": 1, + "msft_tfa_00009": 1, + "msft_tfa_00523": 1, + "sms": 1, + "ssd": 1, + "tx": 8, + "vlan": 1 + }, + "meta": { + "limit": 0, + "offset": 0, + "total_count": 316 + }, + "next": { + "cpu": 5, + "dssd": 1, + "ip": 1, + "mem": 7, + "msft_7jq_00341": 1, + "msft_7nq_00302": 1, + "msft_lwa_00135": 1, + "msft_p71_01031": 1, + "msft_p73_04837": 1, + "msft_p73_04837_core": 1, + "msft_tfa_00009": 1, + "msft_tfa_00523": 1, + "sms": 1, + "ssd": 1, + "tx": 10, + "vlan": 1 + }, + "objects": [ + { + "currency": "CHF", + "id": "18", + "level": 1, + "multiplier": 2783138807808000, + "price": "0.26600000000000000000", + "resource": "dssd", + "unit": "GB/month" + }, + { + "currency": "EUR", + "id": "20", + "level": 1, + "multiplier": 2783138807808000, + "price": "0.21000000000000000000", + "resource": "dssd", + "unit": "GB/month" + }, + { + "currency": "GBP", + "id": "22", + "level": 1, + "multiplier": 2783138807808000, + "price": "0.18200000000000000000", + 
"resource": "dssd", + "unit": "GB/month" + }, + { + "currency": "USD", + "id": "24", + "level": 1, + "multiplier": 2783138807808000, + "price": "0.28000000000000000000", + "resource": "dssd", + "unit": "GB/month" + }, + { + "currency": "CHF", + "id": "26", + "level": 1, + "multiplier": 2783138807808000, + "price": "0.76000000000000000000", + "resource": "ssd", + "unit": "GB/month" + }, + { + "currency": "EUR", + "id": "28", + "level": 1, + "multiplier": 2783138807808000, + "price": "0.60000000000000000000", + "resource": "ssd", + "unit": "GB/month" + }, + { + "currency": "GBP", + "id": "30", + "level": 1, + "multiplier": 2783138807808000, + "price": "0.52000000000000000000", + "resource": "ssd", + "unit": "GB/month" + }, + { + "currency": "USD", + "id": "32", + "level": 1, + "multiplier": 2783138807808000, + "price": "0.80000000000000000000", + "resource": "ssd", + "unit": "GB/month" + }, + { + "currency": "CHF", + "id": "34", + "level": 1, + "multiplier": 2592000, + "price": "4.75000000000000000000", + "resource": "ip", + "unit": "IP" + }, + { + "currency": "EUR", + "id": "36", + "level": 1, + "multiplier": 2592000, + "price": "3.75000000000000000000", + "resource": "ip", + "unit": "IP" + }, + { + "currency": "GBP", + "id": "38", + "level": 1, + "multiplier": 2592000, + "price": "3.25000000000000000000", + "resource": "ip", + "unit": "IP" + }, + { + "currency": "USD", + "id": "40", + "level": 1, + "multiplier": 2592000, + "price": "5.00000000000000000000", + "resource": "ip", + "unit": "IP" + }, + { + "currency": "USD", + "id": "8", + "level": 1, + "multiplier": 3600000, + "price": "0.00790500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "CHF", + "id": "42", + "level": 1, + "multiplier": 2592000, + "price": "19.00000000000000000000", + "resource": "vlan", + "unit": "VLAN" + }, + { + "currency": "USD", + "id": "104", + "level": 2, + "multiplier": 3600000, + "price": "0.00816000000000000000", + "resource": "cpu", + "unit": 
"GHz/hour" + }, + { + "currency": "EUR", + "id": "44", + "level": 1, + "multiplier": 2592000, + "price": "15.00000000000000000000", + "resource": "vlan", + "unit": "VLAN" + }, + { + "currency": "USD", + "id": "108", + "level": 3, + "multiplier": 3600000, + "price": "0.00841500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "46", + "level": 1, + "multiplier": 2592000, + "price": "13.00000000000000000000", + "resource": "vlan", + "unit": "VLAN" + }, + { + "currency": "USD", + "id": "112", + "level": 4, + "multiplier": 3600000, + "price": "0.00867000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "USD", + "id": "48", + "level": 1, + "multiplier": 2592000, + "price": "20.00000000000000000000", + "resource": "vlan", + "unit": "VLAN" + }, + { + "currency": "CHF", + "id": "2", + "level": 1, + "multiplier": 3600000, + "price": "0.00875500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "54", + "level": 1, + "multiplier": 1073741824, + "price": "0.01950000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "101", + "level": 2, + "multiplier": 3600000, + "price": "0.00901000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "CHF", + "id": "105", + "level": 3, + "multiplier": 3600000, + "price": "0.00935000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "CHF", + "id": "109", + "level": 4, + "multiplier": 3600000, + "price": "0.00960500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "CHF", + "id": "113", + "level": 5, + "multiplier": 3600000, + "price": "0.01028500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "CHF", + "id": "117", + "level": 6, + "multiplier": 3600000, + "price": "0.01088000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "CHF", + "id": "121", + 
"level": 7, + "multiplier": 3600000, + "price": "0.01207000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "CHF", + "id": "125", + "level": 8, + "multiplier": 3600000, + "price": "0.01385500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "CHF", + "id": "10", + "level": 1, + "multiplier": 3865470566400, + "price": "0.01256000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "133", + "level": 10, + "multiplier": 3865470566400, + "price": "0.02080000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "137", + "level": 11, + "multiplier": 3865470566400, + "price": "0.02192000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "141", + "level": 12, + "multiplier": 3865470566400, + "price": "0.02288000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "USD", + "id": "116", + "level": 5, + "multiplier": 3600000, + "price": "0.00926500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "USD", + "id": "120", + "level": 6, + "multiplier": 3600000, + "price": "0.00977500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "USD", + "id": "124", + "level": 7, + "multiplier": 3600000, + "price": "0.01088000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "USD", + "id": "16", + "level": 1, + "multiplier": 3865470566400, + "price": "0.01128000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "USD", + "id": "136", + "level": 10, + "multiplier": 3865470566400, + "price": "0.01872000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "USD", + "id": "140", + "level": 11, + "multiplier": 3865470566400, + "price": "0.01968000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "USD", + "id": "144", + 
"level": 12, + "multiplier": 3865470566400, + "price": "0.02064000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "USD", + "id": "148", + "level": 13, + "multiplier": 3865470566400, + "price": "0.02136000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "USD", + "id": "152", + "level": 14, + "multiplier": 3865470566400, + "price": "0.02184000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "USD", + "id": "156", + "level": 15, + "multiplier": 3865470566400, + "price": "0.02232000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "USD", + "id": "160", + "level": 16, + "multiplier": 3865470566400, + "price": "0.02280000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "4", + "level": 1, + "multiplier": 3600000, + "price": "0.00612000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "102", + "level": 2, + "multiplier": 3600000, + "price": "0.00637500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "106", + "level": 3, + "multiplier": 3600000, + "price": "0.00654500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "110", + "level": 4, + "multiplier": 3600000, + "price": "0.00671500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "114", + "level": 5, + "multiplier": 3600000, + "price": "0.00714000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "USD", + "id": "128", + "level": 8, + "multiplier": 3600000, + "price": "0.01249500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "USD", + "id": "132", + "level": 9, + "multiplier": 3600000, + "price": "0.01411000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "USD", + "id": "60", + "level": 10, + 
"multiplier": 3600000, + "price": "0.01623500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "USD", + "id": "64", + "level": 11, + "multiplier": 3600000, + "price": "0.01734000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "USD", + "id": "68", + "level": 12, + "multiplier": 3600000, + "price": "0.01844500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "USD", + "id": "72", + "level": 13, + "multiplier": 3600000, + "price": "0.01921000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "USD", + "id": "76", + "level": 14, + "multiplier": 3600000, + "price": "0.01980500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "USD", + "id": "80", + "level": 15, + "multiplier": 3600000, + "price": "0.02031500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "118", + "level": 6, + "multiplier": 3600000, + "price": "0.00756500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "122", + "level": 7, + "multiplier": 3600000, + "price": "0.00841500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "126", + "level": 8, + "multiplier": 3600000, + "price": "0.00969000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "130", + "level": 9, + "multiplier": 3600000, + "price": "0.01096500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "58", + "level": 10, + "multiplier": 3600000, + "price": "0.01266500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "62", + "level": 11, + "multiplier": 3600000, + "price": "0.01351500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "66", + "level": 12, + "multiplier": 3600000, + "price": 
"0.01428000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "CHF", + "id": "145", + "level": 13, + "multiplier": 3865470566400, + "price": "0.02376000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "149", + "level": 14, + "multiplier": 3865470566400, + "price": "0.02424000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "153", + "level": 15, + "multiplier": 3865470566400, + "price": "0.02480000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "157", + "level": 16, + "multiplier": 3865470566400, + "price": "0.02536000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "50", + "level": 1, + "multiplier": 1073741824, + "price": "0.03250000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "253", + "level": 2, + "multiplier": 1073741824, + "price": "0.03420000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "257", + "level": 3, + "multiplier": 1073741824, + "price": "0.03750000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "261", + "level": 4, + "multiplier": 1073741824, + "price": "0.04250000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "265", + "level": 5, + "multiplier": 1073741824, + "price": "0.04750000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "269", + "level": 6, + "multiplier": 1073741824, + "price": "0.05170000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "273", + "level": 7, + "multiplier": 1073741824, + "price": "0.05580000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "277", + "level": 8, + "multiplier": 1073741824, + "price": "0.05920000000000000000", + "resource": "tx", 
+ "unit": "GB" + }, + { + "currency": "CHF", + "id": "281", + "level": 9, + "multiplier": 1073741824, + "price": "0.06250000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "209", + "level": 10, + "multiplier": 1073741824, + "price": "0.06500000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "213", + "level": 11, + "multiplier": 1073741824, + "price": "0.06750000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "217", + "level": 12, + "multiplier": 1073741824, + "price": "0.07000000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "221", + "level": 13, + "multiplier": 1073741824, + "price": "0.07330000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "225", + "level": 14, + "multiplier": 1073741824, + "price": "0.07830000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "229", + "level": 15, + "multiplier": 1073741824, + "price": "0.08330000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "233", + "level": 16, + "multiplier": 1073741824, + "price": "0.08830000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "84", + "level": 16, + "multiplier": 3600000, + "price": "0.02091000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "5", + "level": 0, + "multiplier": 3600000, + "price": "0.01105000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "13", + "level": 0, + "multiplier": 3865470566400, + "price": "0.01352000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "53", + "level": 0, + "multiplier": 1073741824, + "price": "0.04225000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "GBP", + "id": "255", + "level": 
2, + "multiplier": 1073741824, + "price": "0.02050000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "129", + "level": 9, + "multiplier": 3600000, + "price": "0.01564000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "CHF", + "id": "57", + "level": 10, + "multiplier": 3600000, + "price": "0.01810500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "CHF", + "id": "61", + "level": 11, + "multiplier": 3600000, + "price": "0.01929500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "CHF", + "id": "65", + "level": 12, + "multiplier": 3600000, + "price": "0.02048500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "CHF", + "id": "69", + "level": 13, + "multiplier": 3600000, + "price": "0.02142000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "CHF", + "id": "73", + "level": 14, + "multiplier": 3600000, + "price": "0.02193000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "CHF", + "id": "77", + "level": 15, + "multiplier": 3600000, + "price": "0.02261000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "CHF", + "id": "81", + "level": 16, + "multiplier": 3600000, + "price": "0.02320500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "259", + "level": 3, + "multiplier": 1073741824, + "price": "0.02250000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "85", + "level": 17, + "multiplier": 3600000, + "price": "0.02558500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "263", + "level": 4, + "multiplier": 1073741824, + "price": "0.02550000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "89", + "level": 18, + "multiplier": 3600000, + "price": 
"0.02805000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "267", + "level": 5, + "multiplier": 1073741824, + "price": "0.02850000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "GBP", + "id": "271", + "level": 6, + "multiplier": 1073741824, + "price": "0.03100000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "210", + "level": 10, + "multiplier": 1073741824, + "price": "0.04550000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "212", + "level": 10, + "multiplier": 1073741824, + "price": "0.05850000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "GBP", + "id": "275", + "level": 7, + "multiplier": 1073741824, + "price": "0.03350000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "214", + "level": 11, + "multiplier": 1073741824, + "price": "0.04730000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "216", + "level": 11, + "multiplier": 1073741824, + "price": "0.06080000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "GBP", + "id": "279", + "level": 8, + "multiplier": 1073741824, + "price": "0.03550000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "218", + "level": 12, + "multiplier": 1073741824, + "price": "0.04900000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "220", + "level": 12, + "multiplier": 1073741824, + "price": "0.06300000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "GBP", + "id": "283", + "level": 9, + "multiplier": 1073741824, + "price": "0.03750000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "222", + "level": 13, + "multiplier": 1073741824, + "price": "0.05130000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + 
"currency": "USD", + "id": "224", + "level": 13, + "multiplier": 1073741824, + "price": "0.06600000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "GBP", + "id": "211", + "level": 10, + "multiplier": 1073741824, + "price": "0.03900000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "226", + "level": 14, + "multiplier": 1073741824, + "price": "0.05480000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "228", + "level": 14, + "multiplier": 1073741824, + "price": "0.07050000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "GBP", + "id": "215", + "level": 11, + "multiplier": 1073741824, + "price": "0.04050000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "230", + "level": 15, + "multiplier": 1073741824, + "price": "0.05830000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "232", + "level": 15, + "multiplier": 1073741824, + "price": "0.07500000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "GBP", + "id": "219", + "level": 12, + "multiplier": 1073741824, + "price": "0.04200000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "234", + "level": 16, + "multiplier": 1073741824, + "price": "0.06180000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "236", + "level": 16, + "multiplier": 1073741824, + "price": "0.07950000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "GBP", + "id": "223", + "level": 13, + "multiplier": 1073741824, + "price": "0.04400000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "238", + "level": 17, + "multiplier": 1073741824, + "price": "0.06530000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "240", + "level": 17, + "multiplier": 1073741824, + 
"price": "0.08400000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "GBP", + "id": "227", + "level": 14, + "multiplier": 1073741824, + "price": "0.04700000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "242", + "level": 18, + "multiplier": 1073741824, + "price": "0.07000000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "244", + "level": 18, + "multiplier": 1073741824, + "price": "0.09000000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "GBP", + "id": "231", + "level": 15, + "multiplier": 1073741824, + "price": "0.05000000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "246", + "level": 19, + "multiplier": 1073741824, + "price": "0.07580000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "248", + "level": 19, + "multiplier": 1073741824, + "price": "0.09750000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "52", + "level": 1, + "multiplier": 1073741824, + "price": "0.02280000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "56", + "level": 1, + "multiplier": 1073741824, + "price": "0.02930000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "GBP", + "id": "235", + "level": 16, + "multiplier": 1073741824, + "price": "0.05300000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "250", + "level": 20, + "multiplier": 1073741824, + "price": "0.08280000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "252", + "level": 20, + "multiplier": 1073741824, + "price": "0.10650000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "GBP", + "id": "239", + "level": 17, + "multiplier": 1073741824, + "price": "0.05600000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + 
"currency": "EUR", + "id": "254", + "level": 2, + "multiplier": 1073741824, + "price": "0.02390000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "256", + "level": 2, + "multiplier": 1073741824, + "price": "0.03080000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "GBP", + "id": "243", + "level": 18, + "multiplier": 1073741824, + "price": "0.06000000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "258", + "level": 3, + "multiplier": 1073741824, + "price": "0.02630000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "260", + "level": 3, + "multiplier": 1073741824, + "price": "0.03380000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "GBP", + "id": "247", + "level": 19, + "multiplier": 1073741824, + "price": "0.06500000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "262", + "level": 4, + "multiplier": 1073741824, + "price": "0.02980000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "264", + "level": 4, + "multiplier": 1073741824, + "price": "0.03830000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "GBP", + "id": "251", + "level": 20, + "multiplier": 1073741824, + "price": "0.07100000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "266", + "level": 5, + "multiplier": 1073741824, + "price": "0.03330000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "268", + "level": 5, + "multiplier": 1073741824, + "price": "0.04280000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "270", + "level": 6, + "multiplier": 1073741824, + "price": "0.03620000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "272", + "level": 6, + "multiplier": 1073741824, + "price": 
"0.04650000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "274", + "level": 7, + "multiplier": 1073741824, + "price": "0.03910000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "276", + "level": 7, + "multiplier": 1073741824, + "price": "0.05030000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "278", + "level": 8, + "multiplier": 1073741824, + "price": "0.04140000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "280", + "level": 8, + "multiplier": 1073741824, + "price": "0.05330000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "282", + "level": 9, + "multiplier": 1073741824, + "price": "0.04380000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "284", + "level": 9, + "multiplier": 1073741824, + "price": "0.05630000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "180", + "level": 2, + "multiplier": 3865470566400, + "price": "0.01152000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "93", + "level": 19, + "multiplier": 3600000, + "price": "0.03162000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "CHF", + "id": "97", + "level": 20, + "multiplier": 3600000, + "price": "0.03638000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "USD", + "id": "184", + "level": 3, + "multiplier": 3865470566400, + "price": "0.01176000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "306", + "level": 0, + "multiplier": 2592000, + "price": "15.69000000000000000000", + "resource": "msft_7nq_00302", + "unit": "" + }, + { + "currency": "CHF", + "id": "297", + "level": 0, + "multiplier": 2592000, + "price": "8.50000000000000000000", + "resource": "msft_p71_01031", + 
"unit": "" + }, + { + "currency": "CHF", + "id": "301", + "level": 0, + "multiplier": 2592000, + "price": "1.60000000000000000000", + "resource": "msft_p73_04837_core", + "unit": "" + }, + { + "currency": "CHF", + "id": "307", + "level": 0, + "multiplier": 2592000, + "price": "0.98000000000000000000", + "resource": "msft_tfa_00523", + "unit": "" + }, + { + "currency": "EUR", + "id": "295", + "level": 0, + "multiplier": 2592000, + "price": "350.00000000000000000000", + "resource": "msft_tfa_00009", + "unit": "count/month" + }, + { + "currency": "EUR", + "id": "313", + "level": 0, + "multiplier": 2592000, + "price": "0.73010000000000000000", + "resource": "msft_tfa_00523", + "unit": "" + }, + { + "currency": "GBP", + "id": "314", + "level": 0, + "multiplier": 2592000, + "price": "38.51520000000000000000", + "resource": "msft_7jq_00341", + "unit": "" + }, + { + "currency": "GBP", + "id": "315", + "level": 0, + "multiplier": 2592000, + "price": "10.04160000000000000000", + "resource": "msft_7nq_00302", + "unit": "" + }, + { + "currency": "GBP", + "id": "288", + "level": 0, + "multiplier": 2592000, + "price": "15.00000000000000000000", + "resource": "msft_lwa_00135", + "unit": "count/month" + }, + { + "currency": "GBP", + "id": "300", + "level": 0, + "multiplier": 2592000, + "price": "5.44000000000000000000", + "resource": "msft_p71_01031", + "unit": "" + }, + { + "currency": "GBP", + "id": "292", + "level": 0, + "multiplier": 2592000, + "price": "27.00000000000000000000", + "resource": "msft_p73_04837", + "unit": "count/month" + }, + { + "currency": "GBP", + "id": "304", + "level": 0, + "multiplier": 2592000, + "price": "1.02400000000000000000", + "resource": "msft_p73_04837_core", + "unit": "" + }, + { + "currency": "GBP", + "id": "296", + "level": 0, + "multiplier": 2592000, + "price": "300.00000000000000000000", + "resource": "msft_tfa_00009", + "unit": "count/month" + }, + { + "currency": "GBP", + "id": "316", + "level": 0, + "multiplier": 2592000, + "price": 
"0.62720000000000000000", + "resource": "msft_tfa_00523", + "unit": "" + }, + { + "currency": "USD", + "id": "188", + "level": 4, + "multiplier": 3865470566400, + "price": "0.01200000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "USD", + "id": "192", + "level": 5, + "multiplier": 3865470566400, + "price": "0.01248000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "USD", + "id": "196", + "level": 6, + "multiplier": 3865470566400, + "price": "0.01296000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "USD", + "id": "200", + "level": 7, + "multiplier": 3865470566400, + "price": "0.01392000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "USD", + "id": "204", + "level": 8, + "multiplier": 3865470566400, + "price": "0.01536000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "USD", + "id": "208", + "level": 9, + "multiplier": 3865470566400, + "price": "0.01680000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "USD", + "id": "164", + "level": 17, + "multiplier": 3865470566400, + "price": "0.02472000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "USD", + "id": "168", + "level": 18, + "multiplier": 3865470566400, + "price": "0.02664000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "USD", + "id": "172", + "level": 19, + "multiplier": 3865470566400, + "price": "0.02952000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "USD", + "id": "176", + "level": 20, + "multiplier": 3865470566400, + "price": "0.03336000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "178", + "level": 2, + "multiplier": 3865470566400, + "price": "0.00896000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "182", + "level": 3, + "multiplier": 
3865470566400, + "price": "0.00912000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "186", + "level": 4, + "multiplier": 3865470566400, + "price": "0.00936000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "190", + "level": 5, + "multiplier": 3865470566400, + "price": "0.00976000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "194", + "level": 6, + "multiplier": 3865470566400, + "price": "0.01008000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "198", + "level": 7, + "multiplier": 3865470566400, + "price": "0.01080000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "202", + "level": 8, + "multiplier": 3865470566400, + "price": "0.01200000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "206", + "level": 9, + "multiplier": 3865470566400, + "price": "0.01304000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "162", + "level": 17, + "multiplier": 3865470566400, + "price": "0.01920000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "166", + "level": 18, + "multiplier": 3865470566400, + "price": "0.02072000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "170", + "level": 19, + "multiplier": 3865470566400, + "price": "0.02296000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "174", + "level": 20, + "multiplier": 3865470566400, + "price": "0.02592000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "179", + "level": 2, + "multiplier": 3865470566400, + "price": "0.00768000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "183", + 
"level": 3, + "multiplier": 3865470566400, + "price": "0.00784000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "187", + "level": 4, + "multiplier": 3865470566400, + "price": "0.00800000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "191", + "level": 5, + "multiplier": 3865470566400, + "price": "0.00832000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "195", + "level": 6, + "multiplier": 3865470566400, + "price": "0.00864000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "1", + "level": 0, + "multiplier": 3600000, + "price": "0.01700000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "3", + "level": 0, + "multiplier": 3600000, + "price": "0.01360000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "11", + "level": 0, + "multiplier": 3865470566400, + "price": "0.01664000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "51", + "level": 0, + "multiplier": 1073741824, + "price": "0.05200000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "EUR", + "id": "19", + "level": 0, + "multiplier": 2783138807808000, + "price": "0.16000000000000000000", + "resource": "dssd", + "unit": "GB/month" + }, + { + "currency": "EUR", + "id": "27", + "level": 0, + "multiplier": 2783138807808000, + "price": "0.16000000000000000000", + "resource": "ssd", + "unit": "GB/month" + }, + { + "currency": "EUR", + "id": "35", + "level": 0, + "multiplier": 2592000, + "price": "4.00000000000000000000", + "resource": "ip", + "unit": "IP" + }, + { + "currency": "EUR", + "id": "43", + "level": 0, + "multiplier": 2592000, + "price": "8.00000000000000000000", + "resource": "vlan", + "unit": "VLAN" + }, + { + "currency": "GBP", + "id": "21", + "level": 0, + 
"multiplier": 2783138807808000, + "price": "0.01352000000000000000", + "resource": "dssd", + "unit": "GB/month" + }, + { + "currency": "GBP", + "id": "29", + "level": 0, + "multiplier": 2783138807808000, + "price": "0.01352000000000000000", + "resource": "ssd", + "unit": "GB/month" + }, + { + "currency": "GBP", + "id": "37", + "level": 0, + "multiplier": 2592000, + "price": "3.25000000000000000000", + "resource": "ip", + "unit": "IP" + }, + { + "currency": "GBP", + "id": "45", + "level": 0, + "multiplier": 2592000, + "price": "6.50000000000000000000", + "resource": "vlan", + "unit": "VLAN" + }, + { + "currency": "CHF", + "id": "9", + "level": 0, + "multiplier": 3865470566400, + "price": "0.02080000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "49", + "level": 0, + "multiplier": 1073741824, + "price": "0.06500000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "17", + "level": 0, + "multiplier": 2783138807808000, + "price": "0.20000000000000000000", + "resource": "dssd", + "unit": "GB/month" + }, + { + "currency": "CHF", + "id": "25", + "level": 0, + "multiplier": 2783138807808000, + "price": "0.45000000000000000000", + "resource": "ssd", + "unit": "GB/month" + }, + { + "currency": "CHF", + "id": "33", + "level": 0, + "multiplier": 2592000, + "price": "5.00000000000000000000", + "resource": "ip", + "unit": "IP" + }, + { + "currency": "CHF", + "id": "41", + "level": 0, + "multiplier": 2592000, + "price": "9.50000000000000000000", + "resource": "vlan", + "unit": "VLAN" + }, + { + "currency": "USD", + "id": "7", + "level": 0, + "multiplier": 3600000, + "price": "0.01640000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "USD", + "id": "15", + "level": 0, + "multiplier": 3865470566400, + "price": "0.01840000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "USD", + "id": "55", + "level": 0, + "multiplier": 1073741824, + 
"price": "0.05850000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "23", + "level": 0, + "multiplier": 2783138807808000, + "price": "0.18000000000000000000", + "resource": "dssd", + "unit": "GB/month" + }, + { + "currency": "USD", + "id": "31", + "level": 0, + "multiplier": 2783138807808000, + "price": "0.47400000000000000000", + "resource": "ssd", + "unit": "GB/month" + }, + { + "currency": "USD", + "id": "39", + "level": 0, + "multiplier": 2592000, + "price": "4.50000000000000000000", + "resource": "ip", + "unit": "IP" + }, + { + "currency": "USD", + "id": "47", + "level": 0, + "multiplier": 2592000, + "price": "10.00000000000000000000", + "resource": "vlan", + "unit": "VLAN" + }, + { + "currency": "CHF", + "id": "177", + "level": 2, + "multiplier": 3865470566400, + "price": "0.01280000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "181", + "level": 3, + "multiplier": 3865470566400, + "price": "0.01304000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "185", + "level": 4, + "multiplier": 3865470566400, + "price": "0.01328000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "189", + "level": 5, + "multiplier": 3865470566400, + "price": "0.01392000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "193", + "level": 6, + "multiplier": 3865470566400, + "price": "0.01440000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "197", + "level": 7, + "multiplier": 3865470566400, + "price": "0.01552000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "201", + "level": 8, + "multiplier": 3865470566400, + "price": "0.01712000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "205", + "level": 9, + "multiplier": 3865470566400, + 
"price": "0.01872000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "161", + "level": 17, + "multiplier": 3865470566400, + "price": "0.02744000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "165", + "level": 18, + "multiplier": 3865470566400, + "price": "0.02960000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "169", + "level": 19, + "multiplier": 3865470566400, + "price": "0.03280000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "173", + "level": 20, + "multiplier": 3865470566400, + "price": "0.03704000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "237", + "level": 17, + "multiplier": 1073741824, + "price": "0.09330000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "241", + "level": 18, + "multiplier": 1073741824, + "price": "0.10000000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "245", + "level": 19, + "multiplier": 1073741824, + "price": "0.10830000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "CHF", + "id": "249", + "level": 20, + "multiplier": 1073741824, + "price": "0.11830000000000000000", + "resource": "tx", + "unit": "GB" + }, + { + "currency": "USD", + "id": "88", + "level": 17, + "multiplier": 3600000, + "price": "0.02303500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "USD", + "id": "92", + "level": 18, + "multiplier": 3600000, + "price": "0.02524500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "USD", + "id": "96", + "level": 19, + "multiplier": 3600000, + "price": "0.02847500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "USD", + "id": "100", + "level": 20, + "multiplier": 3600000, + "price": 
"0.03281000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "70", + "level": 13, + "multiplier": 3600000, + "price": "0.01496000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "74", + "level": 14, + "multiplier": 3600000, + "price": "0.01538500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "78", + "level": 15, + "multiplier": 3600000, + "price": "0.01581000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "82", + "level": 16, + "multiplier": 3600000, + "price": "0.01623500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "86", + "level": 17, + "multiplier": 3600000, + "price": "0.01793500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "90", + "level": 18, + "multiplier": 3600000, + "price": "0.01955000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "94", + "level": 19, + "multiplier": 3600000, + "price": "0.02210000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "98", + "level": 20, + "multiplier": 3600000, + "price": "0.02550000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "EUR", + "id": "12", + "level": 1, + "multiplier": 3865470566400, + "price": "0.00880000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "134", + "level": 10, + "multiplier": 3865470566400, + "price": "0.01456000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "138", + "level": 11, + "multiplier": 3865470566400, + "price": "0.01528000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "142", + "level": 12, + "multiplier": 3865470566400, + "price": 
"0.01608000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "146", + "level": 13, + "multiplier": 3865470566400, + "price": "0.01664000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "150", + "level": 14, + "multiplier": 3865470566400, + "price": "0.01696000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "154", + "level": 15, + "multiplier": 3865470566400, + "price": "0.01736000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "EUR", + "id": "158", + "level": 16, + "multiplier": 3865470566400, + "price": "0.01776000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "6", + "level": 1, + "multiplier": 3600000, + "price": "0.00527000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "103", + "level": 2, + "multiplier": 3600000, + "price": "0.00544000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "107", + "level": 3, + "multiplier": 3600000, + "price": "0.00561000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "111", + "level": 4, + "multiplier": 3600000, + "price": "0.00578000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "115", + "level": 5, + "multiplier": 3600000, + "price": "0.00612000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "119", + "level": 6, + "multiplier": 3600000, + "price": "0.00654500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "123", + "level": 7, + "multiplier": 3600000, + "price": "0.00722500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "127", + "level": 8, + "multiplier": 3600000, + "price": 
"0.00833000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "131", + "level": 9, + "multiplier": 3600000, + "price": "0.00943500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "59", + "level": 10, + "multiplier": 3600000, + "price": "0.01088000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "63", + "level": 11, + "multiplier": 3600000, + "price": "0.01156000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "67", + "level": 12, + "multiplier": 3600000, + "price": "0.01232500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "71", + "level": 13, + "multiplier": 3600000, + "price": "0.01283500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "75", + "level": 14, + "multiplier": 3600000, + "price": "0.01326000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "79", + "level": 15, + "multiplier": 3600000, + "price": "0.01360000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "83", + "level": 16, + "multiplier": 3600000, + "price": "0.01394000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "87", + "level": 17, + "multiplier": 3600000, + "price": "0.01538500000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "91", + "level": 18, + "multiplier": 3600000, + "price": "0.01683000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "95", + "level": 19, + "multiplier": 3600000, + "price": "0.01904000000000000000", + "resource": "cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "99", + "level": 20, + "multiplier": 3600000, + "price": "0.02193000000000000000", + "resource": 
"cpu", + "unit": "GHz/hour" + }, + { + "currency": "GBP", + "id": "14", + "level": 1, + "multiplier": 3865470566400, + "price": "0.00752000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "199", + "level": 7, + "multiplier": 3865470566400, + "price": "0.00928000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "203", + "level": 8, + "multiplier": 3865470566400, + "price": "0.01024000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "207", + "level": 9, + "multiplier": 3865470566400, + "price": "0.01120000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "135", + "level": 10, + "multiplier": 3865470566400, + "price": "0.01248000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "139", + "level": 11, + "multiplier": 3865470566400, + "price": "0.01312000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "143", + "level": 12, + "multiplier": 3865470566400, + "price": "0.01376000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "147", + "level": 13, + "multiplier": 3865470566400, + "price": "0.01424000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "151", + "level": 14, + "multiplier": 3865470566400, + "price": "0.01456000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "155", + "level": 15, + "multiplier": 3865470566400, + "price": "0.01488000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "159", + "level": 16, + "multiplier": 3865470566400, + "price": "0.01520000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "163", + "level": 17, + "multiplier": 3865470566400, + "price": 
"0.01648000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "167", + "level": 18, + "multiplier": 3865470566400, + "price": "0.01776000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "171", + "level": 19, + "multiplier": 3865470566400, + "price": "0.01968000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "GBP", + "id": "175", + "level": 20, + "multiplier": 3865470566400, + "price": "0.02224000000000000000", + "resource": "mem", + "unit": "GB/hour" + }, + { + "currency": "CHF", + "id": "305", + "level": 0, + "multiplier": 2592000, + "price": "60.18000000000000000000", + "resource": "msft_7jq_00341", + "unit": "" + }, + { + "currency": "CHF", + "id": "285", + "level": 0, + "multiplier": 2592000, + "price": "25.00000000000000000000", + "resource": "msft_lwa_00135", + "unit": "count/month" + }, + { + "currency": "CHF", + "id": "289", + "level": 0, + "multiplier": 2592000, + "price": "45.00000000000000000000", + "resource": "msft_p73_04837", + "unit": "count/month" + }, + { + "currency": "CHF", + "id": "293", + "level": 0, + "multiplier": 2592000, + "price": "500.00000000000000000000", + "resource": "msft_tfa_00009", + "unit": "count/month" + }, + { + "currency": "USD", + "id": "308", + "level": 0, + "multiplier": 2592000, + "price": "58.07370000000000000000", + "resource": "msft_7jq_00341", + "unit": "" + }, + { + "currency": "USD", + "id": "309", + "level": 0, + "multiplier": 2592000, + "price": "15.14085000000000000000", + "resource": "msft_7nq_00302", + "unit": "" + }, + { + "currency": "USD", + "id": "286", + "level": 0, + "multiplier": 2592000, + "price": "22.50000000000000000000", + "resource": "msft_lwa_00135", + "unit": "count/month" + }, + { + "currency": "USD", + "id": "298", + "level": 0, + "multiplier": 2592000, + "price": "8.20250000000000000000", + "resource": "msft_p71_01031", + "unit": "" + }, + { + "currency": "USD", + "id": "290", 
+ "level": 0, + "multiplier": 2592000, + "price": "40.50000000000000000000", + "resource": "msft_p73_04837", + "unit": "count/month" + }, + { + "currency": "USD", + "id": "302", + "level": 0, + "multiplier": 2592000, + "price": "1.54400000000000000000", + "resource": "msft_p73_04837_core", + "unit": "" + }, + { + "currency": "USD", + "id": "294", + "level": 0, + "multiplier": 2592000, + "price": "450.00000000000000000000", + "resource": "msft_tfa_00009", + "unit": "count/month" + }, + { + "currency": "USD", + "id": "310", + "level": 0, + "multiplier": 2592000, + "price": "0.94570000000000000000", + "resource": "msft_tfa_00523", + "unit": "" + }, + { + "currency": "EUR", + "id": "311", + "level": 0, + "multiplier": 2592000, + "price": "44.83410000000000000000", + "resource": "msft_7jq_00341", + "unit": "" + }, + { + "currency": "EUR", + "id": "312", + "level": 0, + "multiplier": 2592000, + "price": "11.68905000000000000000", + "resource": "msft_7nq_00302", + "unit": "" + }, + { + "currency": "EUR", + "id": "287", + "level": 0, + "multiplier": 2592000, + "price": "17.50000000000000000000", + "resource": "msft_lwa_00135", + "unit": "count/month" + }, + { + "currency": "EUR", + "id": "299", + "level": 0, + "multiplier": 2592000, + "price": "6.33250000000000000000", + "resource": "msft_p71_01031", + "unit": "" + }, + { + "currency": "EUR", + "id": "291", + "level": 0, + "multiplier": 2592000, + "price": "31.50000000000000000000", + "resource": "msft_p73_04837", + "unit": "count/month" + }, + { + "currency": "EUR", + "id": "303", + "level": 0, + "multiplier": 2592000, + "price": "1.19200000000000000000", + "resource": "msft_p73_04837_core", + "unit": "" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_attach_policy.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_attach_policy.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_attach_policy.json 1970-01-01 00:00:00.000000000 
+0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_attach_policy.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,43 @@ +{ + "context": true, + "cpu": 2000, + "cpu_model": null, + "cpus_instead_of_cores": false, + "drives": [], + "enable_numa": false, + "hv_relaxed": false, + "hv_tsc": false, + "mem": 536870912, + "meta": {}, + "name": "test_server_updated", + "nics": [ + { + "boot_order": null, + "firewall_policy": { + "resource_uri": "/api/2.0/fwpolicies/461dfb8c-e641-43d7-a20e-32e2aa399086/", + "uuid": "461dfb8c-e641-43d7-a20e-32e2aa399086" + }, + "ip_v4_conf": { + "conf": "dhcp", + "ip": null + }, + "ip_v6_conf": null, + "mac": "22:58:83:c4:07:fc", + "model": "virtio", + "runtime": null, + "vlan": null + } + ], + "owner": { + "resource_uri": "/api/2.0/user/5b4a69a3-8e78-4c45-a8ba-8b13f0895e23/", + "uuid": "5b4a69a3-8e78-4c45-a8ba-8b13f0895e23" + }, + "requirements": [], + "resource_uri": "/api/2.0/servers/d6bde7f2-69ca-4825-909b-fcc08ea928ef/", + "runtime": null, + "smp": 1, + "status": "stopped", + "tags": [], + "uuid": "d6bde7f2-69ca-4825-909b-fcc08ea928ef", + "vnc_password": "updated_password" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_avail_groups.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_avail_groups.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_avail_groups.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_avail_groups.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,13 @@ +[ + [ + "63456dc4-36f9-4a3f-b478-4d376b8ff5a9", + "ede05e68-c997-4aad-816a-39469fd1a562" + ], + [ + "ad8caf99-45c4-45fc-8ba7-acb8a68be66f", + "4b9e1487-0b80-4f65-9c3e-e840dde27ccd" + ], + [ + "658bafdf-8fbf-4fc6-be4f-74ecc7f0e8a5" + ] +] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_clone.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_clone.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_clone.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_clone.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,61 @@ +{ + "context": true, + "cpu": 1000, + "cpu_model": null, + "cpus_instead_of_cores": false, + "drives": [ + { + "boot_order": 1, + "dev_channel": "0:0", + "device": "ide", + "drive": { + "resource_uri": "/api/2.0/drives/f1e42abe-f7db-4dcc-b37e-e53aca7a3ba9/", + "uuid": "f1e42abe-f7db-4dcc-b37e-e53aca7a3ba9" + } + } + ], + "enable_numa": false, + "hv_relaxed": false, + "hv_tsc": false, + "mem": 2147483648, + "meta": { + "description": "ddd", + "ssh_public_key": "" + }, + "name": "test cloned node", + "nics": [ + { + "boot_order": null, + "firewall_policy": { + "resource_uri": "/api/2.0/fwpolicies/461dfb8c-e641-43d7-a20e-32e2aa399086/", + "uuid": "461dfb8c-e641-43d7-a20e-32e2aa399086" + }, + "ip_v4_conf": { + "conf": "dhcp", + "ip": null + }, + "ip_v6_conf": null, + "mac": "22:76:4c:96:e1:98", + "model": "virtio", + "runtime": null, + "vlan": null + } + ], + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + }, + "requirements": [], + "resource_uri": "/api/2.0/servers/470ea5b9-3beb-4506-9cac-e3c63002480b/", + "runtime": null, + "smp": 1, + "status": "cloning", + "tags": [ + { + "resource_uri": "/api/2.0/tags/e60bb2d2-08d4-4255-adac-5faf87efcdd2/", + "uuid": "e60bb2d2-08d4-4255-adac-5faf87efcdd2" + } + ], + "uuid": "470ea5b9-3beb-4506-9cac-e3c63002480b", + "vnc_password": "xxxx" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_close_vnc.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_close_vnc.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_close_vnc.json 
1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_close_vnc.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "action": "close_vnc", + "result": "success", + "uuid": "2e64e5e4-f31d-471a-ac1b-1ae079652e40" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_create.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_create.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_create.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_create.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,44 @@ +{ + "objects": [ + { + "context": true, + "cpu": 1000, + "cpu_model": null, + "cpus_instead_of_cores": false, + "drives": [], + "enable_numa": false, + "hv_relaxed": false, + "hv_tsc": false, + "mem": 536870912, + "meta": {}, + "name": "test node", + "nics": [ + { + "boot_order": null, + "firewall_policy": null, + "ip_v4_conf": { + "conf": "dhcp", + "ip": null + }, + "ip_v6_conf": null, + "mac": "22:08:76:b1:ce:41", + "model": "virtio", + "runtime": null, + "vlan": null + } + ], + "owner": { + "resource_uri": "/api/2.0/user/5b4a69a3-8e78-4c45-a8ba-8b13f0895e23/", + "uuid": "5b4a69a3-8e78-4c45-a8ba-8b13f0895e23" + }, + "requirements": [], + "resource_uri": "/api/2.0/servers/9de75ed6_fd33_45e2_963f_d405f31fd911/", + "runtime": null, + "smp": 1, + "status": "started", + "tags": [], + "uuid": "9de75ed6_fd33_45e2_963f_d405f31fd911", + "vnc_password": "testserver" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_create_with_vlan.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_create_with_vlan.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_create_with_vlan.json 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_create_with_vlan.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,68 @@ +{ + "objects": [ + { + "context": true, + "cpu": 1100, + "cpu_model": null, + "cpus_instead_of_cores": false, + "drives": [ + { + "boot_order": 1, + "dev_channel": "0:0", + "device": "ide", + "drive": { + "resource_uri": "/api/2.0/drives/7c0efbb2-b1e8-4e77-9d72-9f9f9d75ae7b/", + "uuid": "7c0efbb2-b1e8-4e77-9d72-9f9f9d75ae7b" + }, + "runtime": null + } + ], + "enable_numa": false, + "hv_relaxed": false, + "hv_tsc": false, + "mem": 671088640, + "meta": {}, + "name": "test node vlan", + "nics": [ + { + "boot_order": null, + "firewall_policy": null, + "ip_v4_conf": { + "conf": "dhcp", + "ip": null + }, + "ip_v6_conf": null, + "mac": "22:94:75:3c:16:34", + "model": "virtio", + "runtime": null, + "vlan": null + }, + { + "boot_order": null, + "firewall_policy": null, + "ip_v4_conf": null, + "ip_v6_conf": null, + "mac": "22:84:c4:af:f3:fc", + "model": "virtio", + "runtime": null, + "vlan": { + "resource_uri": "/api/2.0/vlans/39ae851d-433f-4ac2-a803-ffa24cb1fa3e/", + "uuid": "39ae851d-433f-4ac2-a803-ffa24cb1fa3e" + } + } + ], + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + }, + "requirements": [], + "resource_uri": "/api/2.0/servers/c8b034fb-9e66-4892-be12-a36121d4b704/", + "runtime": null, + "smp": 1, + "status": "stopped", + "tags": [], + "uuid": "9de75ed6_fd33_45e2_963f_d405f31fd911", + "vnc_password": "testserver" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_detail_all_stopped.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_detail_all_stopped.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_detail_all_stopped.json 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_detail_all_stopped.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,104 @@ +{ + "meta": { + "limit": 20, + "offset": 0, + "total_count": 2 + }, + "objects": [ + { + "context": true, + "cpu": 1000, + "cpu_model": null, + "cpus_instead_of_cores": false, + "drives": [], + "enable_numa": false, + "hv_relaxed": false, + "hv_tsc": false, + "mem": 1073741824, + "meta": { + "description": "test description 2", + "ssh_public_key": "" + }, + "name": "test no drives", + "nics": [ + { + "boot_order": null, + "firewall_policy": null, + "ip_v4_conf": { + "conf": "dhcp", + "ip": null + }, + "ip_v6_conf": null, + "mac": "22:98:ce:04:50:df", + "model": "virtio", + "runtime": null, + "vlan": null + } + ], + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + }, + "requirements": [], + "resource_uri": "/api/2.0/servers/9de75ed6-fd33-45e2-963f-d405f31fd911/", + "runtime": null, + "smp": 1, + "status": "stopped", + "tags": [], + "uuid": "9de75ed6-fd33-45e2-963f-d405f31fd911", + "vnc_password": "bar" + }, + { + "context": true, + "cpu": 2000, + "cpu_model": null, + "cpus_instead_of_cores": false, + "drives": [ + { + "boot_order": 1, + "dev_channel": "0:0", + "device": "ide", + "drive": { + "resource_uri": "/api/2.0/drives/3f74acec-ba3c-4efd-ab9e-5d95a4c5fca9/", + "uuid": "3f74acec-ba3c-4efd-ab9e-5d95a4c5fca9" + } + } + ], + "enable_numa": false, + "hv_relaxed": false, + "hv_tsc": false, + "mem": 2147483648, + "meta": { + "description": "test1" + }, + "name": "test-1", + "nics": [ + { + "boot_order": null, + "firewall_policy": null, + "ip_v4_conf": { + "conf": "dhcp", + "ip": null + }, + "ip_v6_conf": null, + "mac": "22:22:4e:1e:e0:7e", + "model": "virtio", + "runtime": null, + "vlan": null + } + ], + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + 
}, + "requirements": [], + "resource_uri": "/api/2.0/servers/9414bbeb-e908-4e55-ae3f-2eb61adc50d8/", + "runtime": null, + "smp": 1, + "status": "stopped", + "tags": [], + "uuid": "9414bbeb-e908-4e55-ae3f-2eb61adc50d8", + "vnc_password": "foo" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_detail_mixed_state.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_detail_mixed_state.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_detail_mixed_state.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_detail_mixed_state.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,162 @@ +{ + "meta": { + "limit": 20, + "offset": 0, + "total_count": 2 + }, + "objects": [ + { + "context": true, + "cpu": 1000, + "cpu_model": null, + "cpus_instead_of_cores": false, + "drives": [], + "enable_numa": false, + "hv_relaxed": false, + "hv_tsc": false, + "mem": 1073741824, + "meta": { + "description": "test description 2", + "ssh_public_key": "" + }, + "name": "test no drives", + "nics": [ + { + "boot_order": null, + "firewall_policy": null, + "ip_v4_conf": { + "conf": "dhcp", + "ip": null + }, + "ip_v6_conf": null, + "mac": "22:98:ce:04:50:df", + "model": "virtio", + "runtime": { + "interface_type": "public", + "io": { + "bytes_recv": "1323", + "bytes_sent": "21535", + "packets_recv": "3", + "packets_sent": "278" + }, + "ip_v4": { + "resource_uri": "/api/2.0/ips/185.12.5.181/", + "uuid": "185.12.5.181" + }, + "ip_v6": null + }, + "vlan": null + }, + { + "boot_order": null, + "firewall_policy": null, + "ip_v4_conf": { + "conf": "dhcp", + "ip": null + }, + "ip_v6_conf": null, + "mac": "22:2c:03:99:32:be", + "model": "virtio", + "runtime": { + "interface_type": "public", + "io": { + "bytes_recv": "0", + "bytes_sent": "0", + "packets_recv": "0", + "packets_sent": "0" + }, + "ip_v4": { + "resource_uri": "/api/2.0/ips/178.22.68.55/", + 
"uuid": "178.22.68.55" + }, + "ip_v6": null + }, + "vlan": null + } + ], + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + }, + "requirements": [], + "resource_uri": "/api/2.0/servers/9de75ed6-fd33-45e2-963f-d405f31fd911/", + "runtime": { + "active_since": "2013-11-05T10:15:42+00:00", + "nics": [ + { + "interface_type": "public", + "io": { + "bytes_recv": "1323", + "bytes_sent": "21535", + "packets_recv": "3", + "packets_sent": "278" + }, + "ip_v4": { + "resource_uri": "/api/2.0/ips/185.12.5.181/", + "uuid": "185.12.5.181" + }, + "ip_v6": null, + "mac": "22:98:ce:04:50:df" + } + ] + }, + "smp": 1, + "status": "running", + "tags": [], + "uuid": "9de75ed6-fd33-45e2-963f-d405f31fd911", + "vnc_password": "foo" + }, + { + "context": true, + "cpu": 2000, + "cpu_model": null, + "cpus_instead_of_cores": false, + "drives": [ + { + "boot_order": 1, + "dev_channel": "0:0", + "device": "ide", + "drive": { + "resource_uri": "/api/2.0/drives/3f74acec-ba3c-4efd-ab9e-5d95a4c5fca9/", + "uuid": "3f74acec-ba3c-4efd-ab9e-5d95a4c5fca9" + } + } + ], + "enable_numa": false, + "hv_relaxed": false, + "hv_tsc": false, + "mem": 2147483648, + "meta": { + "description": "test1" + }, + "name": "test-1", + "nics": [ + { + "boot_order": null, + "firewall_policy": null, + "ip_v4_conf": { + "conf": "dhcp", + "ip": null + }, + "ip_v6_conf": null, + "mac": "22:22:4e:1e:e0:7e", + "model": "virtio", + "runtime": null, + "vlan": null + } + ], + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + }, + "requirements": [], + "resource_uri": "/api/2.0/servers/9414bbeb-e908-4e55-ae3f-2eb61adc50d8/", + "runtime": null, + "smp": 1, + "status": "stopped", + "tags": [], + "uuid": "9414bbeb-e908-4e55-ae3f-2eb61adc50d8", + "vnc_password": "bar" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_open_vnc.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_open_vnc.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_open_vnc.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/servers_open_vnc.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,6 @@ +{ + "action": "open_vnc", + "result": "success", + "uuid": "2e64e5e4-f31d-471a-ac1b-1ae079652e40", + "vnc_url": "vnc://direct.lvs.cloudsigma.com:41111" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/start_already_started.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/start_already_started.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/start_already_started.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/start_already_started.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1 @@ +[{"error_point": null, "error_type": "permission", "error_message": "Cannot start guest in state \"started\". 
Guest should be in state \"stopped\""}] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/start_success.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/start_success.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/start_success.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/start_success.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1 @@ +{"action": "start", "result": "success", "uuid": "9414bbeb-e908-4e55-ae3f-2eb61adc50d8"} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/stop_already_stopped.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/stop_already_stopped.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/stop_already_stopped.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/stop_already_stopped.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1 @@ +[{"error_point": null, "error_type": "permission", "error_message": "Cannot stop guest in state \"stopped\". 
Guest should be in state \"['started', 'running_legacy']\""}] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/stop_success.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/stop_success.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/stop_success.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/stop_success.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1 @@ +{"action": "stop", "result": "success", "uuid": "9414bbeb-e908-4e55-ae3f-2eb61adc50d8"} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/subscriptions.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/subscriptions.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/subscriptions.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/subscriptions.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,105 @@ +{ + "meta": { + "limit": 20, + "offset": 0, + "total_count": 5 + }, + "objects": [ + { + "amount": "1", + "auto_renew": true, + "descendants": [], + "discount_amount": null, + "discount_percent": null, + "end_time": "2014-02-20T11:12:34.930946+00:00", + "id": "7272", + "last_notification": null, + "period": "345 days, 0:00:00", + "price": "0E-20", + "remaining": "1", + "resource": "vlan", + "resource_uri": "/api/2.0/subscriptions/7272/", + "start_time": "2013-03-12T11:12:34.930946+00:00", + "status": "active", + "subscribed_object": "96537817-f4b6-496b-a861-e74192d3ccb0", + "uuid": "509f8e27-1e64-49bb-aa5a-baec074b0210" + }, + { + "amount": "1", + "auto_renew": true, + "descendants": [], + "discount_amount": null, + "discount_percent": null, + "end_time": "2014-02-20T11:12:41.837474+00:00", + "id": "7273", + "last_notification": null, + "period": "345 days, 0:00:00", + "price": "0E-20", + "remaining": "1", + "resource": "ip", + "resource_uri": "/api/2.0/subscriptions/7273/", + 
"start_time": "2013-03-12T11:12:41.837474+00:00", + "status": "active", + "subscribed_object": "185.12.6.183", + "uuid": "c2423c1a-8768-462c-bdc3-4ca09c1e650b" + }, + { + "amount": "17179869184", + "auto_renew": true, + "descendants": [], + "discount_amount": null, + "discount_percent": null, + "end_time": "2014-02-20T14:04:14.142181+00:00", + "id": "3985", + "last_notification": null, + "period": "365 days, 0:00:00", + "price": "0E-20", + "remaining": "17179869184", + "resource": "mem", + "resource_uri": "/api/2.0/subscriptions/3985/", + "start_time": "2013-02-20T14:04:14.142181+00:00", + "status": "active", + "subscribed_object": null, + "uuid": "9bb117d3-4bc5-4e2d-a907-b20abd48eaf9" + }, + { + "amount": "8000", + "auto_renew": true, + "descendants": [], + "discount_amount": null, + "discount_percent": null, + "end_time": "2014-02-20T14:04:29.040258+00:00", + "id": "3986", + "last_notification": null, + "period": "365 days, 0:00:00", + "price": "0E-20", + "remaining": "8000", + "resource": "cpu", + "resource_uri": "/api/2.0/subscriptions/3986/", + "start_time": "2013-02-20T14:04:29.040258+00:00", + "status": "active", + "subscribed_object": null, + "uuid": "a265c47f-1a00-4095-acfc-2193622bfbd8" + }, + { + "amount": "32212254720", + "auto_renew": true, + "descendants": [], + "discount_amount": null, + "discount_percent": null, + "end_time": "2014-02-20T14:04:44.088984+00:00", + "id": "3987", + "last_notification": null, + "period": "365 days, 0:00:00", + "price": "0E-20", + "remaining": "32212254720", + "resource": "dssd", + "resource_uri": "/api/2.0/subscriptions/3987/", + "start_time": "2013-02-20T14:04:44.088984+00:00", + "status": "active", + "subscribed_object": null, + "uuid": "8965ff95-4924-40a9-b923-a58615149732" + } + ], + "price": "0E-20" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_create.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_create.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_create.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_create.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "objects": [ + { + "meta": {}, + "name": "test tag 3", + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + }, + "resource_uri": "/api/2.0/tags/c0008127-6dbf-4cf3-85f5-203f4c3967fa/", + "resources": [], + "uuid": "c0008127-6dbf-4cf3-85f5-203f4c3967fa" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_create_with_resources.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_create_with_resources.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_create_with_resources.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_create_with_resources.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,25 @@ +{ + "objects": [ + { + "meta": {}, + "name": "test tag 3", + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + }, + "resource_uri": "/api/2.0/tags/c0008127-6dbf-4cf3-85f5-203f4c3967fa/", + "resources": [ + { + "owner": { + "resource_uri": "/api/2.0/user/5b4a69a3-8e78-4c45-a8ba-8b13f0895e23/", + "uuid": "5b4a69a3-8e78-4c45-a8ba-8b13f0895e23" + }, + "res_type": "vlans", + "resource_uri": "/api/2.0/vlans/96537817-f4b6-496b-a861-e74192d3ccb0/", + "uuid": "1" + } + ], + "uuid": "c0008127-6dbf-4cf3-85f5-203f4c3967fa" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_detail.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_detail.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_detail.json 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_detail.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,31 @@ +{ + "meta": { + "limit": 20, + "offset": 0, + "total_count": 2 + }, + "objects": [ + { + "meta": {}, + "name": "test tag 2", + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + }, + "resource_uri": "/api/2.0/tags/a010ec41-2ead-4630-a1d0-237fa77e4d4d/", + "resources": [], + "uuid": "a010ec41-2ead-4630-a1d0-237fa77e4d4d" + }, + { + "meta": {}, + "name": "test tag 1", + "owner": { + "resource_uri": "/api/2.0/user/69fcfc03-d635-4f99-a8b3-e1b73637cb5d/", + "uuid": "69fcfc03-d635-4f99-a8b3-e1b73637cb5d" + }, + "resource_uri": "/api/2.0/tags/e60bb2d2-08d4-4255-adac-5faf87efcdd2/", + "resources": [], + "uuid": "e60bb2d2-08d4-4255-adac-5faf87efcdd2" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_get.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_get.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_get.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_get.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,11 @@ +{ + "meta": {}, + "name": "test tag 2", + "owner": { + "resource_uri": "/api/2.0/user/5b4a69a3-8e78-4c45-a8ba-8b13f0895e23/", + "uuid": "5b4a69a3-8e78-4c45-a8ba-8b13f0895e23" + }, + "resource_uri": "/api/2.0/tags/a010ec41-2ead-4630-a1d0-237fa77e4d4d/", + "resources": [], + "uuid": "a010ec41-2ead-4630-a1d0-237fa77e4d4d" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_update.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_update.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_update.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/tags_update.json 2014-05-26 
15:42:51.000000000 +0000 @@ -0,0 +1,30 @@ +{ + "meta": {}, + "name": "test tag 3", + "owner": { + "resource_uri": "/api/2.0/user/5b4a69a3-8e78-4c45-a8ba-8b13f0895e23/", + "uuid": "5b4a69a3-8e78-4c45-a8ba-8b13f0895e23" + }, + "resource_uri": "/api/2.0/tags/900ac9c6-2f98-48a4-b406-5494b4ea4663/", + "resources": [ + { + "owner": { + "resource_uri": "/api/2.0/user/5b4a69a3-8e78-4c45-a8ba-8b13f0895e23/", + "uuid": "5b4a69a3-8e78-4c45-a8ba-8b13f0895e23" + }, + "res_type": "servers", + "resource_uri": "/api/2.0/servers/79f7853b-04bd-44f5-a2c2-fa56f6861994/", + "uuid": "79f7853b-04bd-44f5-a2c2-fa56f6861994" + }, + { + "owner": { + "resource_uri": "/api/2.0/user/5b4a69a3-8e78-4c45-a8ba-8b13f0895e23/", + "uuid": "5b4a69a3-8e78-4c45-a8ba-8b13f0895e23" + }, + "res_type": "drives", + "resource_uri": "/api/2.0/drives/8c48e0bd-e17b-49ca-8926-654107d2b7e7/", + "uuid": "8c48e0bd-e17b-49ca-8926-654107d2b7e7" + } + ], + "uuid": "900ac9c6-2f98-48a4-b406-5494b4ea4663" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/unknown_error.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/unknown_error.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudsigma_2_0/unknown_error.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudsigma_2_0/unknown_error.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1 @@ +[{"error_point": null, "error_type": "backend", "error_message": "unknown error"}] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/associateIpAddress_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/associateIpAddress_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/associateIpAddress_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/associateIpAddress_default.json 2013-09-09 15:24:52.000000000 +0000 @@ -0,0 +1 @@ +{ "associateipaddressresponse" : 
{"id":"10987171-8cc9-4d0a-b98f-1698c09ddd2d","jobid":11111} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/attachVolume_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/attachVolume_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/attachVolume_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/attachVolume_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "attachvolumeresponse" : {"jobid":"attachvolumejob"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/authorizeSecurityGroupEgress_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/authorizeSecurityGroupEgress_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/authorizeSecurityGroupEgress_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/authorizeSecurityGroupEgress_default.json 2013-09-17 09:15:58.000000000 +0000 @@ -0,0 +1,2 @@ +{ "authorizesecuritygroupegressresponse" : {"jobid":17202} } + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/authorizeSecurityGroupIngress_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/authorizeSecurityGroupIngress_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/authorizeSecurityGroupIngress_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/authorizeSecurityGroupIngress_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,2 @@ +{ "authorizesecuritygroupingressresponse" : {"jobid":17200} } + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/createNetwork_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/createNetwork_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/createNetwork_default.json 1970-01-01 00:00:00.000000000 
+0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/createNetwork_default.json 2014-06-27 11:27:01.000000000 +0000 @@ -0,0 +1 @@ +{ "createnetworkresponse" : { "network" : {"id":"a804d341-996e-4d9a-b2b0-226c648dc6e3","name":"test","displaytext":"test","broadcastdomaintype":"Lswitch","traffictype":"Guest","gateway":"10.1.1.1","netmask":"255.255.255.0","cidr":"10.1.1.0/24","zoneid":"2","zonename":"BETA-SBP-DC-1","networkofferingid":"c348cabe-0208-49e0-91ad-32b88c55fd8c","networkofferingname":"SourceNatNiciraNvpNetwork","networkofferingdisplaytext":"Offering for a Nicira Nvp isolated network with SourceNat","networkofferingconservemode":true,"networkofferingavailability":"Optional","issystem":false,"state":"Allocated","related":"a804d341-996e-4d9a-b2b0-226c648dc6e3","dns1":"8.8.8.8","dns2":"8.8.8.4","type":"Isolated","acltype":"Account","account":"rkuipers_admin","projectid":"d5f1209d-3a28-4dfb-8cc1-884e5d5e1d56","domainid":"4b6e626c-9d50-4480-bf77-daae632c7ffd","domain":"rkuipers","service":[{"name":"Firewall","capability":[{"name":"SupportedProtocols","value":"tcp,udp,icmp","canchooseservicecapability":false},{"name":"SupportedTrafficDirection","value":"ingress, egress","canchooseservicecapability":false},{"name":"MultipleIps","value":"true","canchooseservicecapability":false},{"name":"SupportedEgressProtocols","value":"tcp,udp,icmp, all","canchooseservicecapability":false},{"name":"TrafficStatistics","value":"per public ip","canchooseservicecapability":false}]},{"name":"StaticNat"},{"name":"Lb","capability":[{"name":"LbSchemes","value":"Public","canchooseservicecapability":false},{"name":"SupportedStickinessMethods","value":"[{\"methodname\":\"LbCookie\",\"paramlist\":[{\"paramname\":\"cookie-name\",\"required\":false,\"isflag\":false,\"description\":\" \"},{\"paramname\":\"mode\",\"required\":false,\"isflag\":false,\"description\":\" \"},{\"paramname\":\"nocache\",\"required\":false,\"isflag\":true,\"description\":\" 
\"},{\"paramname\":\"indirect\",\"required\":false,\"isflag\":true,\"description\":\" \"},{\"paramname\":\"postonly\",\"required\":false,\"isflag\":true,\"description\":\" \"},{\"paramname\":\"domain\",\"required\":false,\"isflag\":false,\"description\":\" \"}],\"description\":\"This is loadbalancer cookie based stickiness method.\"},{\"methodname\":\"AppCookie\",\"paramlist\":[{\"paramname\":\"cookie-name\",\"required\":false,\"isflag\":false,\"description\":\" \"},{\"paramname\":\"length\",\"required\":false,\"isflag\":false,\"description\":\" \"},{\"paramname\":\"holdtime\",\"required\":false,\"isflag\":false,\"description\":\" \"},{\"paramname\":\"request-learn\",\"required\":false,\"isflag\":true,\"description\":\" \"},{\"paramname\":\"prefix\",\"required\":false,\"isflag\":true,\"description\":\" \"},{\"paramname\":\"mode\",\"required\":false,\"isflag\":false,\"description\":\" \"}],\"description\":\"This is App session based sticky method. Define session stickiness on an existing application cookie. 
It can be used only for a specific http traffic\"},{\"methodname\":\"SourceBased\",\"paramlist\":[{\"paramname\":\"tablesize\",\"required\":false,\"isflag\":false,\"description\":\" \"},{\"paramname\":\"expire\",\"required\":false,\"isflag\":false,\"description\":\" \"}],\"description\":\"This is source based Stickiness method, it can be used for any type of protocol.\"}]","canchooseservicecapability":false},{"name":"SupportedLBIsolation","value":"dedicated","canchooseservicecapability":false},{"name":"SupportedLbAlgorithms","value":"roundrobin,leastconn,source","canchooseservicecapability":false},{"name":"SupportedProtocols","value":"tcp, udp","canchooseservicecapability":false}]},{"name":"SourceNat","capability":[{"name":"RedundantRouter","value":"true","canchooseservicecapability":false},{"name":"SupportedSourceNatTypes","value":"peraccount","canchooseservicecapability":false}]},{"name":"Dns","capability":[{"name":"AllowDnsSuffixModification","value":"true","canchooseservicecapability":false}]},{"name":"Connectivity"},{"name":"Vpn","capability":[{"name":"SupportedVpnTypes","value":"pptp,l2tp,ipsec","canchooseservicecapability":false},{"name":"VpnTypes","value":"removeaccessvpn","canchooseservicecapability":false}]},{"name":"Dhcp","capability":[{"name":"DhcpAccrossMultipleSubnets","value":"true","canchooseservicecapability":false}]},{"name":"UserData"},{"name":"PortForwarding"}],"networkdomain":"rkuipers.local","physicalnetworkid":"e48527a6-6882-4c5f-bce9-c02ecd5ef8c1","restartrequired":false,"specifyipranges":false,"vpcid":"22e8388c-21bf-4b84-8f20-e92a7f550898","canusefordeploy":true,"ispersistent":false,"tags":[],"displaynetwork":true} } } \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/createPortForwardingRule_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/createPortForwardingRule_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/createPortForwardingRule_default.json 
1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/createPortForwardingRule_default.json 2013-09-09 15:24:52.000000000 +0000 @@ -0,0 +1 @@ +{ "createportforwardingruleresponse" : {"id":"bc7ea3ee-a2c3-4b86-a53f-01bdaa1b2e32","jobid":"11113"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/createSecurityGroup_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/createSecurityGroup_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/createSecurityGroup_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/createSecurityGroup_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"createsecuritygroupresponse":{"securitygroup":{"account":"runseb@gmail.com","domainid":"ab53d864-6f78-4993-bb28-9b8667b535a1","id":"895f9e41-4d89-468e-9b69-19a8f0d3a889","domain":"runseb@gmail.com","name":"MySG"}}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/createSSHKeyPair_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/createSSHKeyPair_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/createSSHKeyPair_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/createSSHKeyPair_default.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1 @@ +{ "createsshkeypairresponse" : { "keypair" : {"name":"test-keypair","fingerprint":"51:9f:81:30:ec:82:0c:e5:8c:81:ac:14:27:d0:e5:e2","privatekey":"-----BEGIN RSA PRIVATE 
KEY-----\nMIICXQIBAAKBgQDMaSZY4v228AWWcXYLoojgaZ+K8SbuI8YoPDEi9UWcww5mWSTx\nVl6Ksb8YPFxL6+3/unlfr4zK1LksxgN8XRuZr+YBFGphUB6a5EcyshkXi3mfAE7d\n6a26ah6ySXFK9GmZoXcJqQ1xLC9rKGPL7tWgHmbX1lCbN6QinV0mZVEHNwIDAQAB\nAoGACXQngN7mqwpIx99xfTJEMFTSOyPSEBt5c6zs/NfpI0nmJZej3MGI19NGqkFI\nZ35+4F/ocyN0WIEkG00BJkRMHWdPNd+YnVSuVgEyGCD8hDvBbUEQrmdZ0VfQt+2q\nd52g573s6D6Skk/SZHGi3yHca4H52c3EpLJzThxUmJSSqmECQQD0loEIiQzQaap3\n/Gce7nZeLCSNXf0Q5aKFQv/X22srw6YvJ9/25cLahiFtQUadId9VUXSYTgEKX0ST\nB2CZ4UJxAkEA1fK/PT+YIHaiQIiCK/xTnoIuTvdXmH0IozolRxGAKpQZNvaMpKgn\nvXU84/yztekEPG0pKmCm7CZUZoGdfiJoJwJALwUsAy8NtpdJvU1ZqbmgKdSEpmS2\nPORYjRPnSWEWRlCThyc8SCO9hPMaQ/2zjIuxep5xMsJ0MsFD1pwpdwu2EQJBAMrG\nEZ7ZQTOzfMAxIT7THeWjeIR7RNhP2PnrSB19Zr30M5m2P0Jn5ZJZJWbnwOPuf4dN\n5rA1fr9e7KtiuYQs1A0CQQCT06qHdHaQr78A6YTEbDVr0M57qVrdsm5xyXzCmpMy\n9LxXAACghjHbjF//FEOjNG21IutbCg6cNIRz5EM8+MD+\n-----END RSA PRIVATE KEY-----\n"} } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/createTags_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/createTags_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/createTags_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/createTags_default.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1 @@ +{ "createtagsresponse" : {"id":"60338035-92fb-4d27-98d4-b60ad4b38b87","jobid":"createtagsjob"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/createVolume_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/createVolume_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/createVolume_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/createVolume_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "createvolumeresponse" : {"id":"60338035-92fb-4d27-98d4-b60ad4b38b87","jobid":"createvolumejob"} } diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/createVolume_withcustomdisksize.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/createVolume_withcustomdisksize.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/createVolume_withcustomdisksize.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/createVolume_withcustomdisksize.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "createvolumeresponse" : {"id":"60338035-92fb-4d27-98d4-b60ad4b38b87","jobid":"createvolumejob"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deleteNetwork_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deleteNetwork_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deleteNetwork_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deleteNetwork_default.json 2014-06-27 11:27:01.000000000 +0000 @@ -0,0 +1 @@ +{ "deletenetworkresponse" : {"jobid":"deleteNetwork"} } \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deletePortForwardingRule_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deletePortForwardingRule_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deletePortForwardingRule_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deletePortForwardingRule_default.json 2013-09-09 15:24:52.000000000 +0000 @@ -0,0 +1 @@ +{ "deleteportforwardingruleresponse" : {"jobid":"11114"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deleteSecurityGroup_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deleteSecurityGroup_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deleteSecurityGroup_default.json 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deleteSecurityGroup_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,2 @@ +{ "deletesecuritygroupresponse" : { "success" : "true"} } + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deleteSSHKeyPair_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deleteSSHKeyPair_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deleteSSHKeyPair_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deleteSSHKeyPair_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,2 @@ +{ "deletesshkeypairresponse" : { "success" : "true"} } + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deleteTags_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deleteTags_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deleteTags_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deleteTags_default.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1 @@ +{ "deletetagsresponse" : {"id":"60338035-92fb-4d27-98d4-b60ad4b38b87","jobid":"deletetagsjob"} } \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deleteVolume_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deleteVolume_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deleteVolume_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deleteVolume_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "deletevolumeresponse" : { "success" : "true"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_default.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "deployvirtualmachineresponse" : {"jobid":17164,"id":2602} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail2.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail2.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail2.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail2.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "deployvirtualmachineresponse" : {"jobid":17177,"id":2602} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "deployvirtualmachineresponse" : {"errorcode" : 431, "errortext" : "Unable to find service offering: 104"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deploykeyname.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deploykeyname.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deploykeyname.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deploykeyname.json 2013-09-09 15:24:52.000000000 +0000 @@ -0,0 +1 @@ +{ 
"deployvirtualmachineresponse" : {"id":"fc4fd31a","jobid":"11116"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployproject.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployproject.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployproject.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployproject.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1 @@ +{ "deployvirtualmachineresponse" : {"jobid":11117,"id":"19253fbf-abb7-4013-a8a1-97df3b93f206"} } \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deploysecuritygroup.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deploysecuritygroup.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deploysecuritygroup.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deploysecuritygroup.json 2013-09-09 15:24:52.000000000 +0000 @@ -0,0 +1 @@ +{ "deployvirtualmachineresponse" : {"id":"fc4fd31a-16d3-49db-814a-56b39b9ef986","jobid":"11115"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/destroyVirtualMachine_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/destroyVirtualMachine_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/destroyVirtualMachine_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/destroyVirtualMachine_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "destroyvirtualmachineresponse" : {"jobid":17166} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/detachVolume_default.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/detachVolume_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/detachVolume_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/detachVolume_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "detachvolumeresponse" : {"jobid":"detachvolumejob"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/disassociateIpAddress_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/disassociateIpAddress_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/disassociateIpAddress_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/disassociateIpAddress_default.json 2013-09-09 15:24:52.000000000 +0000 @@ -0,0 +1 @@ +{ "disassociateipaddressresponse" : {"jobid":"11112"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/dummy_rsa.pub libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/dummy_rsa.pub --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/dummy_rsa.pub 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/dummy_rsa.pub 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw3Fw6AdX+3Ul3lRJIPE0Hd5oBaHnCVB1Wl325FVeJZeQiKF9Z0sw+/StWuo2ZA5ra6/A8X7tITiO7goUncdd7xLT3r3UCwKGNZXrTn8e2Kutqd9S7EN+SUh63kZmcEQsFCuC3hg0O8TzG5ROQxukYc+7PAvcYk7+KV8r3B5eh2lvp5tHTpCX/63pm4zHm5rnE38DnESeh4Dh2R8hkhnoxo9ixQCdETbufUTo5abCkKbcf8/1+qA5A13uXqBsx/KtmZX0SvyQW3hKFPGXSaYxAE/u+DZU8Myr/dDKLrGPYt6e5CSXlQLFcnz99akuVdqOP9ygPGcgwlAajOZgt+Vwn sebgoa@sebmini.local diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listDiskOfferings_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listDiskOfferings_default.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listDiskOfferings_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listDiskOfferings_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listdiskofferingsresponse" : { "count":1 ,"diskoffering" : [ {"id":"6345e3b7-227e-4209-8f8c-1f94219696e6","name":"Disk offer 1","displaytext":"Disk offer 1 display name","disksize":10,"created":"2012-04-24T16:35:55+0200","iscustomized":false} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listDiskOfferings_withcustomdisksize.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listDiskOfferings_withcustomdisksize.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listDiskOfferings_withcustomdisksize.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listDiskOfferings_withcustomdisksize.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listdiskofferingsresponse" : { "count":1 ,"diskoffering" : [ {"id":"6345e3b7-227e-4209-8f8c-1f94219696e6","name":"Disk offer 1","displaytext":"Disk offer 1 display name","disksize":10,"created":"2012-04-24T16:35:55+0200","iscustomized":true} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listNetworkOfferings_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listNetworkOfferings_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listNetworkOfferings_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listNetworkOfferings_default.json 2014-06-27 11:27:01.000000000 +0000 @@ -0,0 +1 @@ +{ "listnetworkofferingsresponse" : { "count":2 ,"networkoffering" : [ {"id":"c348cabe-0208-49e0-91ad-32b88c55fd8c","name":"SourceNatNiciraNvpNetwork","displaytext":"Offering for a Nicira Nvp isolated network with 
SourceNat","tags":"BETA-SBP-DC-1-pSTT","traffictype":"Guest","isdefault":true,"specifyvlan":false,"conservemode":true,"specifyipranges":false,"availability":"Optional","networkrate":-1,"state":"Enabled","guestiptype":"Isolated","serviceofferingid":"01f93707-3a35-44a6-84e9-ea767287a6b2","service":[{"name":"Firewall","provider":[{"name":"VirtualRouter"}]},{"name":"StaticNat","provider":[{"name":"VirtualRouter"}],"capability":[{"name":"ElasticIp","value":"false","canchooseservicecapability":false},{"name":"AssociatePublicIP","value":"false","canchooseservicecapability":false}]},{"name":"Lb","provider":[{"name":"VirtualRouter"}],"capability":[{"name":"SupportedLBIsolation","value":"dedicated","canchooseservicecapability":false},{"name":"ElasticLb","value":"false","canchooseservicecapability":false},{"name":"InlineMode","value":"false","canchooseservicecapability":false}]},{"name":"SourceNat","provider":[{"name":"VirtualRouter"}],"capability":[{"name":"SupportedSourceNatTypes","value":"peraccount","canchooseservicecapability":false},{"name":"RedundantRouter","value":"false","canchooseservicecapability":false}]},{"name":"Dns","provider":[{"name":"VirtualRouter"}]},{"name":"Connectivity","provider":[{"name":"NiciraNvp"}]},{"name":"Vpn","provider":[{"name":"VirtualRouter"}]},{"name":"Dhcp","provider":[{"name":"VirtualRouter"}]},{"name":"UserData","provider":[{"name":"VirtualRouter"}]},{"name":"PortForwarding","provider":[{"name":"VirtualRouter"}]}],"forvpc":false,"ispersistent":false,"egressdefaultpolicy":false}, {"id":"7c09e208-2af5-43d6-9f0b-53868ef788ea","name":"OAT offering for OAT purposes","displaytext":"OAT offering for OAT 
purposes","tags":"BETA-SBP-DC-1-pSTT","traffictype":"Guest","isdefault":false,"specifyvlan":false,"conservemode":true,"specifyipranges":false,"availability":"Optional","networkrate":-1,"state":"Enabled","guestiptype":"Isolated","serviceofferingid":"01f93707-3a35-44a6-84e9-ea767287a6b2","service":[{"name":"Firewall","provider":[{"name":"VirtualRouter"}]},{"name":"StaticNat","provider":[{"name":"VirtualRouter"}],"capability":[{"name":"ElasticIp","value":"false","canchooseservicecapability":false},{"name":"AssociatePublicIP","value":"false","canchooseservicecapability":false}]},{"name":"Lb","provider":[{"name":"VirtualRouter"}],"capability":[{"name":"SupportedLBIsolation","value":"dedicated","canchooseservicecapability":false},{"name":"ElasticLb","value":"false","canchooseservicecapability":false},{"name":"InlineMode","value":"false","canchooseservicecapability":false}]},{"name":"SourceNat","provider":[{"name":"VirtualRouter"}],"capability":[{"name":"SupportedSourceNatTypes","value":"peraccount","canchooseservicecapability":false},{"name":"RedundantRouter","value":"false","canchooseservicecapability":false}]},{"name":"Dns","provider":[{"name":"VirtualRouter"}]},{"name":"Connectivity","provider":[{"name":"NiciraNvp"}]},{"name":"Vpn","provider":[{"name":"VirtualRouter"}]},{"name":"Dhcp","provider":[{"name":"VirtualRouter"}]},{"name":"UserData","provider":[{"name":"VirtualRouter"}]},{"name":"PortForwarding","provider":[{"name":"VirtualRouter"}]}],"forvpc":false,"ispersistent":false,"egressdefaultpolicy":true,"maxconnections":4096} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listNetworks_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listNetworks_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listNetworks_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listNetworks_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ 
"listnetworksresponse" : {"count": 1, "network": [{"domain": "ROOT", "acltype": "Domain", "specifyipranges": true, "related": "00304a04-c7ea-4e77-a786-18bc64347bf7", "zoneid": "1128bd56-b4d9-4ac6-a7b9-c715b187ce11", "domainid": "4a8857b8-7235-4e31-a7ef-b8b44d180850", "displaytext": "guestNetworkForBasicZone", "id": "00304a04-c7ea-4e77-a786-18bc64347bf7", "canusefordeploy": true, "physicalnetworkid": "07f747f5-b445-487f-b2d7-81a5a512989e", "networkdomain": "cs1cloud.internal", "service": [{"name": "SecurityGroup"}, {"name": "UserData"}, {"name": "Dhcp"}], "state": "Setup", "type": "Shared", "zonename": "CH-GV2", "networkofferingavailability": "Optional", "networkofferingid": "45964a3a-8a1c-4438-a377-0ff1e264047a", "tags": [], "networkofferingdisplaytext": "Exoscale Offering for Shared Security group enabled networks", "subdomainaccess": true, "traffictype": "Guest", "restartrequired": false, "broadcastdomaintype": "Vlan", "name": "guestNetworkForBasicZone", "dns2": "80.245.17.230", "dns1": "80.245.17.229", "networkofferingname": "ExoscaleSharedNetworkOfferingWithSGService", "issystem": false}]} } \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listNetworks_deployfail2.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listNetworks_deployfail2.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listNetworks_deployfail2.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listNetworks_deployfail2.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listnetworksresponse" : { "network" : [ {"id":860,"name":"Virtual Network","displaytext":"A dedicated virtualized network for your account. 
The broadcast domain is contained within a VLAN and all public network access is routed out by a virtual router.","broadcastdomaintype":"Vlan","traffictype":"Guest","zoneid":1,"networkofferingid":6,"networkofferingname":"DefaultVirtualizedNetworkOffering","networkofferingdisplaytext":"Virtual Vlan","networkofferingavailability":"Required","isshared":false,"issystem":false,"state":"Implemented","related":860,"broadcasturi":"vlan://1459","dns1":"1.1.1.1","dns2":"1.1.1.2","type":"Virtual","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","isdefault":true,"service":[{"name":"Gateway"},{"name":"Firewall","capability":[{"name":"MultipleIps","value":"true"},{"name":"TrafficStatistics","value":"per public ip"},{"name":"StaticNat","value":"true"},{"name":"SupportedProtocols","value":"tcp,udp"},{"name":"SupportedSourceNatTypes","value":"per account"}]},{"name":"UserData"},{"name":"Dns"},{"name":"Dhcp"},{"name":"Lb","capability":[{"name":"TrafficStatistics","value":"per public ip"},{"name":"SupportedProtocols","value":"tcp,udp"},{"name":"SupportedLbAlgorithms","value":"roundrobin,leastconn"}]}],"networkdomain":"cs363local","securitygroupenabled":false} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listNetworks_deployfail.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listNetworks_deployfail.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listNetworks_deployfail.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listNetworks_deployfail.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listnetworksresponse" : { "network" : [ {"id":860,"name":"Virtual Network","displaytext":"A dedicated virtualized network for your account. 
The broadcast domain is contained within a VLAN and all public network access is routed out by a virtual router.","broadcastdomaintype":"Vlan","traffictype":"Guest","zoneid":1,"networkofferingid":6,"networkofferingname":"DefaultVirtualizedNetworkOffering","networkofferingdisplaytext":"Virtual Vlan","networkofferingavailability":"Required","isshared":false,"issystem":false,"state":"Implemented","related":860,"broadcasturi":"vlan://1459","dns1":"1.1.1.1","dns2":"1.1.1.2","type":"Virtual","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","isdefault":true,"service":[{"name":"Gateway"},{"name":"Firewall","capability":[{"name":"MultipleIps","value":"true"},{"name":"TrafficStatistics","value":"per public ip"},{"name":"StaticNat","value":"true"},{"name":"SupportedProtocols","value":"tcp,udp"},{"name":"SupportedSourceNatTypes","value":"per account"}]},{"name":"UserData"},{"name":"Dns"},{"name":"Dhcp"},{"name":"Lb","capability":[{"name":"TrafficStatistics","value":"per public ip"},{"name":"SupportedProtocols","value":"tcp,udp"},{"name":"SupportedLbAlgorithms","value":"roundrobin,leastconn"}]}],"networkdomain":"cs363local","securitygroupenabled":false} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listPortForwardingRules_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listPortForwardingRules_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listPortForwardingRules_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listPortForwardingRules_default.json 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1 @@ +{ "listportforwardingrulesresponse" : { "count":1 ,"portforwardingrule" : [ 
{"id":"bc7ea3ee-a2c3-4b86-a53f-01bdaa1b2e32","privateport":"33","privateendport":"34","protocol":"tcp","publicport":"33","publicendport":"34","virtualmachineid":"2600","virtualmachinename":"testlib","virtualmachinedisplayname":"testlib","ipaddressid":"96dac96f-0b5d-42c1-b5de-8a97f3e34c43","ipaddress":"1.1.1.116","state":"Active","cidrlist":"","tags":[]} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listProjects_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listProjects_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listProjects_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listProjects_default.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,49 @@ +{ "listprojectsresponse": { + "count": 1, + "project": [ + { + "id": "e7ce14a8-abb7-48b9-931f-5f426febed6d", + "name": "ExampleProjectName", + "displaytext": "ExampleProject", + "account": "CB0512", + "cpuavailable": 18, + "cpulimit": 32, + "cputotal": 14, + "domain": "ExampleDomain", + "domainid": "dc0314d4-09aa-4e8f-8a54-419ecf344635", + "ipavailable": 8, + "iplimit": 8, + "iptotal": 0, + "memoryavailable": 72000, + "memorylimit": 128000, + "memorytotal": 56000, + "networkavailable": 0, + "networklimit": 0, + "networktotal": 0, + "primarystorageavailable": 1204, + "primarystoragelimit": 1600, + "primarystoragetotal": 396, + "secondarystorageavailable": 3817, + "secondarystoragelimit": 4000, + "secondarystoragetotal": 183, + "snapshotavailable": 17, + "snapshotlimit": 20, + "snapshottotal": 3, + "state": "Active", + "tags": [], + "templateavailable": 17, + "templatelimit": 20, + "templatetotal": 3, + "vmavailable": 1, + "vmlimit": 8, + "vmrunning": 7, + "vmtotal": 7, + "volumeavailable": 1, + "volumelimit": 16, + "volumetotal": 15, + "vpcavailable": 20, + "vpclimit": 0, + "vpctotal": 0 + } + ] +} } \ No newline at end of file diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listPublicIpAddresses_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listPublicIpAddresses_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listPublicIpAddresses_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listPublicIpAddresses_default.json 2013-09-09 15:24:52.000000000 +0000 @@ -0,0 +1 @@ +{ "listpublicipaddressesresponse" : { "publicipaddress" : [ {"id":34000,"ipaddress":"1.1.1.116","virtualmachineid":"2600","allocated":"2011-06-23T05:20:39+0000","zoneid":1,"zonename":"Sydney","issourcenat":false,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"}, {"id":33999,"ipaddress":"1.1.1.48","allocated":"2011-06-23T05:20:34+0000","zoneid":1,"zonename":"Sydney","issourcenat":false,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"}, {"id":33998,"ipaddress":"1.1.1.47","allocated":"2011-06-23T05:20:30+0000","zoneid":1,"zonename":"Sydney","issourcenat":false,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"}, {"id":33970,"ipaddress":"1.1.1.19","allocated":"2011-06-20T04:08:34+0000","zoneid":1,"zonename":"Sydney","issourcenat":true,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listResourceLimits_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listResourceLimits_default.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listResourceLimits_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listResourceLimits_default.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1 @@ +{ "listresourcelimitsresponse" : { "count":8 ,"resourcelimit" : [ {"account":"fakeuser@mycompany.com","domainid":"bd0b5c60-cd1e-4bf0-8d90-72c4b0de7520","domain":"fakeuser@mycompany.com","resourcetype":"0","max":20}, {"account":"fakeuser@mycompany.com","domainid":"bd0b5c60-cd1e-4bf0-8d90-72c4b0de7520","domain":"fakeuser@mycompany.com","resourcetype":"1","max":-1}, {"account":"fakeuser@mycompany.com","domainid":"bd0b5c60-cd1e-4bf0-8d90-72c4b0de7520","domain":"fakeuser@mycompany.com","resourcetype":"2","max":20}, {"account":"fakeuser@mycompany.com","domainid":"bd0b5c60-cd1e-4bf0-8d90-72c4b0de7520","domain":"fakeuser@mycompany.com","resourcetype":"3","max":20}, {"account":"fakeuser@mycompany.com","domainid":"bd0b5c60-cd1e-4bf0-8d90-72c4b0de7520","domain":"fakeuser@mycompany.com","resourcetype":"4","max":20}, {"account":"fakeuser@mycompany.com","domainid":"bd0b5c60-cd1e-4bf0-8d90-72c4b0de7520","domain":"fakeuser@mycompany.com","resourcetype":"5","max":-1}, {"account":"fakeuser@mycompany.com","domainid":"bd0b5c60-cd1e-4bf0-8d90-72c4b0de7520","domain":"fakeuser@mycompany.com","resourcetype":"6","max":20}, {"account":"fakeuser@mycompany.com","domainid":"bd0b5c60-cd1e-4bf0-8d90-72c4b0de7520","domain":"fakeuser@mycompany.com","resourcetype":"7","max":20} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listSecurityGroups_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listSecurityGroups_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listSecurityGroups_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listSecurityGroups_default.json 2013-09-09 15:24:52.000000000 +0000 @@ -0,0 +1 
@@ +{ "listsecuritygroupsresponse" : { "count":2 ,"securitygroup" : [ {"id":"ebfa2339","name":"default","description":"Default Security Group","account":"gmail.com","domainid":"ab53d864","domain":"gmail.com","ingressrule":[{"ruleid":"b83428c0-7f4c-44d1-bc96-4e1720168fdf","protocol":"tcp","startport":22,"endport":22,"cidr":"0.0.0.0/0"},{"ruleid":"bcb528e7-1a77-48f5-9bac-65cdcbc760e8","protocol":"tcp","startport":9200,"endport":9200,"cidr":"0.0.0.0/0"},{"ruleid":"80cca966-aa85-4bf0-9cb3-6b318b5d577f","protocol":"tcp","startport":9200,"endport":9300,"securitygroupname":"default","account":"runseb@gmail.com"},{"ruleid":"04f1264d-d0af-4c51-bcf9-4776166bba7d","protocol":"tcp","startport":8080,"endport":8080,"cidr":"0.0.0.0/0"}],"egressrule":[],"tags":[]}, {"id":"efae588b-5d74-4d0f-aeb8-96250d45fdee","name":"mongodb","description":"A SG for replica sets","account":"runseb@gmail.com","domainid":"ab53d864-6f78-4993-bb28-9b8667b535a1","domain":"runseb@gmail.com","ingressrule":[{"ruleid":"ed30a29e-4456-4df6-ba26-26b350da0bd9","protocol":"tcp","startport":27017,"endport":27017,"securitygroupname":"default","account":"runseb@gmail.com"},{"ruleid":"4b5722f0-fc04-4263-93e7-c93564e5619c","protocol":"tcp","startport":22,"endport":22,"cidr":"0.0.0.0/0"},{"ruleid":"2430f851-189f-4d10-9bcd-b2276bc7d6a3","protocol":"tcp","startport":27017,"endport":27017,"cidr":"0.0.0.0/0"}],"egressrule":[],"tags":[]} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listSecurityGroups_no_groups.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listSecurityGroups_no_groups.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listSecurityGroups_no_groups.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listSecurityGroups_no_groups.json 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1 @@ +{ "listsecuritygroupsresponse" : {} } diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listserviceofferingsresponse" : { "serviceoffering" : [ {"id":105,"name":"Compute Micro PRD","displaytext":"1CPU, 384MB, 80GB HDD","cpunumber":1,"cpuspeed":1200,"memory":384,"created":"2011-06-01T03:38:05+0000","storagetype":"shared","offerha":false,"domainid":14,"domain":"AA000062"}, {"id":70,"name":"Compute XLarge PRD","displaytext":"8CPU, 13.6GB RAM, 160GB Storage","cpunumber":8,"cpuspeed":1200,"memory":13928,"created":"2011-02-08T07:06:19+0000","storagetype":"shared","offerha":true,"domainid":14,"domain":"AA000062"}, {"id":69,"name":"Compute Large PRD","displaytext":"4CPU, 6.8GB RAM, 160GB Storage","cpunumber":4,"cpuspeed":1200,"memory":6964,"created":"2011-02-08T07:05:47+0000","storagetype":"shared","offerha":true,"domainid":14,"domain":"AA000062"}, {"id":68,"name":"Compute Medium PRD","displaytext":"2CPU, 3.4GB RAM, 160GB Storage","cpunumber":2,"cpuspeed":1200,"memory":3484,"created":"2011-02-08T07:05:03+0000","storagetype":"shared","offerha":true,"domainid":14,"domain":"AA000062"}, {"id":67,"name":"Compute Small PRD","displaytext":"1CPU, 1.7GB RAM, 160GB Storage","cpunumber":1,"cpuspeed":1200,"memory":1744,"created":"2011-02-08T07:03:44+0000","storagetype":"shared","offerha":true,"domainid":14,"domain":"AA000062"} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_default.json 1970-01-01 
00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"listsshkeypairsresponse":{"count":1,"sshkeypair":[{"name":"cs-keypair","fingerprint":"00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00"}]}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_get_one_doesnt_exist.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_get_one_doesnt_exist.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_get_one_doesnt_exist.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_get_one_doesnt_exist.json 2013-12-12 09:01:49.000000000 +0000 @@ -0,0 +1 @@ +{ "listsshkeypairsresponse" : { } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_get_one.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_get_one.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_get_one.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_get_one.json 2013-12-12 09:01:49.000000000 +0000 @@ -0,0 +1 @@ +{"listsshkeypairsresponse":{"count":1,"sshkeypair":[{"name":"cs-keypair","fingerprint":"00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00"}]}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_no_keys.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_no_keys.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_no_keys.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listSSHKeyPairs_no_keys.json 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1 @@ +{"listsshkeypairsresponse":{}} diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listTemplates_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listTemplates_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listTemplates_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listTemplates_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listtemplatesresponse" : { "template" : [ {"id":576,"name":"ESX[beta] Ubuntu 10.04.2 CHEF Small \\ Micro Optimised","displaytext":"ESX[beta] Ubuntu 10.04.2 CHEF Small \\ Micro Optimised","ispublic":true,"created":"2011-06-01T01:25:12+0000","isready":true,"passwordenabled":false,"format":"OVA","isfeatured":true,"crossZones":false,"ostypeid":126,"ostypename":"Ubuntu 10.04 (64-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":702743552,"templatetype":"USER","hypervisor":"VMware","domain":"ROOT","domainid":1,"isextractable":false}, {"id":443,"name":"XEN Basic Windows Svr 2008 R2 x64 R2.1","displaytext":"XEN Basic Windows Svr 2008 R2 x64 R2.1","ispublic":true,"created":"2011-03-25T01:29:46+0000","isready":true,"passwordenabled":false,"format":"VHD","isfeatured":true,"crossZones":false,"ostypeid":54,"ostypename":"Windows Server 2008 R2 (64-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":171798691840,"templatetype":"USER","hypervisor":"XenServer","domain":"ROOT","domainid":1,"isextractable":false}, {"id":474,"name":"XEN Basic Windows Svr 2003 SP2 STD","displaytext":"XEN Basic Windows Svr 2003 SP2 STD","ispublic":true,"created":"2011-04-07T10:38:45+0000","isready":true,"passwordenabled":false,"format":"VHD","isfeatured":true,"crossZones":false,"ostypeid":89,"ostypename":"Windows Server 2003 Standard Edition(32-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":171798691840,"templatetype":"USER","hypervisor":"XenServer","domain":"ROOT","domainid":1,"isextractable":false}, {"id":444,"name":"ESX[beta] Windows 
2003 x32 R2.0","displaytext":"ESX[beta] Windows 2003 x32 R2.0","ispublic":true,"created":"2011-03-25T01:34:00+0000","isready":true,"passwordenabled":false,"format":"OVA","isfeatured":true,"crossZones":false,"ostypeid":89,"ostypename":"Windows Server 2003 Standard Edition(32-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":876909056,"templatetype":"USER","hypervisor":"VMware","domain":"ROOT","domainid":1,"isextractable":false}, {"id":447,"name":"ESX[beta] Windows 2008 x32 R2.0","displaytext":"ESX[beta] Windows 2008 x32 R2.0","ispublic":true,"created":"2011-03-25T01:45:23+0000","isready":true,"passwordenabled":false,"format":"OVA","isfeatured":true,"crossZones":false,"ostypeid":52,"ostypename":"Windows Server 2008 (32-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":3391547904,"templatetype":"USER","hypervisor":"VMware","domain":"ROOT","domainid":1,"isextractable":false}, {"id":462,"name":"ESX[beta] Centos 5.5 x64 R2.0","displaytext":"ESX[beta] Centos 5.5 x64 R2.0","ispublic":true,"created":"2011-03-28T05:06:36+0000","isready":true,"passwordenabled":false,"format":"OVA","isfeatured":true,"crossZones":false,"ostypeid":12,"ostypename":"CentOS 5.3 (64-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":2263178240,"templatetype":"USER","hypervisor":"VMware","domain":"ROOT","domainid":1,"isextractable":false}, {"id":425,"name":"XEN Windows 2008 x32 R2.0","displaytext":"XEN Windows 2008 x32 R2.0","ispublic":true,"created":"2011-03-22T03:22:21+0000","isready":true,"passwordenabled":false,"format":"VHD","isfeatured":true,"crossZones":false,"ostypeid":52,"ostypename":"Windows Server 2008 (32-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":171798691840,"templatetype":"USER","hypervisor":"XenServer","domain":"ROOT","domainid":1,"isextractable":false}, {"id":461,"name":"ESX[beta] Basic Windows 2008 R2 x64","displaytext":"ESX[beta] Basic Windows 2008 R2 
x64","ispublic":true,"created":"2011-03-26T22:48:48+0000","isready":true,"passwordenabled":false,"format":"OVA","isfeatured":true,"crossZones":false,"ostypeid":54,"ostypename":"Windows Server 2008 R2 (64-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":3230146048,"templatetype":"USER","hypervisor":"VMware","domain":"ROOT","domainid":1,"isextractable":false}, {"id":575,"name":"Xen Ubuntu 10.04.2 CHEF Small \\ Micro Optimised","displaytext":"Xen Ubuntu 10.04.2 CHEF Small \\ Micro Optimised","ispublic":true,"created":"2011-06-01T01:06:21+0000","isready":true,"passwordenabled":false,"format":"VHD","isfeatured":true,"crossZones":false,"ostypeid":12,"ostypename":"CentOS 5.3 (64-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":85899345920,"templatetype":"USER","hypervisor":"XenServer","domain":"ROOT","domainid":1,"isextractable":false}, {"id":481,"name":"XEN Centos 5.4 x64 R2.0","displaytext":"XEN Centos 5.4 x64 R2.0","ispublic":true,"created":"2011-04-14T01:43:49+0000","isready":true,"passwordenabled":false,"format":"VHD","isfeatured":true,"crossZones":false,"ostypeid":12,"ostypename":"CentOS 5.3 (64-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":171966464000,"templatetype":"USER","hypervisor":"XenServer","domain":"ROOT","domainid":1,"isextractable":false}, {"id":421,"name":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","displaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","ispublic":true,"created":"2011-03-22T02:54:06+0000","isready":true,"passwordenabled":false,"format":"VHD","isfeatured":true,"crossZones":false,"ostypeid":12,"ostypename":"CentOS 5.3 (64-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":167772160000,"templatetype":"USER","hypervisor":"XenServer","domain":"ROOT","domainid":1,"isextractable":false}, {"id":423,"name":"XEN Basic Centos 5.5 x64 PV r2.2","displaytext":"XEN Basic Centos 5.5 x64 PV 
r2.2","ispublic":true,"created":"2011-03-22T02:59:31+0000","isready":true,"passwordenabled":false,"format":"VHD","isfeatured":true,"crossZones":false,"ostypeid":12,"ostypename":"CentOS 5.3 (64-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":167772160000,"templatetype":"USER","hypervisor":"XenServer","domain":"ROOT","domainid":1,"isextractable":false}, {"id":422,"name":"XEN OpenSUSE x64 11.4 R2.0","displaytext":"XEN OpenSUSE x64 11.4 R2.0","ispublic":true,"created":"2011-03-22T02:58:25+0000","isready":true,"passwordenabled":false,"format":"VHD","isfeatured":true,"crossZones":false,"ostypeid":12,"ostypename":"CentOS 5.3 (64-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":171966464000,"templatetype":"USER","hypervisor":"XenServer","domain":"ROOT","domainid":1,"isextractable":false} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listTemplates_notemplates.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listTemplates_notemplates.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listTemplates_notemplates.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listTemplates_notemplates.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listtemplatesresponse" : {} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listVirtualMachines_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listVirtualMachines_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listVirtualMachines_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listVirtualMachines_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listvirtualmachinesresponse" : { "virtualmachine" : [ 
{"id":2600,"name":"test","displayname":"test","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:06:42+0000","state":"Running","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"1.78%","networkkbsread":2,"networkkbswrite":2,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3891,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.2.1","ipaddress":"1.1.1.116","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}, {"id":2601,"name":"test","displayname":"test","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:09:44+0000","state":"Starting","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"jobid":17147,"jobstatus":0,"nic":[{"id":3892,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.2.1","ipaddress":"1.1.1.203","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listVolumes_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listVolumes_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listVolumes_default.json 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listVolumes_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listvolumesresponse" : { "count":1 ,"volume" : [ {"id":"fe1ada16-57a0-40ae-b577-01a153690fb4","name":"ROOT-69942","zoneid":"7dbc4787-ec2f-498d-95f0-848c8c81e5da","zonename":"MTV-Zone1","type":"ROOT","deviceid":0,"virtualmachineid":"3239ade9-fd25-405c-8eda-59f0313a3fb0","vmname":"apb-cent32-bld","vmdisplayname":"apb-cent32-bld","vmstate":"Stopped","size":139264,"created":"2013-04-16T16:25:57-0700","state":"Ready","account":"andrew","domainid":"41a4917b-7952-499d-ba7f-4c57464d3dc8","domain":"ROOT","storagetype":"local","hypervisor":"KVM","storage":"c2422.halxg.cloudera.com","destroyed":false,"serviceofferingid":"7cc4f8c3-7c56-4155-9916-9f42072ea712","serviceofferingname":"Tiny","serviceofferingdisplaytext":"Tiny (1 core, 1GB RAM)","isextractable":false} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listZones_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listZones_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listZones_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listZones_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listzonesresponse" : { "zone" : [ {"id":1,"name":"Sydney","networktype":"Advanced","securitygroupsenabled":false} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listZones_deployfail2.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listZones_deployfail2.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listZones_deployfail2.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listZones_deployfail2.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listzonesresponse" : { "zone" : [ 
{"id":1,"name":"Sydney","networktype":"Advanced","securitygroupsenabled":false} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listZones_deployfail.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listZones_deployfail.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listZones_deployfail.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listZones_deployfail.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listzonesresponse" : { "zone" : [ {"id":1,"name":"Sydney","networktype":"Advanced","securitygroupsenabled":false} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listZones_withcustomdisksize.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listZones_withcustomdisksize.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/listZones_withcustomdisksize.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/listZones_withcustomdisksize.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listzonesresponse" : { "zone" : [ {"id":1,"name":"Sydney","networktype":"Advanced","securitygroupsenabled":false} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11111.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11111.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11111.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11111.json 2013-09-09 15:24:52.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : 
{"accountid":"cb13887","userid":"11110","cmd":"org.apache.cloudstack.api.command.user.address.AssociateIPAddrCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"ipaddress":{"id":"10987171-8cc9-4d0a-b98f-1698c09ddd2d","ipaddress":"7.5.6.1","allocated":"2013-09-05T06:19:59-0400","zoneid":"d06193b2-7980-4ad1-b5d8-7b2f2eda63c3","zonename":"SanJose","issourcenat":false,"account":"admin","domainid":"0ce03bf9-540d-47da-b3f3-75df64fddf2f","domain":"ROOT","forvirtualnetwork":true,"vlanid":"0cf2e30a-1d75-4108-8a50-259e0590faf9","vlanname":"67","isstaticnat":false,"issystem":false,"associatednetworkid":"edd15baf-b5af-4805-8484-3478ad236372","associatednetworkname":"defaultown","networkid":"9a3b3ed4-40f4-4def-aecb-8b9f54536693","state":"Allocating","physicalnetworkid":"3a997df4-ea89-4b53-b523-010857a9087b","tags":[]}},"created":"2013-09-05T06:19:59-0400","jobid":"11111"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11112.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11112.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11112.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11112.json 2013-09-09 15:24:52.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"accountid":"cb13887c-7c6a-464d-aa8f-c6971f9dd458","userid":"11110e27-5a51-4109-9598-7df34141761a","cmd":"org.apache.cloudstack.api.command.user.address.DisassociateIPAddrCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true},"created":"2013-09-05T06:36:44-0400","jobid":"11112"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11113.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11113.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11113.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11113.json 2013-09-09 15:24:52.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"accountid":"cb13887c","userid":"11110","cmd":"org.apache.cloudstack.api.command.user.firewall.CreatePortForwardingRuleCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"portforwardingrule":{"id":"bc7ea3ee-a2c3-4b86-a53f-01bdaa1b2e32","privateport":"33","privateendport":"33","protocol":"tcp","publicport":"33","publicendport":"33","virtualmachineid":"8879e159-d4a6-4a1d-bc75-074c097bef2a","virtualmachinename":"testlib","virtualmachinedisplayname":"libcloud","ipaddressid":"96dac96f-0b5d-42c1-b5de-8a97f3e34c43","ipaddress":"7.5.1.7","state":"Active","cidrlist":"","tags":[]}},"created":"2013-09-05T07:33:43-0400","jobid":"11113"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11114.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11114.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11114.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11114.json 2013-09-09 15:24:52.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"accountid":"cb13887c-7c6a-464d-aa8f-c6971f9dd458","userid":"11110e27-5a51-4109-9598-7df34141761a","cmd":"org.apache.cloudstack.api.command.user.firewall.DeletePortForwardingRuleCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true},"created":"2013-09-05T09:09:01-0400","jobid":"11114"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11115.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11115.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11115.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11115.json 2013-09-09 15:24:52.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"accountid":"b8c0baab","userid":"968f6b4e","cmd":"com.cloud.api.commands.DeployVMCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":"fc4fd31a-16d3-49db-814a-56b39b9ef986","name":"test","displayname":"test","account":"runseb@gmail.com","domainid":"ab53d864-6f78-4993-bb28-9b8667b535a1","domain":"runseb@gmail.com","created":"2013-09-05T15:45:11+0200","state":"Running","haenable":false,"zoneid":"1128bd56-b4d9-4ac6-a7b9-c715b187ce11","zonename":"CH-GV2","templateid":"a17b40d6-83e4-4f2a-9ef0-dce6af575789","templatename":"Linux Ubuntu 12.04 LTS 64-bit","templatedisplaytext":"Linux Ubuntu 12.04 LTS 64-bit 10GB Disk","passwordenabled":true,"serviceofferingid":"71004023-bb72-4a97-b1e9-bc66dfce9470","serviceofferingname":"Micro","cpunumber":1,"cpuspeed":2198,"memory":512,"guestosid":"113038d0-a8cd-4d20-92be-ea313f87c3ac","rootdeviceid":0,"rootdevicetype":"Filesystem","securitygroup":[{"id":"efae588b-5d74-4d0f-aeb8-96250d45fdee","name":"default","description":"A SG for replica sets"},{"id":"ebfa2339-e9ae-4dcb-b73c-a76cd3fce39e","name":"mongodb","description":"Default Security Group"}],"password":"iQ7hbmrjw","nic":[{"id":"09a806c7-ab1e-452d-a779-dda974ea7e41","networkid":"00304a04-c7ea-4e77-a786-18bc64347bf7","netmask":"255.255.254.0","gateway":"185.19.28.1","ipaddress":"185.19.28.98","traffictype":"Guest","type":"Shared","isdefault":true,"macaddress":"06:54:5a:00:00:95"}],"hypervisor":"KVM","tags":[]}},"created":"2013-09-05T15:45:11+0200","jobid":"10046ad6-917f-4c25-add3-e08cd98e0e87"} } diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11116.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11116.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11116.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11116.json 2013-09-09 15:24:52.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"accountid":"b8c0baab-18a1-44c0-ab67-e24049212925","userid":"968f6b4e-b382-4802-afea-dd731d4cf9b9","cmd":"com.cloud.api.commands.DeployVMCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":"94fa7e76-9a39-4e5a-8f84-40e496650780","name":"test","displayname":"test","account":"runseb@gmail.com","domainid":"ab53d864-6f78-4993-bb28-9b8667b535a1","domain":"runseb@gmail.com","created":"2012-09-05T16:05:50+0200","state":"Running","haenable":false,"zoneid":"1128bd56-b4d9-4ac6-a7b9-c715b187ce11","zonename":"CH-GV2","templateid":"a17b40d6-83e4-4f2a-9ef0-dce6af575789","templatename":"Linux Ubuntu 12.04 LTS 64-bit","templatedisplaytext":"Linux Ubuntu 12.04 LTS 64-bit 10GB Disk","passwordenabled":true,"serviceofferingid":"71004023-bb72-4a97-b1e9-bc66dfce9470","serviceofferingname":"Micro","cpunumber":1,"cpuspeed":2198,"memory":512,"guestosid":"113038d0-a8cd-4d20-92be-ea313f87c3ac","rootdeviceid":0,"rootdevicetype":"Filesystem","securitygroup":[{"id":"ebfa2339-e9ae-4dcb-b73c-a76cd3fce39e","name":"default","description":"Default Security 
Group"}],"password":"zU3gwurmi","nic":[{"id":"2bd513cf-7453-4594-87c2-2d8b17ae8fad","networkid":"00304a04-c7ea-4e77-a786-18bc64347bf7","netmask":"255.255.254.0","gateway":"185.19.28.1","ipaddress":"185.19.28.96","traffictype":"Guest","type":"Shared","isdefault":true,"macaddress":"06:84:e0:00:00:93"}],"hypervisor":"KVM","tags":[],"keypair":"foobar"}},"created":"2013-09-05T16:05:51+0200","jobid":"a0c97348-4d21-43ba-879e-9d8340ec0805"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11117.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11117.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11117.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_11117.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,63 @@ +{ "queryasyncjobresultresponse": { + "accountid": "86d47ca2-726b-4b85-a18a-77d6b0d79829", + "userid": "20cd68f5-0633-48a5-826e-e4e2a00dd6b8", + "cmd": "org.apache.cloudstack.api.command.user.vm.DeployVMCmd", + "jobstatus": 1, + "jobprocstatus": 0, + "jobresultcode": 0, + "jobresulttype": "object", + "jobresult": { + "virtualmachine": { + "id": "19253fbf-abb7-4013-a8a1-97df3b93f206", + "name": "TestNode", + "projectid": "b90442d1-079b-4066-ab7d-41f8f3a5078b", + "project": "Test Project", + "domainid": "dc0314d4-09aa-4e8f-8a54-419ecf344635", + "domain": "Test Domain", + "created": "2014-03-06T15:39:44-0600", + "state": "Running", + "haenable": false, + "zoneid": "d630b15a-a9e1-4641-bee8-355005b7a14d", + "zonename": "TestZone", + "templateid": "a032e8a0-3411-48b7-9e78-ff66823e6561", + "templatename": "OL-6.3.1-64-13.11.01", + "templatedisplaytext": "OL-6.3.1-64-13.11.01", + "passwordenabled": true, + "serviceofferingid": "519f8667-26d0-40e5-a1cd-da04be1fd9b5", + "serviceofferingname": "Test Service Offering", + "cpunumber": 1, + "cpuspeed": 2000, + "memory": 2000, + "guestosid": 
"b8506c91-6d8e-4086-8659-f6296a7b71ac", + "rootdeviceid": 0, + "rootdevicetype": "ROOT", + "securitygroup": [], + "password": "mW6crjxag", + "nic": [ + { + "id": "1c144283-979a-4359-b695-3334dc403457", + "networkid": "1bf4acce-19a5-4830-ab1d-444f8acb9986", + "networkname": "Public", + "netmask": "255.255.252.0", + "gateway": "10.1.2.2", + "ipaddress": "10.2.2.8", + "isolationuri": "vlan://2950", + "broadcasturi": "vlan://2950", + "traffictype": "Guest", + "type": "Shared", + "isdefault": true, + "macaddress": "06:ef:30:00:04:22" + } + ], + "hypervisor": "VMware", + "tags": [], + "affinitygroup": [], + "displayvm": true, + "isdynamicallyscalable": false, + "jobid": "e23b9f0c-b7ae-4ffe-aea0-c9cf436cc315", + "jobstatus": 0 + } + }, + "created": "2014-03-06T15:39:44-0600", + "jobid": "e23b9f0c-b7ae-4ffe-aea0-c9cf436cc315" +} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17164.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17164.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17164.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17164.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17164,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":2602,"name":"fred","displayname":"fred","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:48:31+0000","state":"Running","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro 
PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3893,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"192.168.1.2","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}}} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17165.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17165.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17165.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17165.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17165,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":2602,"name":"fred","displayname":"fred","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:48:31+0000","state":"Running","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"0.14%","networkkbsread":2,"networkkbswrite":1,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3893,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.1.2","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}}} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17166.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17166.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17166.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17166.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17166,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":2602,"name":"fred","displayname":"fred","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:48:31+0000","state":"Destroyed","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"0.13%","networkkbsread":2,"networkkbswrite":1,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3893,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.1.2","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}}} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17177.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17177.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17177.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17177.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17177,"jobstatus":2} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17188.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17188.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17188.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17188.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17166,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":2602,"name":"fred","displayname":"fred","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:48:31+0000","state":"Starting","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"0.13%","networkkbsread":2,"networkkbswrite":1,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3893,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.1.2","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}}} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17199.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17199.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17199.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17199.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : 
{"jobid":17166,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":2602,"name":"fred","displayname":"fred","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:48:31+0000","state":"Stopped","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"0.13%","networkkbsread":2,"networkkbswrite":1,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3893,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.1.2","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}}} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17200.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17200.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17200.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_17200.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"queryasyncjobresultresponse":{"jobid":17200,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"securitygroup":[{"egressrule":[],"account":"runseb@gmail.com","domainid":"ab53d864-6f78-4993-bb28-9b8667b535a1","name":"MySG","domain":"runseb@gmail.com","ingressrule":[{"startport":22,"cidr":"0.0.0.0/0","protocol":"tcp","endport":22,"ruleid":"7df1edc8-6e56-48d7-b816-39377506d787"}],"id":"fa334c44-21c6-4809-ad7d-287bbb23c29b"}]}}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_attachvolumejob.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_attachvolumejob.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_attachvolumejob.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_attachvolumejob.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"accountid":"be7d76b3-8823-49c0-86e1-29efd9ea1eb0","userid":"a8bd3087-edc1-4e94-8470-6830404b7292","cmd":"com.cloud.api.commands.AttachVolumeCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"volume":{"id":"60338035-92fb-4d27-98d4-b60ad4b38b87","name":"vol-0","zoneid":"58624957-a150-46a3-acbf-4088776161e5","zonename":"EQ-AMS2-Z01","type":"DATADISK","deviceid":5,"virtualmachineid":"ab2c18f6-00a6-43f8-9fe0-efecb3165dd7","vmname":"ab2c18f6-00a6-43f8-9fe0-efecb3165dd7","vmdisplayname":"gre-kickstart","vmstate":"Running","size":10737418240,"created":"2012-06-05T08:47:54+0200","state":"Ready","account":"admin","domainid":"bfc35f83-8589-4e93-9150-d57e8479f772","domain":"ROOT","storagetype":"shared","hypervisor":"KVM","diskofferingid":"6345e3b7-227e-4209-8f8c-1f94219696e6","diskofferingname":"Disk offering 1","diskofferingdisplaytext":"Disk offering 1 display name","storage":"Shared Storage CL01","attached":"2012-06-05T09:17:38+0200","destroyed":false,"isextractable":false}},"created":"2012-06-05T09:17:38+0200","jobid":"e07d6b9b-2b6c-45bd-840b-3c4c3d890168"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_createtagsjob.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_createtagsjob.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_createtagsjob.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_createtagsjob.json 2014-06-11 
14:27:59.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"accountid":"dcfd4b83-2ae6-43d1-a2eb-af87066ecbc9","userid":"c3d3cb3c-0f13-429a-b900-5bacc346df32","cmd":"com.cloud.api.commands.CreateTagsCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true},"created":"2014-02-07T20:10:40+0100","jobid":"2a7426a5-e25e-4400-900d-09bca3c0a039"} } \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_createvolumejob.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_createvolumejob.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_createvolumejob.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_createvolumejob.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"accountid":"be7d76b3-8823-49c0-86e1-29efd9ea1eb0","userid":"a8bd3087-edc1-4e94-8470-6830404b7292","cmd":"com.cloud.api.commands.CreateVolumeCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"volume":{"id":"60338035-92fb-4d27-98d4-b60ad4b38b87","name":"vol-0","zoneid":"58624957-a150-46a3-acbf-4088776161e5","zonename":"EQ-AMS2-Z01","type":"DATADISK","size":10737418240,"created":"2012-06-05T08:47:54+0200","state":"Allocated","account":"admin","domainid":"bfc35f83-8589-4e93-9150-d57e8479f772","domain":"ROOT","storagetype":"shared","hypervisor":"None","diskofferingid":"6345e3b7-227e-4209-8f8c-1f94219696e6","diskofferingname":"Disk offering","diskofferingdisplaytext":"Disk offering display name","storage":"none","destroyed":false,"isextractable":false}},"created":"2012-06-05T08:47:54+0200","jobid":"35416f6d-1b5b-4ceb-a7d4-aab0deede71b"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deleteNetwork.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deleteNetwork.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deleteNetwork.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deleteNetwork.json 2014-06-27 11:27:01.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"accountid":"02c9bf08-6f36-44b1-a57f-df0708f90de4","userid":"6ef2b921-4ecf-4651-8188-f9868db73e73","cmd":"org.apache.cloudstack.api.command.user.network.DeleteNetworkCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true},"created":"2014-06-11T10:09:00+0200","jobid":"65789636-d2c8-484c-9d13-47ad3de384ed"} } \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deletetagsjob.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deletetagsjob.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deletetagsjob.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deletetagsjob.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"accountid":"dcfd4b83-2ae6-43d1-a2eb-af87066ecbc9","userid":"c3d3cb3c-0f13-429a-b900-5bacc346df32","cmd":"com.cloud.api.commands.DeleteTagsCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true},"created":"2014-02-08T13:43:24+0100","jobid":"02425faf-4cf4-44c2-9241-cb8b1eabc957"} } \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_detachvolumejob.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_detachvolumejob.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_detachvolumejob.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_detachvolumejob.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"accountid":"be7d76b3-8823-49c0-86e1-29efd9ea1eb0","userid":"a8bd3087-edc1-4e94-8470-6830404b7292","cmd":"com.cloud.api.commands.DetachVolumeCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"volume":{"id":"5931d2ca-4e90-4915-88a8-32b38b3991a3","name":"gre-test-volume","zoneid":"58624957-a150-46a3-acbf-4088776161e5","zonename":"EQ-AMS2-Z01","type":"DATADISK","size":10737418240,"created":"2012-06-15T14:56:40+0200","state":"Ready","account":"admin","domainid":"bfc35f83-8589-4e93-9150-d57e8479f772","domain":"ROOT","storagetype":"shared","hypervisor":"KVM","diskofferingid":"6345e3b7-227e-4209-8f8c-1f94219696e6","diskofferingname":"OS disk for Windows","diskofferingdisplaytext":"OS disk for Windows","storage":"Shared Storage CL01","destroyed":false,"isextractable":false}},"created":"2012-06-15T15:08:39+0200","jobid":"ca6c856d-1f36-4e27-989e-09cad2dad808"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/rebootVirtualMachine_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/rebootVirtualMachine_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/rebootVirtualMachine_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/rebootVirtualMachine_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "rebootvirtualmachineresponse" : {"jobid":17165} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/registerSSHKeyPair_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/registerSSHKeyPair_default.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/registerSSHKeyPair_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/registerSSHKeyPair_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "registersshkeypairresponse": { "keypair": { "name": "foobar", "fingerprint": "c4:a1:e5:d4:50:84:a9:4c:6b:22:ee:d6:57:02:b8:15" } } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/registerSSHKeyPair_error.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/registerSSHKeyPair_error.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/registerSSHKeyPair_error.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/registerSSHKeyPair_error.json 2013-12-12 09:01:49.000000000 +0000 @@ -0,0 +1 @@ +{ "registersshkeypairresponse" : {"uuidList":[],"errorcode":431,"errortext":"Public key is invalid"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/revokeSecurityGroupEgress_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/revokeSecurityGroupEgress_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/revokeSecurityGroupEgress_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/revokeSecurityGroupEgress_default.json 2013-09-17 09:18:07.000000000 +0000 @@ -0,0 +1,2 @@ +{ "revokesecuritygroupegressresponse" : {"jobid":17203} } + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/revokeSecurityGroupIngress_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/revokeSecurityGroupIngress_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/revokeSecurityGroupIngress_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/revokeSecurityGroupIngress_default.json 2013-09-17 09:15:06.000000000 +0000 @@ 
-0,0 +1,2 @@ +{ "revokesecuritygroupingressresponse" : {"jobid":17201} } + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/startVirtualMachine_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/startVirtualMachine_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/startVirtualMachine_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/startVirtualMachine_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "startvirtualmachineresponse" : {"jobid":17188} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/stopVirtualMachine_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/stopVirtualMachine_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/cloudstack/stopVirtualMachine_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/cloudstack/stopVirtualMachine_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "stopvirtualmachineresponse" : {"jobid":17199} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/create_node.json libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/create_node.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/create_node.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/create_node.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"status":"OK","droplet":{"id":119461,"name":"test-2","image_id":1601,"size_id":66,"event_id":919341}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/destroy_node.json libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/destroy_node.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/destroy_node.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/destroy_node.json 2013-08-30 
12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"status":"OK","event_id":918910} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/error_invalid_image.json libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/error_invalid_image.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/error_invalid_image.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/error_invalid_image.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1 @@ +{"status":"ERROR","error_message":"You specified an invalid image for Droplet creation.","message":"You specified an invalid image for Droplet creation."} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/error.txt libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/error.txt --- libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/error.txt 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/error.txt 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +You are being redirected. 
diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/ex_create_ssh_key.json libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/ex_create_ssh_key.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/ex_create_ssh_key.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/ex_create_ssh_key.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"status":"OK","ssh_key":{"id":7717,"name":"test1","ssh_pub_key":"aaq"}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/ex_destroy_ssh_key.json libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/ex_destroy_ssh_key.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/ex_destroy_ssh_key.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/ex_destroy_ssh_key.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"status":"OK"} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/ex_list_ssh_keys.json libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/ex_list_ssh_keys.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/ex_list_ssh_keys.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/ex_list_ssh_keys.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"status":"OK","ssh_keys":[{"id":7717,"name":"test1"}]} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/ex_rename_node.json libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/ex_rename_node.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/ex_rename_node.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/ex_rename_node.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1 @@ +{"status":"OK","event_id":918910} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/list_images.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/list_images.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/list_images.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/list_images.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,145 @@ +{ + "status": "OK", + "images": [ + { + "id": 1601, + "name": "CentOS 5.8 x64", + "distribution": "CentOS" + }, + { + "id": 1602, + "name": "CentOS 5.8 x32", + "distribution": "CentOS" + }, + { + "id": 1605, + "name": "CentOS 6.0 x32", + "distribution": "CentOS" + }, + { + "id": 1606, + "name": "Fedora 15 x64", + "distribution": "Fedora" + }, + { + "id": 1609, + "name": "Ubuntu 11.10 x32 Server", + "distribution": "Ubuntu" + }, + { + "id": 1611, + "name": "CentOS 6.2 x64", + "distribution": "CentOS" + }, + { + "id": 1615, + "name": "Fedora 16 x64 Server", + "distribution": "Fedora" + }, + { + "id": 1618, + "name": "Fedora 16 x64 Desktop", + "distribution": "Fedora" + }, + { + "id": 2676, + "name": "Ubuntu 12.04 x64 Server", + "distribution": "Ubuntu" + }, + { + "id": 12573, + "name": "Debian 6.0 x64", + "distribution": "Debian" + }, + { + "id": 12574, + "name": "CentOS 6.3 x64", + "distribution": "CentOS" + }, + { + "id": 12575, + "name": "Debian 6.0 x32", + "distribution": "Debian" + }, + { + "id": 12578, + "name": "CentOS 6.3 x32", + "distribution": "CentOS" + }, + { + "id": 14097, + "name": "Ubuntu 10.04 x64 Server", + "distribution": "Ubuntu" + }, + { + "id": 14098, + "name": "Ubuntu 10.04 x32 Server", + "distribution": "Ubuntu" + }, + { + "id": 14218, + "name": "Ubuntu 12.04 x64 Desktop", + "distribution": "Ubuntu" + }, + { + "id": 25306, + "name": "Ubuntu 12.10 x32 Server", + "distribution": "Ubuntu" + }, + { + "id": 25485, + "name": "Ubuntu 12.10 x32 Desktop", + "distribution": "Ubuntu" + }, + { + "id": 25489, + "name": "Ubuntu 12.10 x64 Server", + "distribution": "Ubuntu" + }, + { + "id": 25493, + "name": "Ubuntu 12.10 x64 Desktop", + 
"distribution": "Ubuntu" + }, + { + "id": 32387, + "name": "Fedora 17 x32 Server", + "distribution": "Fedora" + }, + { + "id": 32399, + "name": "Fedora 17 x32 Desktop", + "distribution": "Fedora" + }, + { + "id": 32419, + "name": "Fedora 17 x64 Desktop", + "distribution": "Fedora" + }, + { + "id": 32428, + "name": "Fedora 17 x64 Server", + "distribution": "Fedora" + }, + { + "id": 42735, + "name": "Ubuntu 12.04 x32 Server", + "distribution": "Ubuntu" + }, + { + "id": 43458, + "name": "Ubuntu 11.04x64 Server", + "distribution": "Ubuntu" + }, + { + "id": 43462, + "name": "Ubuntu 11.04x32 Desktop", + "distribution": "Ubuntu" + }, + { + "id": 46964, + "name": "LAMP on Ubuntu 12.04", + "distribution": "Ubuntu" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/list_locations.json libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/list_locations.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/list_locations.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/list_locations.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"status":"OK","regions":[{"id":1,"name":"New York 1"},{"id":2,"name":"Amsterdam 1"}]} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/list_nodes_empty.json libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/list_nodes_empty.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/list_nodes_empty.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/list_nodes_empty.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"status":"OK","droplets":[]} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/list_nodes.json libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/list_nodes.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/list_nodes.json 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/list_nodes.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"status":"OK","droplets":[{"id":119461,"name":"test-2","image_id":1601,"size_id":66,"region_id":1,"backups_active":null,"ip_address":null,"status":"new"}]} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/list_sizes.json libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/list_sizes.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/list_sizes.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/list_sizes.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"status":"OK","sizes":[{"id":66,"name":"512MB"},{"id":63,"name":"1GB"},{"id":62,"name":"2GB"},{"id":64,"name":"4GB"},{"id":65,"name":"8GB"},{"id":61,"name":"16GB"},{"id":60,"name":"32GB"},{"id":70,"name":"48GB"},{"id":69,"name":"64GB"},{"id":68,"name":"96GB"}]} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/reboot_node.json libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/reboot_node.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/digitalocean/reboot_node.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/digitalocean/reboot_node.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"status":"OK","event_id":918910} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/allocate_address.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/allocate_address.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/allocate_address.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/allocate_address.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,5 @@ + + 56926e0e-5fa3-41f3-927c-17212def59df + 192.0.2.1 + standard + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/allocate_vpc_address.xml 
libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/allocate_vpc_address.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/allocate_vpc_address.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/allocate_vpc_address.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,6 @@ + + eb920193-37a9-401a-8fff-089783b2c153 + 192.0.2.2 + vpc + eipalloc-666d7f04 + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/associate_address.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/associate_address.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/associate_address.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/associate_address.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + 1a470be4-b44d-4423-a80c-44ef2070b8be + true + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/associate_vpc_address.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/associate_vpc_address.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/associate_vpc_address.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/associate_vpc_address.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,5 @@ + + s132fsz2-6cdg-4ox3-a148-lpqnvdc98c2a + true + eipassoc-167a8073 + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/attach_internet_gateway.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/attach_internet_gateway.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/attach_internet_gateway.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/attach_internet_gateway.xml 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,4 @@ + + 1eb45fd7-d4f6-4b63-a52f-54fc0c82617e + true + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/attach_network_interface.xml 
libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/attach_network_interface.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/attach_network_interface.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/attach_network_interface.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + e46c7abc-8b14-4315-99cb-773a0f95d833 + eni-attach-2b588b47 + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/attach_volume.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/attach_volume.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/attach_volume.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/attach_volume.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,8 @@ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + vol-4d826724 + i-6058a509 + /dev/sdh + attaching + 2008-05-07T11:51:50.000Z + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/authorize_security_group_egress.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/authorize_security_group_egress.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/authorize_security_group_egress.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/authorize_security_group_egress.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/authorize_security_group_ingress.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/authorize_security_group_ingress.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/authorize_security_group_ingress.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/authorize_security_group_ingress.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/copy_image.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/copy_image.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/copy_image.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/copy_image.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + 7b7d87d5-c045-4c2c-a2c4-b538debe14b2 + ami-4db38224 + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_image.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_image.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_image.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_image.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + 3629ec66-c1f8-4b66-aac5-a8ad1cdf6c15 + ami-e9b38280 + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_internet_gateway.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_internet_gateway.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_internet_gateway.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_internet_gateway.xml 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,8 @@ + + 437b9824-8143-4583-98f7-0937d53aea83 + + igw-13ac2b36 + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_key_pair.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_key_pair.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_key_pair.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_key_pair.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,22 @@ + + my-key-pair + + 1f:51:ae:28:bf:89:e9:d8:1f:25:5d:37:2d:7d:b8:ca:9f:f5:f1:6f + + ---- BEGIN RSA PRIVATE KEY ---- +MIICiTCCAfICCQD6m7oRw0uXOjANBgkqhkiG9w0BAQUFADCBiDELMAkGA1UEBhMC 
+VVMxCzAJBgNVBAgTAldBMRAwDgYDVQQHEwdTZWF0dGxlMQ8wDQYDVQQKEwZBbWF6 +b24xFDASBgNVBAsTC0lBTSBDb25zb2xlMRIwEAYDVQQDEwlUZXN0Q2lsYWMxHzAd +BgkqhkiG9w0BCQEWEG5vb25lQGFtYXpvbi5jb20wHhcNMTEwNDI1MjA0NTIxWhcN +MTIwNDI0MjA0NTIxWjCBiDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAldBMRAwDgYD +VQQHEwdTZWF0dGxlMQ8wDQYDVQQKEwZBbWF6b24xFDASBgNVBAsTC0lBTSBDb25z +b2xlMRIwEAYDVQQDEwlUZXN0Q2lsYWMxHzAdBgkqhkiG9w0BCQEWEG5vb25lQGFt +YXpvbi5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMaK0dn+a4GmWIWJ +21uUSfwfEvySWtC2XADZ4nB+BLYgVIk60CpiwsZ3G93vUEIO3IyNoH/f0wYK8m9T +rDHudUZg3qX4waLG5M43q7Wgc/MbQITxOUSQv7c7ugFFDzQGBzZswY6786m86gpE +Ibb3OhjZnzcvQAaRHhdlQWIMm2nrAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAtCu4 +nUhVVxYUntneD9+h8Mg9q6q+auNKyExzyLwaxlAoo7TJHidbtS4J5iNmZgXL0Fkb +FFBjvSfpJIlJ00zbhNYS5f6GuoEDmFJl0ZxBHjJnyp378OD8uTs7fLvjx79LjSTb +NYiytVbZPQUQ5Yaxu2jXnimvw3rrszlaEXAMPLE +-----END RSA PRIVATE KEY----- + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_network_interface.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_network_interface.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_network_interface.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_network_interface.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,30 @@ + + ca764ebe-8abc-4d37-9995-b9e88c086fa8 + + eni-2b36086d + subnet-5ed9d432 + vpc-62ded30e + us-east-1d + My Test + 123456789098 + false + pending + 0e:bd:49:3e:11:74 + 172.16.4.144 + ip-172-16-4-144.ec2.internal + true + + + sg-495a9926 + default + + + + + + 172.16.4.144 + true + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_security_group.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_security_group.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_security_group.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_security_group.xml 2014-05-26 15:42:51.000000000 
+0000 @@ -0,0 +1,5 @@ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + sg-52e2f530 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_snapshot.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_snapshot.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_snapshot.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_snapshot.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,11 @@ + + 59dbff89-35bd-4eac-99ed-be587 + snap-a7cb2hd9 + vol-4282672b + pending + 2013-08-15T16:22:30.000Z + 60% + 1836219348 + 10 + Test description + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_subnet.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_subnet.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_subnet.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_subnet.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,11 @@ + + e94b315e-6424-4536-b48d-0dfb47732c72 + + subnet-ce0e7ce6 + pending + vpc-532135d1 + 192.168.51.128/26 + 59 + us-east-1b + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_tags.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_tags.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_tags.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_tags.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + b001068a-ca0d-4f05-b622-28fe984f44be + true + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_volume.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_volume.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_volume.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_volume.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,9 @@ + + 
59dbff89-35bd-4eac-99ed-be587EXAMPLE + vol-4d826724 + 10 + + us-east-1a + creating + 2008-05-07T11:51:50.000Z + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_vpc.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_vpc.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/create_vpc.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/create_vpc.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,10 @@ + + 7a662fe5-1f34-4e17-9ee9-69a28a8ac0be + + vpc-ad3527cf + pending + 192.168.55.0/24 + dopt-7eded312 + default + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/delete_internet_gateway.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/delete_internet_gateway.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/delete_internet_gateway.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/delete_internet_gateway.xml 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,4 @@ + + b1a5c8c9-91c7-43f3-8234-c162db89a2df + true + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/delete_key_pair.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/delete_key_pair.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/delete_key_pair.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/delete_key_pair.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + 59dbff89-35bd-4eac-99ed-be587 + true + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/delete_network_interface.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/delete_network_interface.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/delete_network_interface.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/delete_network_interface.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 
+1,4 @@ + + c0bc0036-e328-47c6-bd6d-318b007f66ee + true + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/delete_security_group.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/delete_security_group.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/delete_security_group.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/delete_security_group.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/delete_snapshot.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/delete_snapshot.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/delete_snapshot.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/delete_snapshot.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + 5cd6fa89-35bd-4aac-99ed-na8af7 + true + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/delete_subnet.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/delete_subnet.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/delete_subnet.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/delete_subnet.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + 5cd6fa89-35bd-4aac-99ed-na8af7 + true + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/delete_tags.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/delete_tags.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/delete_tags.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/delete_tags.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + 7a297da7-3ecb-4156-8bcb-3be73896cc14 + true + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/delete_volume.xml 
libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/delete_volume.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/delete_volume.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/delete_volume.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/delete_vpc.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/delete_vpc.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/delete_vpc.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/delete_vpc.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + 85793fa6-2ece-480c-855f-0f82c3257e50 + true + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/deregister_image.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/deregister_image.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/deregister_image.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/deregister_image.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + d06f248d-444e-475d-a8f8-1ebb4ac39842 + true + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_account_attributes.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_account_attributes.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_account_attributes.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_account_attributes.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,30 @@ + + + a00adaf6-f86c-48eb-85f8-7ac470ae993d + + + max-instances + + + 20 + + + + + max-elastic-ips + + + 5 + + + + + vpc-max-elastic-ips + + + 5 + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_addresses_all.xml 
libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_addresses_all.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_addresses_all.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_addresses_all.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,35 @@ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + 1.2.3.4 + eipalloc-602b5d01 + vpc + i-4382922a + eipassoc-cea049ab + eni-83e3c5c5 + 123456789098 + 192.168.1.5 + + + 1.2.3.5 + eipalloc-998195fb + vpc + i-4382922b + eipassoc-cea049ac + eni-83e3c5c6 + 123456789098 + 192.168.1.6 + + + 1.2.3.6 + eipalloc-922a5cf3 + standard + + + 1.2.3.7 + eipalloc-992a5cf8 + vpc + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_addresses_multi.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_addresses_multi.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_addresses_multi.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_addresses_multi.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,17 @@ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + 1.2.3.4 + i-4382922a + + + 1.2.3.6 + i-4382922b + + + 1.2.3.5 + i-4382922b + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_addresses_single.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_addresses_single.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_addresses_single.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_addresses_single.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,9 @@ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + 1.2.3.4 + i-4382922a + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_addresses.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_addresses.xml --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_addresses.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_addresses.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + 1.2.3.4 + eipalloc-602b5d01 + vpc + i-4382922a + eipassoc-cea049ab + eni-83e3c5c5 + 123456789098 + 192.168.1.5 + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_availability_zones.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_availability_zones.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_availability_zones.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_availability_zones.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,17 @@ + + cc0dfb29-efef-451c-974f-341b3edfb28f + + + eu-west-1a + available + eu-west-1 + + + + eu-west-1b + available + eu-west-1 + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_images_ex_imageids.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_images_ex_imageids.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_images_ex_imageids.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_images_ex_imageids.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,36 @@ + + 73fac9c5-f6d2-4b45-846f-47adf1e82d6c + + + ami-57ba933a + 123456788908/Test Image + available + 123456788908 + false + x86_64 + machine + aki-88aa75e1 + Test Image + Testing Stuff + ebs + /dev/sda1 + + + /dev/sda1 + + snap-88123ed9 + 10 + true + standard + + + + /dev/sda2 + ephemeral0 + + + paravirtual + xen + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_images.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_images.xml --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_images.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_images.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,62 @@ + + 73fac9c5-f6d2-4b45-846f-47adf1e82d6c + + + ami-57ba933a + 123456788908/Test Image + available + 123456788908 + false + x86_64 + machine + aki-88aa75e1 + Test Image + Testing Stuff + ebs + /dev/sda1 + + + /dev/sda1 + + snap-88123ed9 + 10 + true + standard + + + + /dev/sda2 + ephemeral0 + + + paravirtual + xen + + + ami-85b2a8ae + 123456788908/Test Image 2 + available + 123456788908 + false + x86_64 + machine + aki-88aa75e1 + Test Image 2 + ebs + /dev/sda1 + + + /dev/sda1 + + snap-c0bfbbdb + 20 + false + standard + + + + paravirtual + xen + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_instances.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_instances.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_instances.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_instances.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,182 @@ + + ec0d2a7d-5080-4f4b-9b02-cb0d5d2d4274 + + + r-fd67fb97 + 123456789098 + + + + i-4382922a + ami-3215fe5a + + 80 + stopped + + + + User initiated (2014-01-11 14:39:31 GMT) + fauxkey + 0 + + m1.small + 2013-12-02T11:58:11.000Z + + us-east-1d + + default + + aki-88aa75e1 + + disabled + + 10.211.11.211 + 1.2.3.4 + + + sg-42916629 + Test Group 1 + + + sg-42916628 + Test Group 2 + + + + Client.UserInitiatedShutdown + Client.UserInitiatedShutdown: User initiated shutdown + + x86_64 + ebs + /dev/sda1 + + + /dev/sda1 + + vol-5e312311 + attached + 2013-04-09T18:01:01.000Z + true + + + + paravirtual + ifmxj1365530456668 + + xen + + false + + + + + r-88dc1bef + 123456789098 + + + + i-8474834a + ami-29674340 + + 80 + stopped + + ip-172-16-9-139.ec2.internal + + User initiated 
(2014-01-11 14:39:31 GMT) + cderamus + 0 + + t1.micro + 2013-12-02T15:58:29.000Z + + us-east-1d + + default + + aki-88aa75e1 + + disabled + + subnet-5fd9d412 + vpc-61dcd30e + 172.16.9.139 + 1.2.3.5 + true + + + sg-495a9926 + default + + + + Client.UserInitiatedShutdown + Client.UserInitiatedShutdown: User initiated shutdown + + x86_64 + ebs + /dev/sda1 + + + /dev/sda1 + + vol-60124921 + attached + 2013-12-02T15:58:32.000Z + false + + + + paravirtual + + + + Name + Test Server 2 + + + Group + VPC Test + + + xen + + + eni-c5dffd83 + subnet-5fd9d412 + vpc-61dcd30e + + 123456789098 + in-use + 0e:27:72:16:52:ab + 172.16.9.139 + ip-172-16-9-139.ec2.internal + true + + + sg-495a9926 + default + + + + eni-attach-4d924721 + 0 + attached + 2013-12-02T15:58:29.000Z + true + + + + 172.16.4.139 + ip-172-16-4-139.ec2.internal + true + + + + + false + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_instance_types.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_instance_types.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_instance_types.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_instance_types.xml 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1 @@ +truem1.small15256t1.micro15256m1.medium110512c1.medium210512m1.large1101024c1.xlarge2101024m1.xlarge1102048m2.xlarge2102048m3.xlarge16152048m2.2xlarge16304096m3.2xlarge16304096cc1.4xlarge16603072m2.4xlarge16604096cc2.8xlarge161206144hi1.4xlarge161206144cg1.4xlarge1620012288cr1.8xlarge1624016384hs1.8xlarge1624000119808 \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_internet_gateways.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_internet_gateways.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_internet_gateways.xml 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_internet_gateways.xml 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,20 @@ + + 843ff26c-f1ac-48f5-93a6-fa28f8abd9dd + + + igw-84dd3ae1 + + + + + igw-7fdae215 + + + vpc-62cad41e + available + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_key_pairs_doesnt_exist.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_key_pairs_doesnt_exist.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_key_pairs_doesnt_exist.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_key_pairs_doesnt_exist.xml 2013-12-12 09:01:49.000000000 +0000 @@ -0,0 +1,2 @@ + +InvalidKeyPair.NotFoundThe key pair 'test-key-pair' does not exist31b97300-eb8e-405e-9567-b0f57b791fed diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_key_pairs.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_key_pairs.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_key_pairs.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_key_pairs.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,11 @@ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + gsg-keypair + + 00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00 + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_network_interfaces.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_network_interfaces.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_network_interfaces.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_network_interfaces.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,83 @@ + + e8fc6c0b-d6f8-4b85-aa29-e6a097eb4631 + + + eni-18e6c05e + subnet-5ed9d432 + vpc-62ded30e + us-east-1d + Test Interface 1 + 123456789098 + false + in-use + 0e:6e:df:72:78:af + 
172.16.4.133 + ip-172-16-4-133.ec2.internal + true + + + sg-495a9926 + default + + + + eni-attach-c87dd1a4 + i-caa71db1 + 123456789098 + 1 + attached + 2013-12-02T17:46:27.000Z + false + + + + + 172.16.4.133 + ip-172-16-4-133.ec2.internal + true + + + + + eni-83e3c5c5 + subnet-5ed9d432 + vpc-62ded30e + us-east-1d + + 123456789098 + false + in-use + 0e:93:0b:e9:e9:c4 + 172.16.4.145 + ip-172-16-4-145.ec2.internal + true + + + sg-13e4607c + Test Group + + + sg-495a9926 + default + + + + eni-attach-bae984d6 + i-caa71db1 + 123456789098 + 0 + attached + 2013-11-25T13:35:03.000Z + true + + + + + 172.16.4.145 + ip-172-16-4-145.ec2.internal + true + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_reserved_instances.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_reserved_instances.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_reserved_instances.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_reserved_instances.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,21 @@ + + 56d0fffa-8819-4658-bdd7-548f143a86d2 + + + 93bbbca2-c500-49d0-9ede-9d8737400498 + t1.micro + us-east-1b + 2013-06-18T12:07:53.161Z + 31536000 + 23.0 + 0.012 + 1 + Linux/UNIX + active + default + USD + Light Utilization + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_security_groups.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_security_groups.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_security_groups.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_security_groups.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,50 @@ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + 111122223333 + sg-443d0a12 + WebServers + Web Servers + + + + tcp + 80 + 80 + + + + 0.0.0.0/0 + + + + + + + + + 111122223333 + sg-5ff8a023 + 
RangedPortsBySource + Group A + + + tcp + 6000 + 7000 + + + 111122223333 + sg-99gh4012 + Group B + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_snapshots.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_snapshots.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_snapshots.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_snapshots.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,43 @@ + + and4xcasi-35bd-4e3c-89ab-cb183 + + + snap-428abd35 + vol-e020df80 + pending + 2013-09-15T15:40:30.000Z + 90% + 1938218230 + 30 + Daily Backup + + + Keyone + DB_Backup + + + + + + + snap-18349159 + vol-b5a2c1v9 + pending + 2013-09-15T16:00:30.000Z + 30% + 1938218230 + 15 + Weekly backup + + + Name + DB Backup 1 + + + Key2 + db_backup + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_subnets.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_subnets.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_subnets.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_subnets.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,37 @@ + + 67bba371-8044-45a7-b4a3-d72fef8b96d8 + + + subnet-ce0e7ce5 + available + vpc-532135d1 + 192.168.51.0/25 + 123 + us-east-1a + false + false + + + Name + Test Subnet 1 + + + + + subnet-ce0e7ce6 + available + vpc-532135d1 + 192.168.51.128/64 + 59 + us-east-1b + false + false + + + Name + Test Subnet 2 + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_tags.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_tags.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_tags.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_tags.xml 2014-05-26 15:42:51.000000000 +0000 @@ 
-0,0 +1,23 @@ + + fa7e0e44-df5e-49a0-98d7-5d4d19a29f95 + + + i-4382922a + instance + tag + test one + + + i-4382922a + instance + owner + libcloud + + + i-4382922a + instance + stack + Production + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_volumes.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_volumes.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_volumes.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_volumes.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,42 @@ + + 766b978a-f574-4c8d-a974-57547a8c304e + + + vol-10ae5e2b + 1 + + us-east-1d + available + 2013-10-09T05:41:37.000Z + + + + vol-v24bfh75 + 11 + + us-east-1c + available + 2013-10-08T19:36:49.000Z + + + + vol-b6c851ec + 8 + snap-30d37269 + us-east-1d + in-use + 2013-06-25T02:04:12.000Z + + + vol-b6c851ec + i-d334b4b3 + /dev/sda1 + attached + 2013-06-25T02:04:12.000Z + true + + + standard + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_vpcs.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_vpcs.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/describe_vpcs.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/describe_vpcs.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,28 @@ + + be8cfa34-0710-4895-941f-961c5738f8f8 + + + vpc-532335e1 + available + 192.168.51.0/24 + dopt-7eded312 + + default + false + + + vpc-62ded30e + available + 192.168.52.0/24 + dopt-7eded312 + + + Name + Test VPC + + + default + false + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/detach_internet_gateway.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/detach_internet_gateway.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/detach_internet_gateway.xml 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/detach_internet_gateway.xml 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,4 @@ + + 7098cc6d-a984-4d34-a5ed-6ae1a645c0b6 + true + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/detach_network_interface.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/detach_network_interface.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/detach_network_interface.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/detach_network_interface.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + 1a683cd6-58ea-4b93-a6e9-a23b56afddf0 + true + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/detach_volume.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/detach_volume.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/detach_volume.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/detach_volume.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,8 @@ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + vol-4d826724 + i-6058a509 + /dev/sdh + detaching + 2008-05-08T11:51:50.000Z + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/disassociate_address.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/disassociate_address.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/disassociate_address.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/disassociate_address.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + dfb841f8-cc26-4f45-a3ac-dc08589eec1d + true + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/get_console_output.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/get_console_output.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/get_console_output.xml 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/get_console_output.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,6 @@ + + f0ffb5ce-8d62-4ab9-add7-67a0f99c9811 + i-40128925 + 2013-12-02T12:31:38.000Z + VGVzdCBTdHJpbmc= + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/import_key_pair.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/import_key_pair.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/import_key_pair.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/import_key_pair.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,5 @@ + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + keypair + 00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/modify_image_attribute.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/modify_image_attribute.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/modify_image_attribute.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/modify_image_attribute.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,3 @@ + + true + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/modify_instance_attribute.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/modify_instance_attribute.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/modify_instance_attribute.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/modify_instance_attribute.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/reboot_instances.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/reboot_instances.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/reboot_instances.xml 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/reboot_instances.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + 76dabb7a-fb39-4ed1-b5e0-31a4a0fdf5c0 + true + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/register_image.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/register_image.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/register_image.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/register_image.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + 6d858ea7-053e-4751-9fae-b891019fc8d2 + ami-57c2fb3e + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/release_address.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/release_address.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/release_address.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/release_address.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + 23ec1390-8c1d-4a3e-8042-b1ad84933f57 + true + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/revoke_security_group_egress.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/revoke_security_group_egress.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/revoke_security_group_egress.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/revoke_security_group_egress.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,4 @@ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/revoke_security_group_ingress.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/revoke_security_group_ingress.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/revoke_security_group_ingress.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/revoke_security_group_ingress.xml 2014-05-26 15:42:51.000000000 +0000 
@@ -0,0 +1,4 @@ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/run_instances_iam_profile.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/run_instances_iam_profile.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/run_instances_iam_profile.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/run_instances_iam_profile.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,35 @@ + + r-47a5403e + AIDADH4IGTRXXKCD + + + default + + + + + i-2ba64343 + ami-be3adfd7 + + 0 + pending + + + + example-key-name + 0 + m1.small + 2007-08-07T11:51:50.000Z + + us-east-1b + + + true + + + AIDGPMS9RO4H3FEXAMPLE + arn:aws:iam::123456789012:instance-profile/ExampleInstanceProfile + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/run_instances_idem_mismatch.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/run_instances_idem_mismatch.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/run_instances_idem_mismatch.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/run_instances_idem_mismatch.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,12 @@ + + + + + IdempotentParameterMismatch + + Arguments on this idempotent request are inconsistent with arguments used in previous request(s). 
+ + + + 5dabd361-d2e0-4f79-937d-4b2852a3b719 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/run_instances_idem.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/run_instances_idem.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/run_instances_idem.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/run_instances_idem.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,32 @@ + + r-47a5402e + AIDADH4IGTRXXKCD + + + default + + + + + i-2ba64342 + ami-be3adfd7 + + 0 + pending + + + + example-key-name + 0 + m1.small + 2007-08-07T11:51:50.000Z + + us-east-1b + + + true + + testclienttoken + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/run_instances.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/run_instances.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/run_instances.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/run_instances.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,31 @@ + + r-47a5402e + AIDADH4IGTRXXKCD + + + default + + + + + i-2ba64342 + ami-be3adfd7 + + 0 + pending + + + + example-key-name + 0 + m1.small + 2007-08-07T11:51:50.000Z + + us-east-1b + + + true + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/start_instances.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/start_instances.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/start_instances.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/start_instances.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,17 @@ + + 322f78ee-967b-40c9-aecd-8d442022da20 + + + i-ff5de6aa + + 0 + pending + + + 80 + stopped + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/stop_instances.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/stop_instances.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/stop_instances.xml 1970-01-01 00:00:00.000000000 +0000 
+++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/stop_instances.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,16 @@ + + 4ace6850-c876-4971-af0b-67a4278c36a1 + + + i-2ba64342 + + 64 + stopping + + + 16 + running + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/terminate_instances.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/terminate_instances.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ec2/terminate_instances.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ec2/terminate_instances.xml 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,16 @@ + + fa63083d-e0f7-4933-b31a-f266643bdee8 + + + i-4382922a + + 32 + shutting-down + + + 16 + running + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ecp/htemplate_list.json libcloud-0.15.1/libcloud/test/compute/fixtures/ecp/htemplate_list.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ecp/htemplate_list.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ecp/htemplate_list.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,9 @@ +{"templates": [ + +{"uuid": "1", "hypervisor_name": "kvm-hvm", "cpus": 1, "memory": 512, "arch": "i686", "id": 1, "name": "Small"}, + +{"uuid": "2", "hypervisor_name": "kvm-hvm", "cpus": 2, "memory": 1024, "arch": "i686", "id": 2, "name": "Medium"}, + +{"uuid": "3", "hypervisor_name": "kvm-hvm", "cpus": 3, "memory": 2048, "arch": "x86_64", "id": 3, "name": "Large"} + +], "errno": 0, "message": "Success"} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ecp/network_list.json libcloud-0.15.1/libcloud/test/compute/fixtures/ecp/network_list.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ecp/network_list.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ecp/network_list.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"errno": 0, "message": "Success", "networks": 
[{"uuid": "1", "vlan_id": null, "name": "Default"}]} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ecp/ptemplate_list.json libcloud-0.15.1/libcloud/test/compute/fixtures/ecp/ptemplate_list.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ecp/ptemplate_list.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ecp/ptemplate_list.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ +{"errno": 0, "message": "Success", "packages": [ + +{"os": "unknown", "description": "AUTO import from /opt/enomalism2/repo/5d407a68-c76c-11de-86e5-000475cb7577.xvm2", "storage": 20480, "uuid": "1", "name": "centos54"}, + +{"os": "unknown", "description": "AUTO import from /opt/enomalism2/repo/5d407a68-c76c-11de-86e5-000475cb7577.xvm2", "storage": 20480, "uuid": "2", "name": "centos54 two"} +]} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ecp/vm_1_action_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/ecp/vm_1_action_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ecp/vm_1_action_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ecp/vm_1_action_delete.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"errno": 0, "message": "Success"} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ecp/vm_1_action_start.json libcloud-0.15.1/libcloud/test/compute/fixtures/ecp/vm_1_action_start.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ecp/vm_1_action_start.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ecp/vm_1_action_start.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,3 @@ +{"errno": 0, "message": "Success", "vm": +{"vnc_enabled": true, "uuid": 1, "tags": [], "ip_address": "42.78.124.75", "interfaces": [{"ip": "42.78.124.75", "mac": "00:16:e9:d6:40:c6", "network_name": "Default", "uuid": 
"479b9823-2ded-11df-94e8-0015174e564c", "network": "fc38963c-a9fa-11de-8c4b-001b63a56c51"}], "vnc_port": "5900", "name": "dummy-1", "state": "unkown", "trusted": null, "os": "unknown", "vnc_password": "jBs5UT00", "vnc_ip_address": "192.168.1.12", "hardware_profile_uuid": "bcaff710-2914-11de-836c-001a929face2"} +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ecp/vm_1_action_stop.json libcloud-0.15.1/libcloud/test/compute/fixtures/ecp/vm_1_action_stop.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ecp/vm_1_action_stop.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ecp/vm_1_action_stop.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,3 @@ +{"errno": 0, "message": "Success", "vm": +{"vnc_enabled": true, "uuid": 1, "tags": [], "ip_address": "42.78.124.75", "interfaces": [{"ip": "42.78.124.75", "mac": "00:16:e9:d6:40:c6", "network_name": "Default", "uuid": "479b9823-2ded-11df-94e8-0015174e564c", "network": "fc38963c-a9fa-11de-8c4b-001b63a56c51"}], "vnc_port": "5900", "name": "dummy-1", "state": "unkown", "trusted": null, "os": "unknown", "vnc_password": "jBs5UT00", "vnc_ip_address": "192.168.1.111", "hardware_profile_uuid": "bcaff710-2914-11de-836c-001a929face2"} +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ecp/vm_1_get.json libcloud-0.15.1/libcloud/test/compute/fixtures/ecp/vm_1_get.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ecp/vm_1_get.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ecp/vm_1_get.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,3 @@ +{"errno": 0, "message": "Success", "vm": +{"vnc_enabled": true, "uuid": 1, "tags": [], "ip_address": "42.78.124.75", "interfaces": [{"ip": "42.78.124.75", "mac": "00:16:e9:d6:40:c6", "network_name": "Default", "uuid": "479b9823-2ded-11df-94e8-0015174e564c", "network": "fc38963c-a9fa-11de-8c4b-001b63a56c51"}], "vnc_port": "5900", 
"name": "dummy-1", "state": "off", "trusted": null, "os": "unknown", "vnc_password": "jBs5UT00", "vnc_ip_address": "192.168.1.111", "hardware_profile_uuid": "bcaff710-2914-11de-836c-001a929face2"} +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ecp/vm_list.json libcloud-0.15.1/libcloud/test/compute/fixtures/ecp/vm_list.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ecp/vm_list.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ecp/vm_list.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,10 @@ +{"errno": 0, "message": "Success", "vms": +[ +{"vnc_enabled": true, "uuid": 1, "tags": [], "ip_address": "42.78.124.75", "interfaces": [{"ip": "42.78.124.75", "mac": "00:16:e9:d6:40:c6", "network_name": "Default", "uuid": "479b9823-2ded-11df-94e8-0015174e564c", "network": "fc38963c-a9fa-11de-8c4b-001b63a56c51"}], "vnc_port": "5900", "name": "dummy-1", "state": "running", "trusted": null, "os": "unknown", "vnc_password": "jBs5UT00", "vnc_ip_address": "192.168.1.111", "hardware_profile_uuid": "bcaff710-2914-11de-836c-001a929face2"}, + +{"vnc_enabled": true, "uuid": 2, "tags": [], "ip_address": "42.78.124.75", "interfaces": [{"ip": "42.78.124.75", "mac": "00:16:72:b4:71:21", "network_name": "Default", "uuid": "c76edd61-2dfd-11df-84ca-0015174e564c", "network": "fc38963c-a9fa-11de-8c4b-001b63a56c51"}], "vnc_port": "5902", "name": "dummy-2", "state": "running", "trusted": null, "os": "unknown", "vnc_password": "zoiZW31T", "vnc_ip_address": "192.168.1.111", "hardware_profile_uuid": "bcaff710-2914-11de-836c-001a929face2"}, + +{"vnc_enabled": true, "uuid": 3, "tags": [], "ip_address": "42.78.124.75", "interfaces": [{"ip": "42.78.124.75", "mac": "00:16:e9:d6:40:c6", "network_name": "Default", "uuid": "479b9823-2ded-11df-94e8-0015174e564c", "network": "fc38963c-a9fa-11de-8c4b-001b63a56c51"}], "vnc_port": "5900", "name": "dummy-1", "state": "stopped", "trusted": null, "os": "unknown", 
"vnc_password": "jBs5UT00", "vnc_ip_address": "192.168.1.111", "hardware_profile_uuid": "bcaff710-2914-11de-836c-001a929face2"} + +] +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ecp/vm_put.json libcloud-0.15.1/libcloud/test/compute/fixtures/ecp/vm_put.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ecp/vm_put.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ecp/vm_put.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"errno": 0, "message": "Success", "txid": "fc38963c-a9fa-11de-8c4b-001baaa56c51", "machine_id": "1234"} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/elastichosts/drives_create.json libcloud-0.15.1/libcloud/test/compute/fixtures/elastichosts/drives_create.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/elastichosts/drives_create.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/elastichosts/drives_create.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,12 @@ +{ + "drive": "0012e24a-6eae-4279-9912-3432f698cec8", + "encryption:cipher": "aes-xts-plain", + "name": "test drive", + "read:bytes": "4096", + "read:requests": "1", + "size": 10737418240, + "status": "active", + "user": "2164ce57-591c-43ee-ade5-e2fe0ee13c3e", + "write:bytes": "4096", + "write:requests": "1" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/elastichosts/drives_info.json libcloud-0.15.1/libcloud/test/compute/fixtures/elastichosts/drives_info.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/elastichosts/drives_info.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/elastichosts/drives_info.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,12 @@ +{ + "drive": "0012e24a-6eae-4279-9912-3432f698cec8", + "encryption:cipher": "aes-xts-plain", + "name": "test drive", + "read:bytes": "4096", + "read:requests": "1", + 
"size": 10737418240, + "status": "active", + "user": "2164ce57-591c-43ee-ade5-e2fe0ee13c3e", + "write:bytes": "4096", + "write:requests": "1" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/elastichosts/servers_create.json libcloud-0.15.1/libcloud/test/compute/fixtures/elastichosts/servers_create.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/elastichosts/servers_create.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/elastichosts/servers_create.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,25 @@ +{ + "boot": "ide:0:0", + "cpu": 2000, + "ide:0:0": "b6049e7a-aa1b-47f9-b21d-cdf2354e28d3", + "ide:0:0:read:bytes": "299696128", + "ide:0:0:read:requests": "73168", + "ide:0:0:write:bytes": "321044480", + "ide:0:0:write:requests": "78380", + "mem": 1024, + "name": "test api node", + "nic:0:block": "tcp/21 tcp/22 tcp/23 tcp/25", + "nic:0:dhcp": ["1.2.3.4", "1.2.3.5"], + "nic:0:model": "virtio", + "rx": 679560, + "rx:packets": 644, + "server": "b605ca90-c3e6-4cee-85f8-a8ebdf8f9903", + "smp": 1, + "started": 1280723696, + "status": "active", + "tx": 21271, + "tx:packets": "251", + "user": "2164ce57-591a-43ee-ade5-e2fe0ee13c3f", + "vnc:ip": "216.151.208.174", + "vnc:password": "testvncpass" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/elastichosts/servers_info.json libcloud-0.15.1/libcloud/test/compute/fixtures/elastichosts/servers_info.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/elastichosts/servers_info.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/elastichosts/servers_info.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,27 @@ +[ + { + "boot": "ide:0:0", + "cpu": 2000, + "ide:0:0": "b6049e7a-aa1b-47f9-b21d-cdf2354e28d3", + "ide:0:0:read:bytes": "299696128", + "ide:0:0:read:requests": "73168", + "ide:0:0:write:bytes": "321044480", + "ide:0:0:write:requests": "78380", + "mem": 
1024, + "name": "test api node", + "nic:0:block": "tcp/21 tcp/22 tcp/23 tcp/25", + "nic:0:dhcp": ["1.2.3.4", "1.2.3.5"], + "nic:0:model": "virtio", + "rx": 679560, + "rx:packets": 644, + "server": "b605ca90-c3e6-4cee-85f8-a8ebdf8f9903", + "smp": 1, + "started": 1280723696, + "status": "active", + "tx": 21271, + "tx:packets": "251", + "user": "2164ce57-591a-43ee-ade5-e2fe0ee13c3f", + "vnc:ip": "216.151.208.174", + "vnc:password": "testvncpass" + } +] \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/account_info_rating.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/account_info_rating.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/account_info_rating.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/account_info_rating.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,58 @@ + + + + + + +handle +AB9090-GANDI + + +rating_enabled +1 + + +date_credits_expiration + + +credits +0 + + +products + + +average_credit_cost + + +share_definition + + +fullname +Aymeric BARANTAL + + +id +24 + + +resources + + +available + + +granted + + +used + + +expired + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/account_info.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/account_info.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/account_info.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/account_info.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,317 @@ + + + + + + +handle +AB3917-GANDI + + +products + + + +errors_for_updating + +product_name_does_not_match +no_action_on_free_product + + + +can_release +1 + + +date_end + + + +product_name +shares_fixed + + +autorenew + + + +errors_for_removing + + + + +errors_for_releasing + +no_action_on_free_product +not_available_resource + + + +is_in_redemption + + + +errors_for_autorenewing + +no_action_on_free_product + + + +duration +1y 
+ + +date_created +20101028T12:38:17 + + +quantity +12 + + +errors_for_renewing + +no_action_on_free_product + + + +id +11153 + + +redemption +7 + + + + +errors_for_updating + +no_action_on_free_product + + + +can_release +0 + + +date_end + + + +product_name +ips + + +autorenew + + + +errors_for_removing + + + + +errors_for_releasing + +no_action_on_free_product +db_can_not_release + + + +is_in_redemption + + + +errors_for_autorenewing + +no_action_on_free_product + + + +duration +1m + + +date_created +20110124T11:42:35 + + +quantity +4 + + +errors_for_renewing + +no_action_on_free_product + + + +id +11196 + + +redemption +7 + + + + + +share_definition + + +servers +1 + + +bandwidth +5120.0 + + +memory +256 + + +cores +0.25 + + +slots +0.66666666666666663 + + +disk +8192 + + + + +fullname +Aymeric Barantal + + +id +58757 + + +resources + + +available + + +shares +12 + + +servers +8 + + +ips +4 + + +bandwidth +51200.0 + + +memory +2560 + + +cores +3.0 + + +slots +4.0 + + +disk +89088 + + + + +granted + + +shares +12 + + +servers +12 + + +ips +8 + + +bandwidth +61440 + + +memory +3072 + + +cores +5.0 + + +slots +8.0 + + +disk +98304 + + + + +used + + +servers +4 + + +ips +4 + + +bandwidth +10240.0 + + +memory +512 + + +cores +2.0 + + +slots +4 + + +disk +9216 + + + + +expired + + + + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/datacenter_list.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/datacenter_list.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/datacenter_list.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/datacenter_list.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,53 @@ + + + + + + + + + + + country + France + + + iso + FR + + + id + 1 + + + name + Equinix Paris + + + + + + + country + United States of America + + + iso + US + + + id + 2 + + + name + Level3 Baltimore + + + + + + + + + \ No newline at end of file diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/disk_attach.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/disk_attach.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/disk_attach.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/disk_attach.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,53 @@ + + + + + + +iface_id + + + +date_updated +20110921T12:57:05 + + +vm_id +250133 + + +date_start + + + +disk_id +34918 + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110921T12:57:05 + + +type +disk_attach + + +id +657982 + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/disk_create_from.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/disk_create_from.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/disk_create_from.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/disk_create_from.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,53 @@ + + + + + + +iface_id + + + +date_updated +20110921T14:20:56 + + +vm_id + + + +date_start + + + +disk_id +35288 + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110921T14:20:56 + + +type +disk_create + + +id +657985 + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/disk_create.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/disk_create.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/disk_create.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/disk_create.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,49 @@ + + + + + + +iface_id + + +date_updated +20120629T11:48:20 + + +vm_id + + +date_start + + +disk_id +1263 + + +source +AB3917-GANDI + + +step +DONE + + +ip_id + + +date_created +20120629T11:48:20 + + +type +disk_create + + +id +10895 + + + + + \ No newline at end of file diff 
-Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/disk_delete.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/disk_delete.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/disk_delete.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/disk_delete.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,49 @@ + + + + + + +iface_id + + +date_updated +20120629T11:47:06 + + +vm_id + + +date_start + + +disk_id +1262 + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + +date_created +20120629T11:47:06 + + +type +disk_delete + + +id +10894 + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/disk_detach.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/disk_detach.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/disk_detach.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/disk_detach.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,53 @@ + + + + + + +iface_id + + + +date_updated +20110921T12:57:35 + + +vm_id +250133 + + +date_start + + + +disk_id +34918 + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110921T12:57:35 + + +type +disk_detach + + +id +657983 + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/disk_info.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/disk_info.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/disk_info.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/disk_info.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,73 @@ + + + + + + +datacenter_id +1 + + +name +libcloud + + +snapshot_profile + + +kernel_version + + +can_snapshot +1 + + +kernel_cmdline + + +visibility +private + + +label + + +vms_id + + + + +source + + +state +created + + +is_boot_disk +0 + + +date_updated +20120629T11:49:00 + + +date_created +20120629T11:48:20 
+ + +type +data + + +id +1263 + + +size +1024 + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/disk_list.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/disk_list.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/disk_list.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/disk_list.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,200 @@ + + + + + + + +datacenter_id +1 + + +name +disk_libcloud2 + + +kernel_version +2.6.32 + + +can_snapshot +0 + + +visibility +private + + +label +Debian 5 + + +vms_id + + + + +source +23351 + + +state +created + + +is_boot_disk +0 + + +date_updated +20101116T10:51:59 + + +date_created +20101028T13:52:38 + + +type +data + + +id +34918 + + +size +3072 + + + + +datacenter_id +1 + + +name +test1 + + +kernel_version +2.6.32 + + +can_snapshot + + + +visibility +private + + +label +Debian 5 + + +vms_id + +250133 + + + +source +23351 + + +state +created + + +is_boot_disk +1 + + +date_updated +20110120T15:02:01 + + +date_created +20110120T14:57:55 + + +type +data + + +id +34951 + + +size +3072 + + + + +datacenter_id +1 + + +name +test_disk + + +kernel_version +2.6.32 + + +can_snapshot +1 + + +visibility +private + + +label +Debian 5 + + +vms_id + +250288 + + + +source +23351 + + +state +created + + +is_boot_disk +1 + + +date_updated +20110325T16:31:11 + + +date_created +20110324T17:14:06 + + +type +data + + +id +35170 + + +size +3072 + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/disk_update.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/disk_update.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/disk_update.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/disk_update.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,53 @@ + + + + + + +iface_id + + + +date_updated +20110921T14:23:10 + + +vm_id + + + 
+date_start + + + +disk_id +34951 + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110921T14:23:10 + + +type +disk_update + + +id +657987 + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/iface_attach.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/iface_attach.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/iface_attach.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/iface_attach.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,53 @@ + + + + + + +iface_id +7857 + + +date_updated +20110921T12:49:35 + + +vm_id +250133 + + +date_start + + + +disk_id + + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110921T12:49:35 + + +type +iface_attach + + +id +657980 + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/iface_detach.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/iface_detach.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/iface_detach.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/iface_detach.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,53 @@ + + + + + + +iface_id +7857 + + +date_updated +20110921T12:53:29 + + +vm_id +250133 + + +date_start + + + +disk_id + + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110921T12:53:29 + + +type +iface_detach + + +id +657981 + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/iface_list.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/iface_list.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/iface_list.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/iface_list.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,99 @@ + + + + + + + +date_updated +20110120T14:58:44 + + +vm_id +250133 + + 
+bandwidth +5120.0 + + +datacenter_id +1 + + +state +used + + +num +0 + + +ips_id + +9256 +9294 + + + +date_created +20110120T14:57:55 + + +type +public + + +id +7857 + + + + +date_updated +20110324T17:14:16 + + +vm_id +250288 + + +bandwidth +5192.0 + + +datacenter_id +1 + + +state +used + + +num +0 + + +ips_id + +9298 +9508 + + + +date_created +20110324T17:14:06 + + +type +public + + +id +8019 + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/image_list_dc0.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/image_list_dc0.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/image_list_dc0.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/image_list_dc0.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,493 @@ + + + + + + + + + + + date_updated + 20100928T10:41:38 + + + disk_id + 34198 + + + label + GandiOS + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20070101T00:00:00 + + + author_id + 248842 + + + id + 2 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 11233 + + + label + Mandriva 2008.0 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20070101T00:00:00 + + + author_id + 248842 + + + id + 3 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 11235 + + + label + Centos 5 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20070101T00:00:00 + + + author_id + 248842 + + + id + 4 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 11236 + + + label + Fedora Core 7 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20070101T00:00:00 + + + author_id + 248842 + + + id + 5 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 11237 + + + label + Open SUSE 10.3 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 
20070101T00:00:00 + + + author_id + 248842 + + + id + 6 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 11238 + + + label + Debian 4 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20070101T00:00:00 + + + author_id + 248842 + + + id + 7 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 11239 + + + label + Fedora Core 8 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20080101T00:00:00 + + + author_id + 248842 + + + id + 8 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 11240 + + + label + Open SUSE 11.0 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20080101T00:00:00 + + + author_id + 248842 + + + id + 9 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 11241 + + + label + Mandriva 2008.1 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20080101T00:00:00 + + + author_id + 248842 + + + id + 10 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 11242 + + + label + Ubuntu 8.04 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20080101T00:00:00 + + + author_id + 248842 + + + id + 11 + + + + + + + date_updated + 20100922T11:56:05 + + + disk_id + 23351 + + + label + Debian 5 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20090101T00:00:00 + + + author_id + 248842 + + + id + 12 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 23352 + + + label + Ubuntu 9.04 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20090101T00:00:00 + + + author_id + 248842 + + + id + 13 + + + + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/ip_list.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/ip_list.xml --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/ip_list.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/ip_list.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,261 @@ + + + + + + + +reverse +xvm-6-186.ghst.net + + +iface_id +7857 + + +date_updated +20110120T14:57:55 + + +ip +10.5.6.186 + + +datacenter_id +1 + + +state +created + + +num +0 + + +version +4 + + +date_created +20101028T12:49:11 + + +id +9256 + + + + +reverse +xvm6-fe37-9f7b.ghst.net + + +iface_id +7857 + + +date_updated +20110120T14:58:44 + + +ip +2001:4b98:dc0:543:216:3eff:fe37:9f7b + + +datacenter_id +1 + + +state +created + + +num +1 + + +version +6 + + +date_created +20110120T14:58:44 + + +id +9294 + + + + +reverse +xvm-6-179.ghst.net + + +iface_id +7861 + + +date_updated +20110124T15:53:44 + + +ip +10.5.6.179 + + +datacenter_id +1 + + +state +created + + +num +0 + + +version +4 + + +date_created +20110124T11:43:17 + + +id +9298 + + + + +reverse +xvm6-fea8-3724.ghst.net + + +iface_id +7861 + + +date_updated +20110124T15:54:44 + + +ip +2001:4b98:dc0:543:216:3eff:fea8:3724 + + +datacenter_id +1 + + +state +created + + +num +1 + + +version +6 + + +date_created +20110124T15:54:44 + + +id +9301 + + + + +reverse + + + +iface_id + + + +date_updated +20110217T17:39:39 + + +ip + + + +datacenter_id +1 + + +state +being_created + + +num + + + +version +4 + + +date_created +20110217T17:39:39 + + +id +9323 + + + + +reverse +xvm-6-26.ghst.net + + +iface_id + + + +date_updated +20110225T11:59:55 + + +ip +10.5.6.26 + + +datacenter_id +1 + + +state +created + + +num +0 + + +version +4 + + +date_created +20110224T16:46:33 + + +id +9332 + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/operation_info.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/operation_info.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/operation_info.xml 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/operation_info.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,45 @@ + + + + + + +date_updated +20110324T15:49:50 + + +last_error + + + +date_start + + + +source +AB3917-GANDI + + +step +DONE + + +eta +39 + + +date_created +20110324T15:49:32 + + +type +vm_delete + + +id +637366 + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/vm_create_from.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/vm_create_from.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/vm_create_from.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/vm_create_from.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,147 @@ + + + + + + + +iface_id + + + +date_updated +20110324T17:14:06 + + +type +disk_create + + +date_start + + + +disk_id +35170 + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110324T17:14:06 + + +vm_id + + + +id +637370 + + + + +iface_id +8019 + + +date_updated +20110324T17:14:06 + + +vm_id + + + +date_start + + + +disk_id + + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id +9298 + + +date_created +20110324T17:14:06 + + +type +iface_create + + +id +637371 + + + + +iface_id + + + +date_updated +20110324T17:14:07 + + +type +vm_create + + +date_start + + + +disk_id + + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110324T17:14:07 + + +vm_id +250288 + + +id +637372 + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/vm_delete.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/vm_delete.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/vm_delete.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/vm_delete.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,53 @@ + + + + + + +iface_id + + + +date_updated +20110324T15:49:32 + + +vm_id +250136 + + 
+date_start + + + +disk_id + + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110324T15:49:32 + + +type +vm_delete + + +id +637366 + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/vm_info.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/vm_info.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/vm_info.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/vm_info.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,330 @@ + + + + + + +memory +256 + + +hostname +test2 + + +console +0 + + +description + + + +triggers + + + + +date_updated +20110120T15:25:07 + + +disks + + + +datacenter_id +1 + + +name +test2 + + +kernel_version +2.6.32 + + +can_snapshot + + + +kernel_cmdline + + +root +/dev/xvda1 + + +ro +1 + + +console +xvc0 + + +nosep +1 + + + + +visibility +private + + +label +Debian 5 + + +vms_id + +250133 + + + +source +23351 + + +state +running + + +is_boot_disk +1 + + +date_updated +20110120T15:02:01 + + +date_created +20110120T14:57:55 + + +type +data + + +id +34951 + + +size +3072 + + + + + +disks_id + +34951 + + + +datacenter_id +1 + + +state +running + + +flex_shares +0 + + +ai_active +0 + + +vm_max_memory +2048 + + +ifaces + + + +date_updated +20110120T14:58:44 + + +vm_id +250133 + + +bandwidth +5120.0 + + +datacenter_id +1 + + +ips + + + +reverse +xvm-6-186.ghst.net + + +iface_id +7857 + + +date_updated +20110120T14:57:55 + + +ip +10.5.6.186 + + +datacenter_id +1 + + +state +created + + +num +0 + + +version +4 + + +date_created +20101028T12:49:11 + + +id +9256 + + + + +reverse +xvm6-fe37-9f7b.ghst.net + + +iface_id +7857 + + +date_updated +20110120T14:58:44 + + +ip +2001:4b98:dc0:543:216:3eff:fe37:9f7b + + +datacenter_id +1 + + +state +created + + +num +1 + + +version +6 + + +date_created +20110120T14:58:44 + + +id +9294 + + + + + +state +used + + +num +0 + + +ips_id + +9256 +9294 + + + +date_created +20110120T14:57:55 + 
+ +type +public + + +id +7857 + + + + + +cores +1 + + +ifaces_id + +7857 + + + +graph_urls + + +vcpu + +http://graph.dev.hosting.gandi.net:8080//?key=88a6b2a04f21c3b9c055d73310ee37ea47fe25c7&vm_id=379&dc_id=1&stats_target=vcpu&device_number=0 + + + +vdi + +http://graph.dev.hosting.gandi.net:8080//?key=88a6b2a04f21c3b9c055d73310ee37ea47fe25c7&vm_id=379&dc_id=1&stats_target=vdi&device_number=0 + + + +vif + +http://graph.dev.hosting.gandi.net:8080//?key=88a6b2a04f21c3b9c055d73310ee37ea47fe25c7&vm_id=379&dc_id=1&stats_target=vif&device_number=0 + + + + + +date_created +20110120T14:57:55 + + +id +250133 + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/vm_list.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/vm_list.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/vm_list.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/vm_list.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,141 @@ + + + + + + + +memory +256 + + +console +0 + + +description + + + +date_updated +20110120T15:25:07 + + +hostname +test1 + + +disks_id + +34951 + + + +datacenter_id +1 + + +state +running + + +flex_shares +0 + + +ai_active +0 + + +vm_max_memory +2048 + + +cores +1 + + +ifaces_id + +7857 + + + +date_created +20110120T14:57:55 + + +id +250133 + + + + +memory +256 + + +console +0 + + +description + + + +date_updated +20110225T12:09:31 + + +hostname +test2 + + +disks_id + +34954 + + + +datacenter_id +1 + + +state +halted + + +flex_shares +0 + + +ai_active +0 + + +vm_max_memory +2048 + + +cores +1 + + +ifaces_id + +7861 + + + +date_created +20110124T15:53:44 + + +id +250136 + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/vm_reboot.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/vm_reboot.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/vm_reboot.xml 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/vm_reboot.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,53 @@ + + + + + + +iface_id + + + +date_updated +20110325T13:18:27 + + +vm_id +250133 + + +date_start + + + +disk_id + + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110325T13:18:27 + + +type +vm_reboot + + +id +637398 + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/vm_stop.xml libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/vm_stop.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/gandi/vm_stop.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gandi/vm_stop.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,53 @@ + + + + + + +iface_id + + + +date_updated +20110325T13:19:52 + + +vm_id +250133 + + +date_start + + + +disk_id + + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110325T13:19:52 + + +type +vm_stop + + +id +637399 + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/aggregated_addresses.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/aggregated_addresses.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/aggregated_addresses.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/aggregated_addresses.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,71 @@ +{ + "id": "projects/project_name/aggregated/addresses", + "items": { + "regions/europe-west1": { + "addresses": [ + { + "address": "192.158.29.247", + "creationTimestamp": "2013-06-26T09:51:47.506-07:00", + "description": "", + "id": "10955781597205896134", + "kind": "compute#address", + "name": "libcloud-demo-europe-address", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/europe-west1", + "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/regions/europe-west1/addresses/libcloud-demo-europe-address", + "status": "RESERVED" + } + ] + }, + "regions/us-central1": { + "addresses": [ + { + "address": "173.255.113.20", + "creationTimestamp": "2013-06-26T12:21:40.625-07:00", + "description": "", + "id": "01531551729918243104", + "kind": "compute#address", + "name": "lcaddress", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/lcaddress", + "status": "RESERVED" + }, + { + "address": "108.59.82.4", + "creationTimestamp": "2013-06-26T09:48:31.184-07:00", + "description": "", + "id": "17634862894218443422", + "kind": "compute#address", + "name": "libcloud-demo-address", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/libcloud-demo-address", + "status": "RESERVED" + }, + { + "address": "173.255.114.104", + "creationTimestamp": "2013-06-04T16:28:43.764-07:00", + "description": "", + "id": "11879548153827627972", + "kind": "compute#address", + "name": "testaddress", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/testaddress", + "status": "RESERVED" + } + ] + }, + "regions/us-central2": { + "warning": { + "code": "NO_RESULTS_ON_PAGE", + "data": [ + { + "key": "scope", + "value": "regions/us-central2" + } + ], + "message": "There are no results for scope 'regions/us-central2' on this page." 
+ } + } + }, + "kind": "compute#addressAggregatedList", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/aggregated/addresses" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/aggregated_disks.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/aggregated_disks.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/aggregated_disks.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/aggregated_disks.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,165 @@ +{ + "id": "projects/project_name/aggregated/disks", + "items": { + "zones/europe-west1-a": { + "disks": [ + { + "creationTimestamp": "2013-12-13T10:43:33.753-08:00", + "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "id": "0819226106922408217", + "kind": "compute#disk", + "name": "libcloud-demo-europe-boot-disk", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-boot-disk", + "sizeGb": "10", + "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "sourceImageId": "17312518942796567788", + "status": "READY", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" + }, + { + "creationTimestamp": "2013-12-13T10:43:20.420-08:00", + "id": "30789070506648158", + "kind": "compute#disk", + "name": "libcloud-demo-europe-attach-disk", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-attach-disk", + "sizeGb": "1", + "status": "READY", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" + }, + { + "creationTimestamp": "2013-12-13T10:43:07.390-08:00", + "description": "Image: 
https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "id": "01221310665639400697", + "kind": "compute#disk", + "name": "libcloud-demo-europe-np-node", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-np-node", + "sizeGb": "10", + "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "sourceImageId": "17312518942796567788", + "status": "READY", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" + }, + { + "creationTimestamp": "2013-12-13T10:43:53.598-08:00", + "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "id": "17495188440080825940", + "kind": "compute#disk", + "name": "libcloud-demo-europe-multiple-nodes-000", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-multiple-nodes-000", + "sizeGb": "10", + "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "sourceImageId": "17312518942796567788", + "status": "READY", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" + } + ] + }, + "zones/europe-west1-b": { + "warning": { + "code": "NO_RESULTS_ON_PAGE", + "data": [ + { + "key": "scope", + "value": "zones/europe-west1-b" + } + ], + "message": "There are no results for scope 'zones/europe-west1-b' on this page." + } + }, + "zones/us-central1-a": { + "warning": { + "code": "NO_RESULTS_ON_PAGE", + "data": [ + { + "key": "scope", + "value": "zones/us-central1-a" + } + ], + "message": "There are no results for scope 'zones/us-central1-a' on this page." 
+ } + }, + "zones/us-central1-b": { + "disks": [ + { + "creationTimestamp": "2013-09-04T11:03:54.122-07:00", + "description": "Persistent boot disk created from https://www.googleapis.com/compute/v1beta15/projects/debian-cloud/global/images/debian-7-wheezy-v20130723.", + "id": "8658241308250794904", + "kind": "compute#disk", + "name": "test1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/disks/test1", + "sizeGb": "10", + "status": "READY", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b" + } + ] + }, + "zones/us-central2-a": { + "disks": [ + { + "creationTimestamp": "2013-12-13T10:41:59.430-08:00", + "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "id": "3371304879167251249", + "kind": "compute#disk", + "name": "libcloud-demo-boot-disk", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-boot-disk", + "sizeGb": "10", + "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "sourceImageId": "17312518942796567788", + "status": "READY", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" + }, + { + "creationTimestamp": "2013-12-13T10:42:15.355-08:00", + "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "id": "12650345960824309663", + "kind": "compute#disk", + "name": "libcloud-demo-multiple-nodes-000", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-multiple-nodes-000", + "sizeGb": "10", + "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "sourceImageId": "17312518942796567788", + "status": "READY", + "zone": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" + }, + { + "creationTimestamp": "2013-12-13T10:41:52.533-08:00", + "id": "01867312924613359214", + "kind": "compute#disk", + "name": "libcloud-demo-attach-disk", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-attach-disk", + "sizeGb": "1", + "status": "READY", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" + }, + { + "creationTimestamp": "2013-12-13T10:42:15.949-08:00", + "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "id": "12498700959701905851", + "kind": "compute#disk", + "name": "libcloud-demo-multiple-nodes-001", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-multiple-nodes-001", + "sizeGb": "10", + "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "sourceImageId": "17312518942796567788", + "status": "READY", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" + }, + { + "creationTimestamp": "2013-12-13T10:41:44.063-08:00", + "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "id": "345757781195247006", + "kind": "compute#disk", + "name": "libcloud-demo-np-node", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-np-node", + "sizeGb": "10", + "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "sourceImageId": "17312518942796567788", + "status": "READY", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" + } + ] + } + }, + "kind": "compute#diskAggregatedList", + 
"selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/aggregated/disks" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/aggregated_forwardingRules.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/aggregated_forwardingRules.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/aggregated_forwardingRules.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/aggregated_forwardingRules.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,59 @@ +{ + "id": "projects/project_name/aggregated/forwardingRules", + "items": { + "regions/europe-west1": { + "warning": { + "code": "NO_RESULTS_ON_PAGE", + "data": [ + { + "key": "scope", + "value": "regions/europe-west1" + } + ], + "message": "There are no results for scope 'regions/europe-west1' on this page." + } + }, + "regions/us-central1": { + "forwardingRules": [ + { + "IPAddress": "108.59.86.60", + "IPProtocol": "TCP", + "creationTimestamp": "2013-12-13T10:51:47.602-08:00", + "id": "0401221837226610637", + "kind": "compute#forwardingRule", + "name": "libcloud-lb-demo-lb", + "portRange": "80-80", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/libcloud-lb-demo-lb", + "target": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/libcloud-lb-demo-lb-tp" + }, + { + "IPAddress": "173.255.114.35", + "IPProtocol": "TCP", + "creationTimestamp": "2013-12-13T10:52:57.170-08:00", + "id": "06342111469679701315", + "kind": "compute#forwardingRule", + "name": "lcforwardingrule", + "portRange": "8000-8500", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/lcforwardingrule", + "target": 
"https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/libcloud-lb-demo-lb-tp" + } + ] + }, + "regions/us-central2": { + "warning": { + "code": "NO_RESULTS_ON_PAGE", + "data": [ + { + "key": "scope", + "value": "regions/us-central2" + } + ], + "message": "There are no results for scope 'regions/us-central2' on this page." + } + } + }, + "kind": "compute#forwardingRuleAggregatedList", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/aggregated/forwardingRules" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/aggregated_instances.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/aggregated_instances.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/aggregated_instances.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/aggregated_instances.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,466 @@ +{ + "id": "projects/project_name/aggregated/instances", + "items": { + "zones/europe-west1-a": { + "instances": [ + { + "canIpForward": false, + "creationTimestamp": "2013-12-13T10:43:58.782-08:00", + "disks": [ + { + "boot": true, + "deviceName": "libcloud-demo-europe-multiple-nodes-000", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-multiple-nodes-000", + "type": "PERSISTENT" + } + ], + "id": "10947706194464948790", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-europe-multiple-nodes-000", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "192.158.28.252", + "type": 
"ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "networkIP": "10.240.122.85" + } + ], + "scheduling": { + "automaticRestart": true, + "onHostMaintenance": "MIGRATE" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-multiple-nodes-000", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" + }, + { + "canIpForward": false, + "creationTimestamp": "2013-12-13T10:43:37.267-08:00", + "disks": [ + { + "boot": true, + "deviceName": "libcloud-demo-europe-boot-disk", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-boot-disk", + "type": "PERSISTENT" + } + ], + "id": "517678477070693411", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-europe-persist-node", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "23.251.128.32", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "networkIP": "10.240.240.204" + } + ], + "scheduling": { + "automaticRestart": true, + "onHostMaintenance": "MIGRATE" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-persist-node", + "status": "RUNNING", + "tags": { + "fingerprint": "EbZdwVRtKyg=", + "items": [ + "libcloud", + 
"newtag" + ] + }, + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" + }, + { + "canIpForward": false, + "creationTimestamp": "2013-12-13T10:43:12.706-08:00", + "disks": [ + { + "boot": true, + "deviceName": "libcloud-demo-europe-np-node", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-np-node", + "type": "PERSISTENT" + }, + { + "deviceName": "libcloud-demo-europe-attach-disk", + "index": 1, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-attach-disk", + "type": "PERSISTENT" + } + ], + "id": "3421745795082776097", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-europe-np-node", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "23.251.128.10", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "networkIP": "10.240.221.125" + } + ], + "scheduling": { + "automaticRestart": true, + "onHostMaintenance": "MIGRATE" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-np-node", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" + } + ] + }, + "zones/europe-west1-b": { + "warning": { + "code": "NO_RESULTS_ON_PAGE", + "data": [ + { + "key": "scope", + 
"value": "zones/europe-west1-b" + } + ], + "message": "There are no results for scope 'zones/europe-west1-b' on this page." + } + }, + "zones/us-central1-a": { + "instances": [ + { + "canIpForward": false, + "creationTimestamp": "2013-12-13T10:45:23.351-08:00", + "disks": [ + { + "boot": true, + "deviceName": "persistent-disk-0", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/node-name", + "type": "PERSISTENT" + } + ], + "id": "4006034190819017667", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "node-name", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "23.236.58.15", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "networkIP": "10.240.72.75" + } + ], + "scheduling": { + "automaticRestart": true, + "onHostMaintenance": "MIGRATE" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", + "status": "RUNNING", + "tags": { + "fingerprint": "42WmSpB8rSM=" + }, + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" + } + ] + }, + "zones/us-central1-b": { + "warning": { + "code": "NO_RESULTS_ON_PAGE", + "data": [ + { + "key": "scope", + "value": "zones/us-central1-b" + } + ], + "message": "There are no results for scope 'zones/us-central1-b' on this page." 
+ } + }, + "zones/us-central2-a": { + "instances": [ + { + "canIpForward": false, + "creationTimestamp": "2013-12-13T10:42:03.180-08:00", + "disks": [ + { + "boot": true, + "deviceName": "libcloud-demo-boot-disk", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-boot-disk", + "type": "PERSISTENT" + } + ], + "id": "2184470466384636715", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-persist-node", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "173.255.120.70", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "networkIP": "10.240.235.148" + } + ], + "scheduling": { + "automaticRestart": true, + "onHostMaintenance": "MIGRATE" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/instances/libcloud-demo-persist-node", + "status": "RUNNING", + "tags": { + "fingerprint": "EbZdwVRtKyg=", + "items": [ + "libcloud", + "newtag" + ] + }, + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" + }, + { + "canIpForward": false, + "creationTimestamp": "2013-12-13T10:41:47.059-08:00", + "disks": [ + { + "boot": true, + "deviceName": "libcloud-demo-np-node", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-np-node", + "type": "PERSISTENT" + }, + { + "deviceName": "libcloud-demo-attach-disk", + "index": 1, + "kind": 
"compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-attach-disk", + "type": "PERSISTENT" + } + ], + "id": "18059053700460342373", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-np-node", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "173.255.120.58", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "networkIP": "10.240.45.206" + } + ], + "scheduling": { + "automaticRestart": true, + "onHostMaintenance": "MIGRATE" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/instances/libcloud-demo-np-node", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" + }, + { + "canIpForward": false, + "creationTimestamp": "2013-12-13T10:42:24.841-08:00", + "disks": [ + { + "boot": true, + "deviceName": "libcloud-demo-multiple-nodes-000", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-multiple-nodes-000", + "type": "PERSISTENT" + } + ], + "id": "4196532528539285480", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-multiple-nodes-000", + 
"networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "173.255.120.211", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "networkIP": "10.240.218.251" + } + ], + "scheduling": { + "automaticRestart": true, + "onHostMaintenance": "MIGRATE" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/instances/libcloud-demo-multiple-nodes-000", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" + }, + { + "canIpForward": false, + "creationTimestamp": "2013-12-13T10:42:19.041-08:00", + "disks": [ + { + "boot": true, + "deviceName": "libcloud-demo-multiple-nodes-001", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-multiple-nodes-001", + "type": "PERSISTENT" + } + ], + "id": "1066146046261788296", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-multiple-nodes-001", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "173.255.120.207", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "networkIP": "10.240.24.29" + } + ], + "scheduling": { + "automaticRestart": true, + "onHostMaintenance": "MIGRATE" + }, + "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/instances/libcloud-demo-multiple-nodes-001", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" + } + ] + } + }, + "kind": "compute#instanceAggregatedList", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/aggregated/instances" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/aggregated_machineTypes.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/aggregated_machineTypes.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/aggregated_machineTypes.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/aggregated_machineTypes.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,1908 @@ +{ + "id": "projects/project_name/aggregated/machineTypes", + "items": { + "zones/europe-west1-a": { + "machineTypes": [ + { + "creationTimestamp": "2012-11-16T11:43:17.400-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-4", + "state": "DEPRECATED" + }, + "description": "4 vCPUs, 26 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "05095504563332567951", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-4-d", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:48:06.087-08:00", + "description": "4 vCPUs, 3.6 GB RAM", + "guestCpus": 4, + "id": "04759000181765218034", + 
"imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-4", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:51:04.549-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-8", + "state": "DEPRECATED" + }, + "description": "8 vCPUS, 7.2 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "02507333096579477005", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-8-d", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:44:25.985-08:00", + "description": "8 vCPUs, 52 GB RAM", + "guestCpus": 8, + "id": "01717932668777642040", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-8", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2013-04-25T13:32:49.088-07:00", + "description": "1 vCPU (shared physical core) and 0.6 GB RAM", + "guestCpus": 1, + "id": "1133568312750571513", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 614, + "name": "f1-micro", + "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/f1-micro", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:14.670-07:00", + "description": "1 vCPU, 3.75 GB RAM", + "guestCpus": 1, + "id": "11077240422128681563", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:34.258-07:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", + "state": "DEPRECATED" + }, + "description": "1 vCPU, 3.75 GB RAM, 1 scratch disk (420 GB)", + "guestCpus": 1, + "id": "10583029372018866711", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1-d", + "scratchDisks": [ + { + "diskGb": 420 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1-d", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:46:10.572-08:00", + "description": "2 vCPUs, 1.8 GB RAM", + "guestCpus": 2, + "id": "16898271314080235997", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-2", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:45:08.195-08:00", + "deprecated": { + "deprecated": 
"2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-8", + "state": "DEPRECATED" + }, + "description": "8 vCPUs, 52 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "07181827135536388552", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-8-d", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-06-07T13:49:19.448-07:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-2", + "state": "DEPRECATED" + }, + "description": "2 vCPUs, 7.5 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "06313284160910191442", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-2-d", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:49:07.563-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-4", + "state": "DEPRECATED" + }, + "description": "4 vCPUS, 3.6 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "01151097524490134507", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + 
"maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-4-d", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:40:06.129-08:00", + "description": "2 vCPUs, 13 GB RAM", + "guestCpus": 2, + "id": "05438694236916301519", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-2", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:56.867-07:00", + "description": "2 vCPUs, 7.5 GB RAM", + "guestCpus": 2, + "id": "17936898073622676356", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-2", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2013-04-25T13:32:45.550-07:00", + "description": "1 vCPU (shared physical core) and 1.7 GB RAM", + "guestCpus": 1, + "id": "1500265464823777597", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 1740, + "name": "g1-small", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/g1-small", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:40:59.630-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-2", + "state": "DEPRECATED" + }, + "description": "2 vCPUs, 13 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "00770157291441082211", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-2-d", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-06-07T13:49:40.050-07:00", + "description": "4 vCPUs, 15 GB RAM", + "guestCpus": 4, + "id": "09494636486174545828", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-4", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:50:15.128-08:00", + "description": "8 vCPUs, 7.2 GB RAM", + "guestCpus": 8, + "id": "01206886442411821831", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-8", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:42:08.983-08:00", + "description": "4 vCPUs, 26 GB RAM", + "guestCpus": 4, + "id": "11556032176405786676", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4", + "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highmem-4", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-11-16T11:47:07.825-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-2", + "state": "DEPRECATED" + }, + "description": "2 vCPUs, 1.8 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "15178384466070744001", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-highcpu-2-d", + "zone": "europe-west1-a" + }, + { + "creationTimestamp": "2012-06-07T13:50:05.677-07:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-4", + "state": "DEPRECATED" + }, + "description": "4 vCPUs, 15 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "00523085164784013586", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-4-d", + "zone": "europe-west1-a" + } + ] + }, + "zones/europe-west1-b": { + "machineTypes": [ + { + "creationTimestamp": "2012-11-16T11:43:17.400-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-4", + "state": "DEPRECATED" + }, + "description": "4 vCPUs, 26 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "05095504563332567951", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-4-d", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:45:08.195-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-8", + "state": "DEPRECATED" + }, + "description": "8 vCPUs, 52 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "07181827135536388552", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-8-d", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2013-04-25T13:32:49.088-07:00", + "description": "1 vCPU (shared physical core) and 0.6 GB RAM", + "guestCpus": 1, + "id": "1133568312750571513", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 614, + "name": "f1-micro", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/f1-micro", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": 
"2012-11-16T11:40:59.630-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-2", + "state": "DEPRECATED" + }, + "description": "2 vCPUs, 13 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "00770157291441082211", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-2-d", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:40:06.129-08:00", + "description": "2 vCPUs, 13 GB RAM", + "guestCpus": 2, + "id": "05438694236916301519", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-2", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:47:07.825-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-2", + "state": "DEPRECATED" + }, + "description": "2 vCPUs, 1.8 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "15178384466070744001", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-2-d", + "zone": 
"europe-west1-b" + }, + { + "creationTimestamp": "2012-06-07T13:48:56.867-07:00", + "description": "2 vCPUs, 7.5 GB RAM", + "guestCpus": 2, + "id": "17936898073622676356", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-2", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:50:15.128-08:00", + "description": "8 vCPUs, 7.2 GB RAM", + "guestCpus": 8, + "id": "01206886442411821831", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-8", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:44:25.985-08:00", + "description": "8 vCPUs, 52 GB RAM", + "guestCpus": 8, + "id": "01717932668777642040", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-8", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2013-04-25T13:32:45.550-07:00", + "description": "1 vCPU (shared physical core) and 1.7 GB RAM", + "guestCpus": 1, + "id": "1500265464823777597", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 1740, + "name": "g1-small", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/g1-small", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": 
"2012-06-07T13:49:19.448-07:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-2", + "state": "DEPRECATED" + }, + "description": "2 vCPUs, 7.5 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "06313284160910191442", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-2-d", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-06-07T13:50:05.677-07:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-4", + "state": "DEPRECATED" + }, + "description": "4 vCPUs, 15 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "00523085164784013586", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-4-d", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:49:07.563-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-4", + "state": "DEPRECATED" + }, + "description": "4 vCPUS, 3.6 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "01151097524490134507", + "imageSpaceGb": 10, + "kind": "compute#machineType", + 
"maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-4-d", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-06-07T13:48:34.258-07:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-1", + "state": "DEPRECATED" + }, + "description": "1 vCPU, 3.75 GB RAM, 1 scratch disk (420 GB)", + "guestCpus": 1, + "id": "10583029372018866711", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1-d", + "scratchDisks": [ + { + "diskGb": 420 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-1-d", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-06-07T13:48:14.670-07:00", + "description": "1 vCPU, 3.75 GB RAM", + "guestCpus": 1, + "id": "11077240422128681563", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-1", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:48:06.087-08:00", + "description": "4 vCPUs, 3.6 GB RAM", + "guestCpus": 4, + "id": "04759000181765218034", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4", + "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-4", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:46:10.572-08:00", + "description": "2 vCPUs, 1.8 GB RAM", + "guestCpus": 2, + "id": "16898271314080235997", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-2", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-06-07T13:49:40.050-07:00", + "description": "4 vCPUs, 15 GB RAM", + "guestCpus": 4, + "id": "09494636486174545828", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-standard-4", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:51:04.549-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-8", + "state": "DEPRECATED" + }, + "description": "8 vCPUS, 7.2 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "02507333096579477005", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highcpu-8-d", + "zone": "europe-west1-b" + }, + { + "creationTimestamp": "2012-11-16T11:42:08.983-08:00", + 
"description": "4 vCPUs, 26 GB RAM", + "guestCpus": 4, + "id": "11556032176405786676", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b/machineTypes/n1-highmem-4", + "zone": "europe-west1-b" + } + ] + }, + "zones/us-central1-a": { + "machineTypes": [ + { + "creationTimestamp": "2012-06-07T13:49:40.050-07:00", + "description": "4 vCPUs, 15 GB RAM", + "guestCpus": 4, + "id": "09494636486174545828", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-4", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:47:07.825-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-2", + "state": "DEPRECATED" + }, + "description": "2 vCPUs, 1.8 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "15178384466070744001", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-2-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2013-04-25T13:32:45.550-07:00", + "description": "1 vCPU (shared physical core) and 1.7 GB RAM", + "guestCpus": 1, + "id": "1500265464823777597", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + 
"maximumPersistentDisksSizeGb": "3072", + "memoryMb": 1740, + "name": "g1-small", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/g1-small", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:49:19.448-07:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-2", + "state": "DEPRECATED" + }, + "description": "2 vCPUs, 7.5 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "06313284160910191442", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-2-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2013-04-25T13:32:49.088-07:00", + "description": "1 vCPU (shared physical core) and 0.6 GB RAM", + "guestCpus": 1, + "id": "1133568312750571513", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 614, + "name": "f1-micro", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/f1-micro", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:49:07.563-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-4", + "state": "DEPRECATED" + }, + "description": "4 vCPUS, 3.6 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "01151097524490134507", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + 
"maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-4-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:48:06.087-08:00", + "description": "4 vCPUs, 3.6 GB RAM", + "guestCpus": 4, + "id": "04759000181765218034", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-4", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:40:59.630-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-2", + "state": "DEPRECATED" + }, + "description": "2 vCPUs, 13 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "00770157291441082211", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-2-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:43:17.400-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-4", + "state": "DEPRECATED" + }, + "description": "4 vCPUs, 26 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "05095504563332567951", + "imageSpaceGb": 10, + "kind": "compute#machineType", 
+ "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-4-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:45:08.195-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-8", + "state": "DEPRECATED" + }, + "description": "8 vCPUs, 52 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "07181827135536388552", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-8-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:56.867-07:00", + "description": "2 vCPUs, 7.5 GB RAM", + "guestCpus": 2, + "id": "17936898073622676356", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-2", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:34.258-07:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "state": "DEPRECATED" + }, + "description": "1 vCPU, 3.75 GB RAM, 1 scratch disk (420 GB)", + "guestCpus": 1, + "id": 
"10583029372018866711", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1-d", + "scratchDisks": [ + { + "diskGb": 420 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:46:10.572-08:00", + "description": "2 vCPUs, 1.8 GB RAM", + "guestCpus": 2, + "id": "16898271314080235997", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-2", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:50:15.128-08:00", + "description": "8 vCPUs, 7.2 GB RAM", + "guestCpus": 8, + "id": "01206886442411821831", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-8", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:42:08.983-08:00", + "description": "4 vCPUs, 26 GB RAM", + "guestCpus": 4, + "id": "11556032176405786676", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-4", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:44:25.985-08:00", + "description": "8 vCPUs, 52 GB RAM", + "guestCpus": 8, + "id": "01717932668777642040", + 
"imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-8", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:14.670-07:00", + "description": "1 vCPU, 3.75 GB RAM", + "guestCpus": 1, + "id": "11077240422128681563", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:40:06.129-08:00", + "description": "2 vCPUs, 13 GB RAM", + "guestCpus": 2, + "id": "05438694236916301519", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-2", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:50:05.677-07:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-4", + "state": "DEPRECATED" + }, + "description": "4 vCPUs, 15 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "00523085164784013586", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-4-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:51:04.549-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-8", + "state": "DEPRECATED" + }, + "description": "8 vCPUS, 7.2 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "02507333096579477005", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-8-d", + "zone": "us-central1-a" + } + ] + }, + "zones/us-central1-b": { + "machineTypes": [ + { + "creationTimestamp": "2013-04-25T13:32:45.550-07:00", + "description": "1 vCPU (shared physical core) and 1.7 GB RAM", + "guestCpus": 1, + "id": "1500265464823777597", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 1740, + "name": "g1-small", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/g1-small", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:40:59.630-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-2", + "state": "DEPRECATED" + }, + "description": "2 vCPUs, 13 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "00770157291441082211", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + 
"maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-2-d", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:51:04.549-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-8", + "state": "DEPRECATED" + }, + "description": "8 vCPUS, 7.2 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "02507333096579477005", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-8-d", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:46:10.572-08:00", + "description": "2 vCPUs, 1.8 GB RAM", + "guestCpus": 2, + "id": "16898271314080235997", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-2", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:44:25.985-08:00", + "description": "8 vCPUs, 52 GB RAM", + "guestCpus": 8, + "id": "01717932668777642040", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8", + "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-8", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:42:08.983-08:00", + "description": "4 vCPUs, 26 GB RAM", + "guestCpus": 4, + "id": "11556032176405786676", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-4", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2013-04-25T13:32:49.088-07:00", + "description": "1 vCPU (shared physical core) and 0.6 GB RAM", + "guestCpus": 1, + "id": "1133568312750571513", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 614, + "name": "f1-micro", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/f1-micro", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-06-07T13:50:05.677-07:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-4", + "state": "DEPRECATED" + }, + "description": "4 vCPUs, 15 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "00523085164784013586", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-4-d", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:47:07.825-08:00", + "deprecated": { + "deprecated": 
"2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-2", + "state": "DEPRECATED" + }, + "description": "2 vCPUs, 1.8 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "15178384466070744001", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-2-d", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:43:17.400-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-4", + "state": "DEPRECATED" + }, + "description": "4 vCPUs, 26 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "05095504563332567951", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-4-d", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-06-07T13:48:56.867-07:00", + "description": "2 vCPUs, 7.5 GB RAM", + "guestCpus": 2, + "id": "17936898073622676356", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-2", + "zone": "us-central1-b" + }, + { + "creationTimestamp": 
"2012-11-16T11:48:06.087-08:00", + "description": "4 vCPUs, 3.6 GB RAM", + "guestCpus": 4, + "id": "04759000181765218034", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-4", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:50:15.128-08:00", + "description": "8 vCPUs, 7.2 GB RAM", + "guestCpus": 8, + "id": "01206886442411821831", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-8", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:49:07.563-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-4", + "state": "DEPRECATED" + }, + "description": "4 vCPUS, 3.6 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "01151097524490134507", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highcpu-4-d", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:45:08.195-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-8", + "state": "DEPRECATED" + }, + 
"description": "8 vCPUs, 52 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "07181827135536388552", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-8-d", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-06-07T13:49:40.050-07:00", + "description": "4 vCPUs, 15 GB RAM", + "guestCpus": 4, + "id": "09494636486174545828", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-4", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-06-07T13:49:19.448-07:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-2", + "state": "DEPRECATED" + }, + "description": "2 vCPUs, 7.5 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "06313284160910191442", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-2-d", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-11-16T11:40:06.129-08:00", + "description": "2 vCPUs, 13 GB RAM", + "guestCpus": 2, + "id": "05438694236916301519", + "imageSpaceGb": 10, + "kind": "compute#machineType", 
+ "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-highmem-2", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-06-07T13:48:34.258-07:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-1", + "state": "DEPRECATED" + }, + "description": "1 vCPU, 3.75 GB RAM, 1 scratch disk (420 GB)", + "guestCpus": 1, + "id": "10583029372018866711", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1-d", + "scratchDisks": [ + { + "diskGb": 420 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-1-d", + "zone": "us-central1-b" + }, + { + "creationTimestamp": "2012-06-07T13:48:14.670-07:00", + "description": "1 vCPU, 3.75 GB RAM", + "guestCpus": 1, + "id": "11077240422128681563", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-1", + "zone": "us-central1-b" + } + ] + }, + "zones/us-central2-a": { + "machineTypes": [ + { + "creationTimestamp": "2012-06-07T13:49:40.050-07:00", + "description": "4 vCPUs, 15 GB RAM", + "guestCpus": 4, + "id": "09494636486174545828", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4", + "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-4", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:56.867-07:00", + "description": "2 vCPUs, 7.5 GB RAM", + "guestCpus": 2, + "id": "17936898073622676356", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-2", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:14.670-07:00", + "description": "1 vCPU, 3.75 GB RAM", + "guestCpus": 1, + "id": "11077240422128681563", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-1", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:48:06.087-08:00", + "description": "4 vCPUs, 3.6 GB RAM", + "guestCpus": 4, + "id": "04759000181765218034", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-4", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2013-04-25T13:32:45.550-07:00", + "description": "1 vCPU (shared physical core) and 1.7 GB RAM", + "guestCpus": 1, + "id": "1500265464823777597", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 1740, + "name": "g1-small", + "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/g1-small", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:47:07.825-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-2", + "state": "DEPRECATED" + }, + "description": "2 vCPUs, 1.8 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "15178384466070744001", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-2-d", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:40:59.630-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-2", + "state": "DEPRECATED" + }, + "description": "2 vCPUs, 13 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "00770157291441082211", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-2-d", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2013-04-25T13:32:49.088-07:00", + "description": "1 vCPU (shared physical core) and 0.6 GB RAM", + "guestCpus": 1, + "id": "1133568312750571513", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + 
"memoryMb": 614, + "name": "f1-micro", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/f1-micro", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:42:08.983-08:00", + "description": "4 vCPUs, 26 GB RAM", + "guestCpus": 4, + "id": "11556032176405786676", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-4", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:51:04.549-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-8", + "state": "DEPRECATED" + }, + "description": "8 vCPUS, 7.2 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "02507333096579477005", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-8-d", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:44:25.985-08:00", + "description": "8 vCPUs, 52 GB RAM", + "guestCpus": 8, + "id": "01717932668777642040", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-8", + "zone": "us-central2-a" + }, + { + "creationTimestamp": 
"2012-06-07T13:50:05.677-07:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-4", + "state": "DEPRECATED" + }, + "description": "4 vCPUs, 15 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "00523085164784013586", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-4-d", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-06-07T13:49:19.448-07:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-2", + "state": "DEPRECATED" + }, + "description": "2 vCPUs, 7.5 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "06313284160910191442", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-2-d", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:50:15.128-08:00", + "description": "8 vCPUs, 7.2 GB RAM", + "guestCpus": 8, + "id": "01206886442411821831", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-8", + "zone": 
"us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:45:08.195-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-8", + "state": "DEPRECATED" + }, + "description": "8 vCPUs, 52 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "07181827135536388552", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-8-d", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:43:17.400-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-4", + "state": "DEPRECATED" + }, + "description": "4 vCPUs, 26 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "05095504563332567951", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-4-d", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:46:10.572-08:00", + "description": "2 vCPUs, 1.8 GB RAM", + "guestCpus": 2, + "id": "16898271314080235997", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2", + "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-2", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:40:06.129-08:00", + "description": "2 vCPUs, 13 GB RAM", + "guestCpus": 2, + "id": "05438694236916301519", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highmem-2", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-11-16T11:49:07.563-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-4", + "state": "DEPRECATED" + }, + "description": "4 vCPUS, 3.6 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "01151097524490134507", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-highcpu-4-d", + "zone": "us-central2-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:34.258-07:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-1", + "state": "DEPRECATED" + }, + "description": "1 vCPU, 3.75 GB RAM, 1 scratch disk (420 GB)", + "guestCpus": 1, + "id": "10583029372018866711", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1-d", + "scratchDisks": [ + { + 
"diskGb": 420 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/machineTypes/n1-standard-1-d", + "zone": "us-central2-a" + } + ] + } + }, + "kind": "compute#machineTypeAggregatedList", + "nextPageToken": "ChhQRVJfUFJPSkVDVF9NQUNISU5FX1RZUEUSGjYwMDUzMTk1NTY3NS5uMS1zdGFuZGFyZC04", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/aggregated/machineTypes" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/aggregated_targetPools.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/aggregated_targetPools.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/aggregated_targetPools.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/aggregated_targetPools.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,77 @@ +{ + "id": "projects/project_name/aggregated/targetPools", + "items": { + "regions/europe-west1": { + "warning": { + "code": "NO_RESULTS_ON_PAGE", + "data": [ + { + "key": "scope", + "value": "regions/europe-west1" + } + ], + "message": "There are no results for scope 'regions/europe-west1' on this page." 
+ } + }, + "regions/us-central1": { + "targetPools": [ + { + "creationTimestamp": "2013-11-01T14:50:04.620-07:00", + "healthChecks": [ + "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck" + ], + "id": "6918395933376220338", + "instances": [ + "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-000", + "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-001" + ], + "kind": "compute#targetPool", + "name": "libcloud-lb-demo-lb-tp", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/libcloud-lb-demo-lb-tp" + }, + { + "creationTimestamp": "2013-11-01T14:51:45.822-07:00", + "healthChecks": [ + "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck" + ], + "id": "2277093827336176997", + "instances": [ + "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-000", + "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-001" + ], + "kind": "compute#targetPool", + "name": "lctargetpool", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool" + }, + { + "creationTimestamp": "2013-11-01T12:09:45.831-07:00", + "healthChecks": [ + "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/basic-check" + ], + "id": "03531496913089065061", + "kind": "compute#targetPool", + "name": "www-pool", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + 
"selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/www-pool", + "sessionAffinity": "NONE" + } + ] + }, + "regions/us-central2": { + "warning": { + "code": "NO_RESULTS_ON_PAGE", + "data": [ + { + "key": "scope", + "value": "regions/us-central2" + } + ], + "message": "There are no results for scope 'regions/us-central2' on this page." + } + } + }, + "kind": "compute#targetPoolAggregatedList", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/aggregated/targetPools" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/generic_disk.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/generic_disk.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/generic_disk.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/generic_disk.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,13 @@ +{ + "creationTimestamp": "2013-12-13T10:54:04.074-08:00", + "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "id": "3535838963674672928", + "kind": "compute#disk", + "name": "genericdisk", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/genericdisk", + "sizeGb": "10", + "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "sourceImageId": "17312518942796567788", + "status": "READY", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_firewalls.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_firewalls.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_firewalls.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_firewalls.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 
+1,117 @@ +{ + "id": "projects/project_name/global/firewalls", + "items": [ + { + "allowed": [ + { + "IPProtocol": "udp" + }, + { + "IPProtocol": "tcp" + }, + { + "IPProtocol": "icmp" + } + ], + "creationTimestamp": "2013-06-25T19:50:41.630-07:00", + "description": "", + "id": "5399576268464751692", + "kind": "compute#firewall", + "name": "default-allow-internal", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/default-allow-internal", + "sourceRanges": [ + "10.240.0.0/16" + ] + }, + { + "allowed": [ + { + "IPProtocol": "tcp", + "ports": [ + "22" + ] + } + ], + "creationTimestamp": "2013-06-25T19:48:25.111-07:00", + "description": "", + "id": "8063006729705804986", + "kind": "compute#firewall", + "name": "default-ssh", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/default-ssh", + "sourceRanges": [ + "0.0.0.0/0" + ] + }, + { + "allowed": [ + { + "IPProtocol": "tcp", + "ports": [ + "3141" + ] + } + ], + "creationTimestamp": "2013-11-01T14:46:25.155-07:00", + "id": "13827675544891616808", + "kind": "compute#firewall", + "name": "libcloud-demo-europe-firewall", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/libcloud-demo-europe-network", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/libcloud-demo-europe-firewall", + "sourceRanges": [ + "0.0.0.0/0" + ], + "sourceTags": [ + "libcloud" + ] + }, + { + "allowed": [ + { + "IPProtocol": "tcp", + "ports": [ + "3141" + ] + } + ], + "creationTimestamp": "2013-11-01T14:44:31.284-07:00", + "id": "1648761630208029546", + "kind": "compute#firewall", + "name": "libcloud-demo-firewall", + "network": 
"https://www.googleapis.com/compute/v1/projects/project_name/global/networks/libcloud-demo-network", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/libcloud-demo-firewall", + "sourceRanges": [ + "0.0.0.0/0" + ], + "sourceTags": [ + "libcloud" + ] + }, + { + "allowed": [ + { + "IPProtocol": "tcp", + "ports": [ + "80" + ] + } + ], + "creationTimestamp": "2013-08-19T14:40:22.562-07:00", + "description": "", + "id": "01326795494450101956", + "kind": "compute#firewall", + "name": "www-firewall", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/www-firewall", + "sourceRanges": [ + "0.0.0.0/0" + ], + "targetTags": [ + "www-tag" + ] + } + ], + "kind": "compute#firewallList", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_delete.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "id": "8983098895755095934", + "insertTime": "2013-06-26T10:04:53.453-07:00", + "kind": "compute#operation", + "name": "operation-global_firewalls_lcfirewall_delete", + "operationType": "delete", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_firewalls_lcfirewall_delete", + "startTime": "2013-06-26T10:04:53.508-07:00", + "status": "PENDING", + "targetId": "0565629596395414121", + "targetLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/lcfirewall", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,19 @@ +{ + "allowed": [ + { + "IPProtocol": "tcp", + "ports": [ + "4567" + ] + } + ], + "creationTimestamp": "2013-06-26T10:04:43.773-07:00", + "id": "0565629596395414121", + "kind": "compute#firewall", + "name": "lcfirewall", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/lcfirewall", + "sourceTags": [ + "libcloud" + ] +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_put.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_put.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_put.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_firewalls_lcfirewall_put.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "id": "6526551968265354277", + "insertTime": "2013-06-26T20:52:00.355-07:00", + "kind": "compute#operation", + "name": "operation-global_firewalls_lcfirewall_put", + "operationType": "update", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_firewalls_lcfirewall_put", + "startTime": "2013-06-26T20:52:00.410-07:00", + "status": "PENDING", + 
"targetId": "10942695305090163011", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/lcfirewall", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_firewalls_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_firewalls_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_firewalls_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_firewalls_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,13 @@ +{ + "id": "16789512465352307784", + "insertTime": "2013-06-26T20:51:06.068-07:00", + "kind": "compute#operation", + "name": "operation-global_firewalls_post", + "operationType": "insert", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_firewalls_post", + "startTime": "2013-06-26T20:51:06.128-07:00", + "status": "PENDING", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/lcfirewall", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_basic-check.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_basic-check.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_basic-check.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_basic-check.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "checkIntervalSec": 5, + "creationTimestamp": "2013-08-19T14:42:28.947-07:00", + "description": "", + "healthyThreshold": 2, + "host": "", + "id": "7660832580304455442", + "kind": "compute#httpHealthCheck", + "name": "basic-check", + "port": 80, + "requestPath": "/", + 
"selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/basic-check", + "timeoutSec": 5, + "unhealthyThreshold": 2 +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_httpHealthChecks.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_httpHealthChecks.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,49 @@ +{ + "id": "projects/project_name/global/httpHealthChecks", + "items": [ + { + "checkIntervalSec": 5, + "creationTimestamp": "2013-08-19T14:42:28.947-07:00", + "description": "", + "healthyThreshold": 2, + "host": "", + "id": "7660832580304455442", + "kind": "compute#httpHealthCheck", + "name": "basic-check", + "port": 80, + "requestPath": "/", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/basic-check", + "timeoutSec": 5, + "unhealthyThreshold": 2 + }, + { + "checkIntervalSec": 10, + "creationTimestamp": "2013-12-13T10:52:46.800-08:00", + "healthyThreshold": 3, + "host": "lchost", + "id": "022194976205566532", + "kind": "compute#httpHealthCheck", + "name": "lchealthcheck", + "port": 9000, + "requestPath": "/lc", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/lchealthcheck", + "timeoutSec": 10, + "unhealthyThreshold": 4 + }, + { + "checkIntervalSec": 5, + "creationTimestamp": "2013-12-13T10:51:42.762-08:00", + "healthyThreshold": 2, + "id": "08359377740909791076", + "kind": "compute#httpHealthCheck", + "name": "libcloud-lb-demo-healthcheck", + "port": 80, + "requestPath": "/", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck", + "timeoutSec": 5, + "unhealthyThreshold": 2 + } + ], + 
"kind": "compute#httpHealthCheckList", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck_delete.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "id": "1159296103027566387", + "insertTime": "2013-09-02T22:18:02.509-07:00", + "kind": "compute#operation", + "name": "operation-global_httpHealthChecks_lchealthcheck_delete", + "operationType": "delete", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_httpHealthChecks_lchealthcheck_delete", + "startTime": "2013-09-02T22:18:02.558-07:00", + "status": "PENDING", + "targetId": "06860603312991823381", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/healthChecks/lchealthcheck", + "user": "user@gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "checkIntervalSec": 10, + "creationTimestamp": "2013-09-02T22:18:01.180-07:00", + "healthyThreshold": 3, + "host": "lchost", + "id": "06860603312991823381", + "kind": 
"compute#httpHealthCheck", + "name": "lchealthcheck", + "port": 8000, + "requestPath": "/lc", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/lchealthcheck", + "timeoutSec": 10, + "unhealthyThreshold": 4 +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck_put.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck_put.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck_put.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck_put.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "id": "6717642434182216609", + "insertTime": "2013-09-03T02:19:55.574-07:00", + "kind": "compute#operation", + "name": "operation-global_httpHealthChecks_lchealthcheck_put", + "operationType": "update", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_httpHealthChecks_lchealthcheck_put", + "startTime": "2013-09-03T02:19:55.628-07:00", + "status": "PENDING", + "targetId": "0742691415598204878", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/healthChecks/lchealthcheck", + "user": "user@gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_libcloud-lb-demo-healthcheck.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_libcloud-lb-demo-healthcheck.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_libcloud-lb-demo-healthcheck.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_libcloud-lb-demo-healthcheck.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,13 @@ +{ + 
"checkIntervalSec": 5, + "creationTimestamp": "2013-09-02T22:25:44.759-07:00", + "healthyThreshold": 2, + "id": "16372093408499501663", + "kind": "compute#httpHealthCheck", + "name": "libcloud-lb-demo-healthcheck", + "port": 80, + "requestPath": "/", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck", + "timeoutSec": 5, + "unhealthyThreshold": 2 +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,13 @@ +{ + "id": "3903393118268087410", + "insertTime": "2013-09-03T02:19:54.629-07:00", + "kind": "compute#operation", + "name": "operation-global_httpHealthChecks_post", + "operationType": "insert", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_httpHealthChecks_post", + "startTime": "2013-09-03T02:19:54.718-07:00", + "status": "PENDING", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/healthChecks/lchealthcheck", + "user": "user@gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_images_debian_6_squeeze_v20130926_deprecate.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_images_debian_6_squeeze_v20130926_deprecate.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_images_debian_6_squeeze_v20130926_deprecate.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_images_debian_6_squeeze_v20130926_deprecate.json 2014-06-11 
14:27:59.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "status": "PENDING", + "kind": "compute#operation", + "name": "operation-1394594316110-4f4604ad0e708-2e4622ab", + "startTime": "2014-03-11T20:18:36.194-07:00", + "insertTime": "2014-03-11T20:18:36.110-07:00", + "targetId": "10034929421075729520", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/images/debian_6_squeeze_v20130926", + "operationType": "setDeprecation", + "progress": 0, + "id": "11223768474922166090", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-1394594316110-4f4604ad0e708-2e4622ab", + "user": "user@developer.gserviceaccount.com" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_images_debian_7_wheezy_v20130617_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_images_debian_7_wheezy_v20130617_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_images_debian_7_wheezy_v20130617_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_images_debian_7_wheezy_v20130617_delete.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "kind": "compute#operation", + "id": "10762099380229198553", + "name": "operation-global_images_debian7_delete", + "operationType": "delete", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/images/debian-7-wheezy-v20130617", + "targetId": "14881612020726561163", + "status": "PENDING", + "user": "user@developer.gserviceaccount.com", + "progress": 0, + "insertTime": "2014-03-11T14:37:48.075-07:00", + "startTime": "2014-03-11T14:37:48.158-07:00", + "endTime": "2014-03-11T14:37:48.634-07:00", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_images_debian7_delete" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_images.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_images.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_images.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_images.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,51 @@ +{ + "id": "projects/project_name/global/images", + "items": [ + { + "creationTimestamp": "2013-06-19T13:47:20.563-07:00", + "description": "Local Debian GNU/Linux 7.1 (wheezy) built on 2013-06-17", + "id": "1549141992333368759", + "kind": "compute#image", + "name": "debian-7-wheezy-v20130617", + "preferredKernel": "https://www.googleapis.com/compute/v1/projects/google/global/kernels/gce-v20130603", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "sourceType": "RAW", + "status": "READY" + }, + { + "creationTimestamp": "2013-11-18T12:24:21.560-07:00", + "id": "1539141992335368259", + "kind": "compute#image", + "name": "centos-6-v20131118", + "preferredKernel": "https://www.googleapis.com/compute/v1/projects/google/global/kernels/gce-v20130603", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/centos-6-v20131118", + "sourceType": "RAW", + "status": "READY" + }, + { + "creationTimestamp": "2014-03-09T21:04:31.291-07:00", + "description": "CoreOS test image", + "id": "15196339658718959621", + "kind": "compute#image", + "name": "coreos", + "preferredKernel": "https://www.googleapis.com/compute/v1/projects/google/global/kernels/gce-v20130603", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/images/coreos", + "sourceType": "RAW", + "status": "READY" + } + ], + "kind": "compute#imageList", + "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/global/images" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_images_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_images_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_images_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_images_post.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,13 @@ +{ + "id": "15196339658718959621", + "insertTime": "2014-03-09T21:04:31.228-07:00", + "kind": "compute#operation", + "name": "coreos", + "operationType": "insert", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_image_post", + "startTime": "2014-03-09T21:04:31.291-07:00", + "status": "PENDING", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/images/coreos", + "user": "897001307951@developer.gserviceaccount.com" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_networks_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_networks_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_networks_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_networks_default.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,9 @@ +{ + "IPv4Range": "10.240.0.0/16", + "creationTimestamp": "2013-06-19T12:37:13.233-07:00", + "gatewayIPv4": "10.240.0.1", + "id": "08257021638942464470", + "kind": "compute#network", + "name": "default", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_networks.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_networks.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_networks.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_networks.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,34 @@ +{ + "id": "projects/project_name/global/networks", + "items": [ + { + "IPv4Range": "10.240.0.0/16", + "creationTimestamp": "2013-06-19T12:37:13.233-07:00", + "gatewayIPv4": "10.240.0.1", + "id": "08257021638942464470", + "kind": "compute#network", + "name": "default", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default" + }, + { + "IPv4Range": "10.10.0.0/16", + "creationTimestamp": "2013-06-26T09:51:34.018-07:00", + "gatewayIPv4": "10.10.0.1", + "id": "13254259054875092094", + "kind": "compute#network", + "name": "libcloud-demo-europe-network", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/libcloud-demo-europe-network" + }, + { + "IPv4Range": "10.10.0.0/16", + "creationTimestamp": "2013-06-26T09:48:15.703-07:00", + "gatewayIPv4": "10.10.0.1", + "id": "17172579178188075621", + "kind": "compute#network", + "name": "libcloud-demo-network", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/libcloud-demo-network" + } + ], + "kind": "compute#networkList", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_networks_lcnetwork_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_networks_lcnetwork_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_networks_lcnetwork_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_networks_lcnetwork_delete.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "id": "4914541423567262393", + "insertTime": 
"2013-06-26T10:05:11.102-07:00", + "kind": "compute#operation", + "name": "operation-global_networks_lcnetwork_delete", + "operationType": "delete", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_networks_lcnetwork_delete", + "startTime": "2013-06-26T10:05:11.273-07:00", + "status": "PENDING", + "targetId": "16211908079305042870", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/lcnetwork", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_networks_lcnetwork.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_networks_lcnetwork.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_networks_lcnetwork.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_networks_lcnetwork.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,9 @@ +{ + "IPv4Range": "10.11.0.0/16", + "creationTimestamp": "2013-06-26T10:05:03.500-07:00", + "gatewayIPv4": "10.11.0.1", + "id": "16211908079305042870", + "kind": "compute#network", + "name": "lcnetwork", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/lcnetwork" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-europe-network.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-europe-network.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-europe-network.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-europe-network.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,9 @@ +{ + "IPv4Range": "10.10.0.0/16", + "creationTimestamp": "2013-06-26T09:51:34.018-07:00", + "gatewayIPv4": 
"10.10.0.1", + "id": "13254259054875092094", + "kind": "compute#network", + "name": "libcloud-demo-europe-network", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/libcloud-demo-europe-network" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-network.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-network.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-network.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_networks_libcloud-demo-network.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,9 @@ +{ + "IPv4Range": "10.10.0.0/16", + "creationTimestamp": "2013-06-26T09:48:15.703-07:00", + "gatewayIPv4": "10.10.0.1", + "id": "17172579178188075621", + "kind": "compute#network", + "name": "libcloud-demo-network", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/libcloud-demo-network" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_networks_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_networks_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_networks_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_networks_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,13 @@ +{ + "id": "3681664092089171723", + "insertTime": "2013-06-26T10:05:03.271-07:00", + "kind": "compute#operation", + "name": "operation-global_networks_post", + "operationType": "insert", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_networks_post", + "startTime": "2013-06-26T10:05:03.315-07:00", + "status": "PENDING", + "targetLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/global/networks/lcnetwork", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_snapshots.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_snapshots.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_snapshots.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_snapshots.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,31 @@ +{ + "id": "projects/project_name/global/snapshots", + "items": [ + { + "creationTimestamp": "2013-12-16T13:03:51.345-08:00", + "description": "", + "diskSizeGb": "1", + "id": "17482266715940883688", + "kind": "compute#snapshot", + "name": "lcsnapshot", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/snapshots/lcsnapshot", + "sourceDisk": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", + "sourceDiskId": "-2511816066479461182", + "status": "READY" + }, + { + "creationTimestamp": "2013-12-16T12:48:12.557-08:00", + "description": "", + "diskSizeGb": "10", + "id": "3341332334980930052", + "kind": "compute#snapshot", + "name": "libcloud-demo-snapshot", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/snapshots/libcloud-demo-snapshot", + "sourceDisk": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a/disks/libcloud-demo-snap-template", + "sourceDiskId": "-6245698478147030397", + "status": "READY" + } + ], + "kind": "compute#snapshotList", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/snapshots" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_snapshots_lcsnapshot_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_snapshots_lcsnapshot_delete.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_snapshots_lcsnapshot_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_snapshots_lcsnapshot_delete.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "id": "5994251357251874363", + "insertTime": "2013-12-16T13:04:03.831-08:00", + "kind": "compute#operation", + "name": "operation-global_snapshots_lcsnapshot_delete", + "operationType": "delete", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_snapshots_lcsnapshot_delete", + "startTime": "2013-12-16T13:04:03.924-08:00", + "status": "PENDING", + "targetId": "17482266715940883688", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/snapshots/lcsnapshot", + "user": "487551519631-t6qvu2na6p4u9ptm46bsdujf0ohbdro7@developer.gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_snapshots_lcsnapshot.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_snapshots_lcsnapshot.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/global_snapshots_lcsnapshot.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/global_snapshots_lcsnapshot.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,12 @@ +{ + "creationTimestamp": "2013-12-16T13:03:51.345-08:00", + "description": "", + "diskSizeGb": "1", + "id": "17482266715940883688", + "kind": "compute#snapshot", + "name": "lcsnapshot", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/snapshots/lcsnapshot", + "sourceDisk": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", + "sourceDiskId": "-2511816066479461182", + "status": "READY" +} \ No newline at end of file diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_delete.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "endTime": "2013-06-26T10:05:00.978-07:00", + "id": "8983098895755095934", + "insertTime": "2013-06-26T10:04:53.453-07:00", + "kind": "compute#operation", + "name": "operation-global_firewalls_lcfirewall_delete", + "operationType": "delete", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_firewalls_lcfirewall_delete", + "startTime": "2013-06-26T10:04:53.508-07:00", + "status": "DONE", + "targetId": "0565629596395414121", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/lcfirewall", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_put.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_put.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_put.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_lcfirewall_put.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "endTime": "2013-06-26T20:52:10.075-07:00", + "id": "6526551968265354277", + "insertTime": "2013-06-26T20:52:00.355-07:00", + "kind": "compute#operation", + "name": 
"operation-global_firewalls_lcfirewall_put", + "operationType": "update", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_firewalls_lcfirewall_put", + "startTime": "2013-06-26T20:52:00.410-07:00", + "status": "DONE", + "targetId": "10942695305090163011", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/lcfirewall", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_firewalls_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "endTime": "2013-06-26T20:51:12.108-07:00", + "id": "16789512465352307784", + "insertTime": "2013-06-26T20:51:06.068-07:00", + "kind": "compute#operation", + "name": "operation-global_firewalls_post", + "operationType": "insert", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_firewalls_post", + "startTime": "2013-06-26T20:51:06.128-07:00", + "status": "DONE", + "targetId": "10942695305090163011", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/firewalls/lcfirewall", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_lchealthcheck_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_lchealthcheck_delete.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_lchealthcheck_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_lchealthcheck_delete.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "id": "1159296103027566387", + "insertTime": "2013-09-02T22:18:02.509-07:00", + "kind": "compute#operation", + "name": "operation-global_httpHealthChecks_lchealthcheck_delete", + "operationType": "delete", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_httpHealthChecks_lchealthcheck_delete", + "startTime": "2013-09-02T22:18:02.558-07:00", + "status": "DONE", + "targetId": "06860603312991823381", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/healthChecks/lchealthcheck", + "user": "user@gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_lchealthcheck_put.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_lchealthcheck_put.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_lchealthcheck_put.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_lchealthcheck_put.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "endTime": "2013-09-03T02:20:02.194-07:00", + "id": "6717642434182216609", + "insertTime": "2013-09-03T02:19:55.574-07:00", + "kind": "compute#operation", + "name": "operation-global_httpHealthChecks_lchealthcheck_put", + "operationType": "update", + "progress": 100, + "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_httpHealthChecks_lchealthcheck_put", + "startTime": "2013-09-03T02:19:55.628-07:00", + "status": "DONE", + "targetId": "0742691415598204878", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/healthChecks/lchealthcheck", + "user": "user@gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_httpHealthChecks_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "id": "3903393118268087410", + "insertTime": "2013-09-03T02:19:54.629-07:00", + "kind": "compute#operation", + "name": "operation-global_httpHealthChecks_post", + "operationType": "insert", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_httpHealthChecks_post", + "startTime": "2013-09-03T02:19:54.718-07:00", + "status": "DONE", + "targetId": "0742691415598204878", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/healthChecks/lchealthcheck", + "user": "user@gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_image_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_image_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_image_post.json 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_image_post.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "endTime": "2014-03-09T21:04:33.291-07:00", + "id": "15196339658718959621", + "insertTime": "2014-03-09T21:04:31.228-07:00", + "kind": "compute#operation", + "name": "coreos", + "operationType": "insert", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_image_post", + "startTime": "2014-03-09T21:04:31.291-07:00", + "status": "DONE", + "targetId": "12551176716147327315", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/images/coreos", + "user": "897001307951@developer.gserviceaccount.com" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_images_debian7_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_images_debian7_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_images_debian7_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_images_debian7_delete.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "kind": "compute#operation", + "id": "10762099380229198553", + "name": "operation-global_images_debian7_delete", + "operationType": "delete", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/images/debian-7-wheezy-v20130617", + "targetId": "14881612020726561163", + "status": "DONE", + "user": "user@developer.gserviceaccount.com", + "progress": 100, + "insertTime": "2014-03-11T14:37:48.075-07:00", + "startTime": "2014-03-11T14:37:48.158-07:00", + "endTime": "2014-03-11T14:37:48.634-07:00", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_images_debian7_delete" +} diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_networks_lcnetwork_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_networks_lcnetwork_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_networks_lcnetwork_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_networks_lcnetwork_delete.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "endTime": "2013-06-26T10:05:12.607-07:00", + "id": "4914541423567262393", + "insertTime": "2013-06-26T10:05:11.102-07:00", + "kind": "compute#operation", + "name": "operation-global_networks_lcnetwork_delete", + "operationType": "delete", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_networks_lcnetwork_delete", + "startTime": "2013-06-26T10:05:11.273-07:00", + "status": "DONE", + "targetId": "16211908079305042870", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/lcnetwork", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_networks_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_networks_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_networks_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_networks_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "endTime": "2013-06-26T10:05:07.630-07:00", + "id": "3681664092089171723", + "insertTime": "2013-06-26T10:05:03.271-07:00", + "kind": "compute#operation", + "name": "operation-global_networks_post", + "operationType": "insert", + "progress": 100, + "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_networks_post", + "startTime": "2013-06-26T10:05:03.315-07:00", + "status": "DONE", + "targetId": "16211908079305042870", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/lcnetwork", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_snapshots_lcsnapshot_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_snapshots_lcsnapshot_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_global_snapshots_lcsnapshot_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_global_snapshots_lcsnapshot_delete.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "endTime": "2013-12-16T13:04:11.565-08:00", + "id": "5994251357251874363", + "insertTime": "2013-12-16T13:04:03.831-08:00", + "kind": "compute#operation", + "name": "operation-global_snapshots_lcsnapshot_delete", + "operationType": "delete", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/operations/operation-global_snapshots_lcsnapshot_delete", + "startTime": "2013-12-16T13:04:03.924-08:00", + "status": "DONE", + "targetId": "17482266715940883688", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/snapshots/lcsnapshot", + "user": "487551519631-t6qvu2na6p4u9ptm46bsdujf0ohbdro7@developer.gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_lcaddress_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_lcaddress_delete.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_lcaddress_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_lcaddress_delete.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "7128783508312083402", + "insertTime": "2013-06-26T12:21:44.075-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_addresses_lcaddress_delete", + "operationType": "delete", + "progress": 100, + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_addresses_lcaddress_delete", + "startTime": "2013-06-26T12:21:44.110-07:00", + "status": "DONE", + "targetId": "01531551729918243104", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/lcaddress", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_addresses_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "16064059851942653139", + "insertTime": "2013-06-26T12:21:40.299-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_addresses_post", + "operationType": "insert", + "progress": 100, + "region": 
"https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_addresses_post", + "startTime": "2013-06-26T12:21:40.358-07:00", + "status": "DONE", + "targetId": "01531551729918243104", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/lcaddress", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_forwardingRules_lcforwardingrule_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_forwardingRules_lcforwardingrule_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_forwardingRules_lcforwardingrule_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_forwardingRules_lcforwardingrule_delete.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "endTime": "2013-09-03T00:17:43.917-07:00", + "id": "09064254309855814339", + "insertTime": "2013-09-03T00:17:36.062-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_forwardingRules_lcforwardingrule_delete", + "operationType": "delete", + "progress": 100, + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_forwardingRules_lcforwardingrule_delete", + "startTime": "2013-09-03T00:17:36.168-07:00", + "status": "DONE", + "targetId": "10901665092293158938", + "targetLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/lcforwardingrule", + "user": "user@gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_forwardingRules_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_forwardingRules_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_forwardingRules_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_forwardingRules_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "endTime": "2013-09-03T00:17:33.965-07:00", + "id": "0651769405845333112", + "insertTime": "2013-09-03T00:17:25.381-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_forwardingRules_post", + "operationType": "insert", + "progress": 100, + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_forwardingRules_post", + "startTime": "2013-09-03T00:17:25.434-07:00", + "status": "DONE", + "targetId": "10901665092293158938", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/lcforwardingrule", + "user": "user@gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_addHealthCheck_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_addHealthCheck_post.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_addHealthCheck_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_addHealthCheck_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "endTime": "2013-09-03T01:28:49.271-07:00", + "id": "17341029456963557514", + "insertTime": "2013-09-03T01:28:40.774-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_targetPools_lctargetpool_addHealthCheck_post", + "operationType": "addHealthCheck", + "progress": 100, + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_addHealthCheck_post", + "startTime": "2013-09-03T01:28:40.838-07:00", + "status": "DONE", + "targetId": "16862638289615591831", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", + "user": "user@gserviceaccount.com" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_addInstance_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_addInstance_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_addInstance_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_addInstance_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "endTime": "2013-09-03T01:29:07.021-07:00", + "id": "04072826501537092633", + "insertTime": 
"2013-09-03T01:29:03.082-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_targetPools_lctargetpool_addInstance_post", + "operationType": "addInstance", + "progress": 100, + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_addInstance_post", + "startTime": "2013-09-03T01:29:03.145-07:00", + "status": "DONE", + "targetId": "16862638289615591831", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", + "user": "user@gserviceaccount.com" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_delete.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "13500662190763995965", + "insertTime": "2013-09-03T00:51:06.799-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_targetPools_lctargetpool_delete", + "operationType": "delete", + "progress": 100, + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_delete", + "startTime": "2013-09-03T00:51:06.840-07:00", + "status": "DONE", + "targetId": "13598380121688918358", + "targetLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", + "user": "user@gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "endTime": "2013-09-03T01:28:37.095-07:00", + "id": "14738174613993796821", + "insertTime": "2013-09-03T01:28:32.889-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post", + "operationType": "removeHealthCheck", + "progress": 100, + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post", + "startTime": "2013-09-03T01:28:32.942-07:00", + "status": "DONE", + "targetId": "16862638289615591831", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", + "user": "user@gserviceaccount.com" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_removeInstance_post.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_removeInstance_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_removeInstance_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_lctargetpool_removeInstance_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "endTime": "2013-09-03T01:28:59.247-07:00", + "id": "1815686149437875016", + "insertTime": "2013-09-03T01:28:53.049-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_targetPools_lctargetpool_removeInstance_post", + "operationType": "removeInstance", + "progress": 100, + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_removeInstance_post", + "startTime": "2013-09-03T01:28:53.109-07:00", + "status": "DONE", + "targetId": "16862638289615591831", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", + "user": "user@gserviceaccount.com" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_regions_us-central1_targetPools_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "7487852523793007955", + "insertTime": 
"2013-09-03T00:51:05.064-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_targetPools_post", + "operationType": "insert", + "progress": 100, + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_post", + "startTime": "2013-09-03T00:51:05.115-07:00", + "status": "DONE", + "targetId": "13598380121688918358", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", + "user": "user@gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_europe-west1-a_instances_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_europe-west1-a_instances_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_europe-west1-a_instances_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_europe-west1-a_instances_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,25 @@ +{ + "error": { + "errors": [ + { + "code": "RESOURCE_ALREADY_EXISTS", + "message": "The resource 'projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-np-node' already exists" + } + ] + }, + "httpErrorMessage": "CONFLICT", + "httpErrorStatusCode": 409, + "id": "1510575454210533141", + "insertTime": "2013-06-26T20:57:34.366-07:00", + "kind": "compute#operation", + "name": "operation-zones_europe-west1-a_instances_post", + "operationType": "insert", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/operations/operation-zones_europe-west1-a_instances_post", + "startTime": "2013-06-26T20:57:34.453-07:00", + "status": "DONE", + 
"targetId": "14308265828754333159", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-np-node", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_lcdisk_createSnapshot_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_lcdisk_createSnapshot_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_lcdisk_createSnapshot_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_lcdisk_createSnapshot_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "endTime": "2013-12-16T13:04:01.580-08:00", + "id": "0158330665043557584", + "insertTime": "2013-12-16T13:03:51.000-08:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_disks_lcdisk_createSnapshot_post", + "operationType": "createSnapshot", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_disks_lcdisk_createSnapshot_post", + "startTime": "2013-12-16T13:03:51.042-08:00", + "status": "DONE", + "targetId": "07494414044179227172", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", + "user": "487551519631-t6qvu2na6p4u9ptm46bsdujf0ohbdro7@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_lcdisk_delete.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_lcdisk_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_lcdisk_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_lcdisk_delete.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "06887337364510109333", + "insertTime": "2013-06-26T10:06:11.835-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_disks_lcdisk_delete", + "operationType": "delete", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_disks_lcdisk_delete", + "startTime": "2013-06-26T10:06:12.006-07:00", + "status": "DONE", + "targetId": "16109451798967042451", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_disks_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "endTime": "2013-06-26T16:48:25.375-07:00", + "id": "0211151278250678078", + "insertTime": "2013-06-26T16:48:17.403-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_disks_post", + "operationType": 
"insert", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_disks_post", + "startTime": "2013-06-26T16:48:17.479-07:00", + "status": "DONE", + "targetId": "03196637868764498730", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-000_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-000_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-000_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-000_delete.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "endTime": "2013-06-26T16:13:36.800-07:00", + "id": "3319596145594427549", + "insertTime": "2013-06-26T16:13:12.903-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_lcnode-000_delete", + "operationType": "delete", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_lcnode-000_delete", + "startTime": "2013-06-26T16:13:12.948-07:00", + "status": "DONE", + "targetId": "5390075309006132922", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/lcnode-000", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" 
+} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-001_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-001_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-001_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_lcnode-001_delete.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "endTime": "2013-06-26T16:13:56.931-07:00", + "id": "17469711273432628502", + "insertTime": "2013-06-26T16:13:40.579-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_lcnode-001_delete", + "operationType": "delete", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_lcnode-001_delete", + "startTime": "2013-06-26T16:13:40.620-07:00", + "status": "DONE", + "targetId": "16630486471904253898", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/lcnode-001", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_attachDisk_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_attachDisk_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_attachDisk_post.json 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_attachDisk_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "endTime": "2013-06-26T16:48:31.831-07:00", + "id": "7455886659787654716", + "insertTime": "2013-06-26T16:48:27.691-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_attachDisk_post", + "operationType": "attachDisk", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_attachDisk_post", + "startTime": "2013-06-26T16:48:27.762-07:00", + "status": "DONE", + "targetId": "1845312225624811608", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_delete.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "endTime": "2013-06-26T10:06:00.917-07:00", + "id": "6999931397447918763", + "insertTime": "2013-06-26T10:05:40.350-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_delete", + "operationType": "delete", + "progress": 100, + "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_delete", + "startTime": "2013-06-26T10:05:40.405-07:00", + "status": "DONE", + "targetId": "07410051435384876224", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_detachDisk_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_detachDisk_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_detachDisk_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_detachDisk_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "endTime": "2013-06-26T16:48:41.278-07:00", + "id": "3921383727105838816", + "insertTime": "2013-06-26T16:48:35.357-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_detachDisk_post", + "operationType": "detachDisk", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_detachDisk_post", + "startTime": "2013-06-26T16:48:35.398-07:00", + "status": "DONE", + "targetId": "1845312225624811608", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + "zone": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_reset_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_reset_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_reset_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_reset_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "10507122129283663728", + "insertTime": "2013-06-26T15:03:02.766-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_reset_post", + "operationType": "reset", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_reset_post", + "startTime": "2013-06-26T15:03:02.813-07:00", + "status": "DONE", + "targetId": "1845312225624811608", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_setTags_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_setTags_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_setTags_post.json 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_node-name_setTags_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "endTime": "2013-06-26T21:20:10.487-07:00", + "id": "8115150846190320932", + "insertTime": "2013-06-26T21:20:03.962-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_setTags_post", + "operationType": "setTags", + "progress": 100, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_setTags_post", + "startTime": "2013-06-26T21:20:04.103-07:00", + "status": "DONE", + "targetId": "1845312225624811608", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/operations_operation_zones_us-central1-a_instances_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "endTime": "2013-06-26T16:13:08.382-07:00", + "id": "1858155812259649243", + "insertTime": "2013-06-26T16:12:51.492-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_post", + "operationType": "insert", + "progress": 100, + "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_post", + "startTime": "2013-06-26T16:12:51.537-07:00", + "status": "DONE", + "targetId": "16630486471904253898", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/lcnode-001", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/project.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/project.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/project.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/project.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,59 @@ +{ + "commonInstanceMetadata": { + "items": [ + { + "key": "sshKeys", + "value": "ASDFASDF" + } + ], + "kind": "compute#metadata" + }, + "creationTimestamp": "2013-02-05T16:19:20.516-08:00", + "description": "", + "id": "2193465259114366848", + "kind": "compute#project", + "name": "project_name", + "quotas": [ + { + "limit": 1000.0, + "metric": "SNAPSHOTS", + "usage": 0.0 + }, + { + "limit": 5.0, + "metric": "NETWORKS", + "usage": 3.0 + }, + { + "limit": 100.0, + "metric": "FIREWALLS", + "usage": 5.0 + }, + { + "limit": 100.0, + "metric": "IMAGES", + "usage": 0.0 + }, + { + "limit": 100.0, + "metric": "ROUTES", + "usage": 6.0 + }, + { + "limit": 50.0, + "metric": "FORWARDING_RULES", + "usage": 0.0 + }, + { + "limit": 50.0, + "metric": "TARGET_POOLS", + "usage": 1.0 + }, + { + "limit": 50.0, + "metric": "HEALTH_CHECKS", + "usage": 1.0 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/projects_debian-cloud_global_images.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/gce/projects_debian-cloud_global_images.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/projects_debian-cloud_global_images.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/projects_debian-cloud_global_images.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,379 @@ +{ + "id": "projects/debian-cloud/global/images", + "items": [ + { + "archiveSizeBytes": "365056004", + "creationTimestamp": "2013-12-02T17:49:01.206-08:00", + "description": "Debian GNU/Linux 7.2 (wheezy) with backports kernel built on 2013-11-27", + "id": "11823693270029497919", + "kind": "compute#image", + "name": "backports-debian-7-wheezy-v20131127", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/backports-debian-7-wheezy-v20131127", + "sourceType": "RAW", + "status": "READY" + }, + { + "archiveSizeBytes": "214107225", + "creationTimestamp": "2013-05-07T17:09:22.111-07:00", + "deprecated": { + "deleted": "1970-01-03", + "deprecated": "1970-01-01", + "obsolete": "1970-01-02", + "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-6-squeeze-v20130507", + "state": "DEPRECATED" + }, + "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-05-07", + "id": "647943287916432906", + "kind": "compute#image", + "name": "debian-6-squeeze-v20130507", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-6-squeeze-v20130507", + "sourceType": "RAW", + "status": "READY" + }, + { + "archiveSizeBytes": "255972840", + "creationTimestamp": "2013-05-09T12:56:21.720-07:00", + "deprecated": { + "deprecated": "2013-11-12T21:00:00Z", + "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", + "state": 
"DEPRECATED" + }, + "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-05-09", + "id": "15745758816845911589", + "kind": "compute#image", + "name": "debian-6-squeeze-v20130509", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-6-squeeze-v20130509", + "sourceType": "RAW", + "status": "READY" + }, + { + "archiveSizeBytes": "219458106", + "creationTimestamp": "2013-05-14T21:01:12.124-07:00", + "deprecated": { + "deprecated": "2013-11-14T00:00:00Z", + "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", + "state": "DEPRECATED" + }, + "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-05-15", + "id": "006866479348046290", + "kind": "compute#image", + "name": "debian-6-squeeze-v20130515", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-6-squeeze-v20130515", + "sourceType": "RAW", + "status": "READY" + }, + { + "archiveSizeBytes": "265118282", + "creationTimestamp": "2013-05-30T09:48:37.837-07:00", + "deprecated": { + "deprecated": "2013-11-14T00:00:00Z", + "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", + "state": "DEPRECATED" + }, + "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-05-22", + "id": "1266148899538866390", + "kind": "compute#image", + "name": "debian-6-squeeze-v20130522", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-6-squeeze-v20130522", + "sourceType": "RAW", + "status": "READY" + }, + { + "archiveSizeBytes": "233984980", + "creationTimestamp": "2013-06-19T13:45:44.111-07:00", + "deprecated": { + "deprecated": "2013-11-14T00:00:00Z", + "replacement": 
"https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", + "state": "DEPRECATED" + }, + "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-06-17", + "id": "04009358257173422091", + "kind": "compute#image", + "name": "debian-6-squeeze-v20130617", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-6-squeeze-v20130617", + "sourceType": "RAW", + "status": "READY" + }, + { + "archiveSizeBytes": "258168500", + "creationTimestamp": "2013-07-24T12:31:06.054-07:00", + "deprecated": { + "deprecated": "2013-11-14T00:00:00Z", + "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", + "state": "DEPRECATED" + }, + "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-07-23", + "id": "3115342424904648000", + "kind": "compute#image", + "name": "debian-6-squeeze-v20130723", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-6-squeeze-v20130723", + "sourceType": "RAW", + "status": "READY" + }, + { + "archiveSizeBytes": "300710522", + "creationTimestamp": "2013-09-04T13:21:53.292-07:00", + "deprecated": { + "deprecated": "2013-11-14T00:00:00Z", + "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", + "state": "DEPRECATED" + }, + "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-08-16", + "id": "06130699342353523133", + "kind": "compute#image", + "name": "debian-6-squeeze-v20130816", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-6-squeeze-v20130816", + "sourceType": "RAW", + "status": "READY" + }, + { + "archiveSizeBytes": "300710522", + "creationTimestamp": 
"2013-10-11T09:26:47.736-07:00", + "deprecated": { + "deprecated": "2013-11-14T00:00:00Z", + "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", + "state": "DEPRECATED" + }, + "description": "Debian GNU/Linux 6.0.7 (squeeze) built on 2013-09-26", + "id": "0225119674082940764", + "kind": "compute#image", + "name": "debian-6-squeeze-v20130926", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-6-squeeze-v20130926", + "sourceType": "RAW", + "status": "READY" + }, + { + "archiveSizeBytes": "237290472", + "creationTimestamp": "2013-05-07T17:01:30.071-07:00", + "deprecated": { + "deprecated": "2013-11-14T00:00:00Z", + "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", + "state": "DEPRECATED" + }, + "description": "Debian GNU/Linux 7.0 (wheezy) built on 2013-05-07", + "id": "15638477823580670459", + "kind": "compute#image", + "name": "debian-7-wheezy-v20130507", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20130507", + "sourceType": "RAW", + "status": "READY" + }, + { + "archiveSizeBytes": "270107366", + "creationTimestamp": "2013-05-09T12:56:47.910-07:00", + "deprecated": { + "deprecated": "2013-11-14T00:00:00Z", + "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", + "state": "DEPRECATED" + }, + "description": "Debian GNU/Linux 7.0 (wheezy) built on 2013-05-09", + "id": "020034532765408091", + "kind": "compute#image", + "name": "debian-7-wheezy-v20130509", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20130509", + "sourceType": 
"RAW", + "status": "READY" + }, + { + "archiveSizeBytes": "265604335", + "creationTimestamp": "2013-05-14T21:02:55.044-07:00", + "deprecated": { + "deprecated": "2013-11-14T00:00:00Z", + "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", + "state": "DEPRECATED" + }, + "description": "Debian GNU/Linux 7.0 (wheezy) built on 2013-05-15", + "id": "0587071888358410836", + "kind": "compute#image", + "name": "debian-7-wheezy-v20130515", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20130515", + "sourceType": "RAW", + "status": "READY" + }, + { + "archiveSizeBytes": "284301993", + "creationTimestamp": "2013-05-30T09:47:30.980-07:00", + "deprecated": { + "deprecated": "2013-11-14T00:00:00Z", + "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", + "state": "DEPRECATED" + }, + "description": "Debian GNU/Linux 7.0 (wheezy) built on 2013-05-22", + "id": "622079684385221180", + "kind": "compute#image", + "name": "debian-7-wheezy-v20130522", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20130522", + "sourceType": "RAW", + "status": "READY" + }, + { + "archiveSizeBytes": "310882322", + "creationTimestamp": "2013-06-19T13:47:20.563-07:00", + "deprecated": { + "deprecated": "2013-11-14T00:00:00Z", + "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", + "state": "DEPRECATED" + }, + "description": "Debian GNU/Linux 7.1 (wheezy) built on 2013-06-17", + "id": "1549141992333368759", + "kind": "compute#image", + "name": "debian-7-wheezy-v20130617", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": 
"https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20130617", + "sourceType": "RAW", + "status": "READY" + }, + { + "archiveSizeBytes": "258869806", + "creationTimestamp": "2013-07-24T12:31:36.790-07:00", + "deprecated": { + "deprecated": "2013-11-14T00:00:00Z", + "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", + "state": "DEPRECATED" + }, + "description": "Debian GNU/Linux 7.1 (wheezy) built on 2013-07-23", + "id": "3119304810142650253", + "kind": "compute#image", + "name": "debian-7-wheezy-v20130723", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20130723", + "sourceType": "RAW", + "status": "READY" + }, + { + "archiveSizeBytes": "279162251", + "creationTimestamp": "2013-09-04T13:24:30.479-07:00", + "deprecated": { + "deprecated": "2013-11-14T00:00:00Z", + "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", + "state": "DEPRECATED" + }, + "description": "Debian GNU/Linux 7.1 (wheezy) built on 2013-08-16", + "id": "2595370902107306327", + "kind": "compute#image", + "name": "debian-7-wheezy-v20130816", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20130816", + "sourceType": "RAW", + "status": "READY" + }, + { + "archiveSizeBytes": "279162251", + "creationTimestamp": "2013-10-11T09:26:56.993-07:00", + "deprecated": { + "deprecated": "2013-11-14T00:00:00Z", + "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", + "state": "DEPRECATED" + }, + "description": "Debian GNU/Linux 7.1 (wheezy) built on 2013-09-26", + "id": "06737951524754934395", + "kind": "compute#image", + "name": 
"debian-7-wheezy-v20130926", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20130926", + "sourceType": "RAW", + "status": "READY" + }, + { + "archiveSizeBytes": "405683884", + "creationTimestamp": "2013-10-28T13:52:08.233-07:00", + "deprecated": { + "deprecated": "2013-12-02T12:00:00Z", + "replacement": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "state": "DEPRECATED" + }, + "description": "Debian GNU/Linux 7.2 (wheezy) built on 2013-10-14", + "id": "1405559880052641502", + "kind": "compute#image", + "name": "debian-7-wheezy-v20131014", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131014", + "sourceType": "RAW", + "status": "READY" + }, + { + "archiveSizeBytes": "341857472", + "creationTimestamp": "2013-11-25T15:17:00.436-08:00", + "description": "Debian GNU/Linux 7.2 (wheezy) built on 2013-11-20", + "id": "05708985343919147751", + "kind": "compute#image", + "name": "debian-7-wheezy-v20131120", + "rawDisk": { + "containerType": "TAR", + "source": "" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "sourceType": "RAW", + "status": "READY" + } + ], + "kind": "compute#imageList", + "selfLink": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,111 @@ +{ + "id": "projects/project_name/regions", + "items": [ + { + 
"creationTimestamp": "2013-04-19T17:58:16.641-07:00", + "description": "europe-west1", + "id": "0827308347805275727", + "kind": "compute#region", + "name": "europe-west1", + "quotas": [ + { + "limit": 24.0, + "metric": "CPUS", + "usage": 0.0 + }, + { + "limit": 5120.0, + "metric": "DISKS_TOTAL_GB", + "usage": 0.0 + }, + { + "limit": 7.0, + "metric": "STATIC_ADDRESSES", + "usage": 0.0 + }, + { + "limit": 23.0, + "metric": "IN_USE_ADDRESSES", + "usage": 0.0 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/europe-west1", + "status": "UP", + "zones": [ + "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a", + "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b" + ] + }, + { + "creationTimestamp": "2013-04-19T18:17:05.050-07:00", + "description": "us-central1", + "id": "06713580496607310378", + "kind": "compute#region", + "name": "us-central1", + "quotas": [ + { + "limit": 24.0, + "metric": "CPUS", + "usage": 3.0 + }, + { + "limit": 5120.0, + "metric": "DISKS_TOTAL_GB", + "usage": 10.0 + }, + { + "limit": 7.0, + "metric": "STATIC_ADDRESSES", + "usage": 0.0 + }, + { + "limit": 23.0, + "metric": "IN_USE_ADDRESSES", + "usage": 4.0 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "status": "UP", + "zones": [ + "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", + "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b" + ] + }, + { + "creationTimestamp": "2013-04-19T18:19:05.482-07:00", + "description": "us-central2", + "id": "04157375529195793136", + "kind": "compute#region", + "name": "us-central2", + "quotas": [ + { + "limit": 24.0, + "metric": "CPUS", + "usage": 0.0 + }, + { + "limit": 5120.0, + "metric": "DISKS_TOTAL_GB", + "usage": 0.0 + }, + { + "limit": 7.0, + "metric": "STATIC_ADDRESSES", + "usage": 0.0 + }, + { + "limit": 23.0, + "metric": 
"IN_USE_ADDRESSES", + "usage": 0.0 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central2", + "status": "UP", + "zones": [ + "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a" + ] + } + ], + "kind": "compute#regionList", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,29 @@ +{ + "id": "projects/project_name/regions/us-central1/addresses", + "items": [ + { + "address": "108.59.82.4", + "creationTimestamp": "2013-06-26T09:48:31.184-07:00", + "description": "", + "id": "17634862894218443422", + "kind": "compute#address", + "name": "libcloud-demo-address", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/libcloud-demo-address", + "status": "RESERVED" + }, + { + "address": "173.255.114.104", + "creationTimestamp": "2013-06-04T16:28:43.764-07:00", + "description": "", + "id": "11879548153827627972", + "kind": "compute#address", + "name": "testaddress", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/testaddress", + "status": "RESERVED" + } + ], + "kind": "compute#addressList", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses" +} \ No 
newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress_delete.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "7128783508312083402", + "insertTime": "2013-06-26T12:21:44.075-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_addresses_lcaddress_delete", + "operationType": "delete", + "progress": 0, + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_addresses_lcaddress_delete", + "startTime": "2013-06-26T12:21:44.110-07:00", + "status": "PENDING", + "targetId": "01531551729918243104", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/lcaddress", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_lcaddress.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,11 @@ +{ + "address": "173.255.113.20", + "creationTimestamp": "2013-06-26T12:21:40.625-07:00", + "description": "", + "id": "01531551729918243104", + "kind": 
"compute#address", + "name": "lcaddress", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/lcaddress", + "status": "RESERVED" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_addresses_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "id": "16064059851942653139", + "insertTime": "2013-06-26T12:21:40.299-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_addresses_post", + "operationType": "insert", + "progress": 0, + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_addresses_post", + "startTime": "2013-06-26T12:21:40.358-07:00", + "status": "PENDING", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/addresses/lcaddress", + "user": "897001307951@developer.gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,31 @@ +{ + 
"id": "projects/project_name/regions/us-central1/forwardingRules", + "items": [ + { + "IPAddress": "173.255.119.224", + "IPProtocol": "TCP", + "creationTimestamp": "2013-09-03T00:17:25.544-07:00", + "id": "10901665092293158938", + "kind": "compute#forwardingRule", + "name": "lcforwardingrule", + "portRange": "8000-8500", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/lcforwardingrule", + "target": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/libcloud-lb-demo-lb-tp" + }, + { + "IPAddress": "173.255.119.185", + "IPProtocol": "TCP", + "creationTimestamp": "2013-09-02T22:25:50.575-07:00", + "id": "15826316229163619337", + "kind": "compute#forwardingRule", + "name": "libcloud-lb-demo-lb", + "portRange": "80-80", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/libcloud-lb-demo-lb", + "target": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/libcloud-lb-demo-lb-tp" + } + ], + "kind": "compute#forwardingRuleList", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwardingrule_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwardingrule_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwardingrule_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwardingrule_delete.json 2014-05-26 
15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "09064254309855814339", + "insertTime": "2013-09-03T00:17:36.062-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_forwardingRules_lcforwardingrule_delete", + "operationType": "delete", + "progress": 0, + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_forwardingRules_lcforwardingrule_delete", + "startTime": "2013-09-03T00:17:36.168-07:00", + "status": "PENDING", + "targetId": "10901665092293158938", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/lcforwardingrule", + "user": "user@gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwardingrule.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwardingrule.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwardingrule.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwardingrule.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,12 @@ +{ + "IPAddress": "173.255.119.224", + "IPProtocol": "TCP", + "creationTimestamp": "2013-09-03T00:17:25.544-07:00", + "id": "10901665092293158938", + "kind": "compute#forwardingRule", + "name": "lcforwardingrule", + "portRange": "8000-8500", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/lcforwardingrule", + "target": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool" +} 
diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_libcloud-lb-demo-lb.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_libcloud-lb-demo-lb.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_libcloud-lb-demo-lb.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_libcloud-lb-demo-lb.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,12 @@ +{ + "IPAddress": "108.59.83.110", + "IPProtocol": "TCP", + "creationTimestamp": "2013-09-29T13:30:00.702-07:00", + "id": "1077550228014866104", + "kind": "compute#forwardingRule", + "name": "libcloud-lb-demo-lb", + "portRange": "80-80", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/libcloud-lb-demo-lb", + "target": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/libcloud-lb-demo-lb-tp" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "id": "0651769405845333112", + "insertTime": "2013-09-03T00:17:25.381-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_forwardingRules_post", + "operationType": "insert", + "progress": 0, + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + 
"selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_forwardingRules_post", + "startTime": "2013-09-03T00:17:25.434-07:00", + "status": "PENDING", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/lcforwardingrule", + "user": "user@gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,38 @@ +{ + "id": "projects/project_name/regions/us-central1/targetPools", + "items": [ + { + "creationTimestamp": "2013-09-03T00:51:05.300-07:00", + "healthChecks": [ + "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck" + ], + "id": "13598380121688918358", + "instances": [ + "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-000", + "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-001" + ], + "kind": "compute#targetPool", + "name": "lctargetpool", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool" + }, + { + "creationTimestamp": "2013-09-02T22:25:45.817-07:00", + "healthChecks": [ + "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck" + ], + "id": "16862638289615591831", + "instances": [ + 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-002", + "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-001", + "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-000" + ], + "kind": "compute#targetPool", + "name": "libcloud-lb-demo-lb-tp", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/libcloud-lb-demo-lb-tp" + } + ], + "kind": "compute#targetPoolList", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_addHealthCheck_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_addHealthCheck_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_addHealthCheck_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_addHealthCheck_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "17341029456963557514", + "insertTime": "2013-09-03T01:28:40.774-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_targetPools_lctargetpool_addHealthCheck_post", + "operationType": "addHealthCheck", + "progress": 0, + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_addHealthCheck_post", + "startTime": 
"2013-09-03T01:28:40.838-07:00", + "status": "PENDING", + "targetId": "16862638289615591831", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", + "user": "user@gserviceaccount.com" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_addInstance_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_addInstance_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_addInstance_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_addInstance_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "04072826501537092633", + "insertTime": "2013-09-03T01:29:03.082-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_targetPools_lctargetpool_addInstance_post", + "operationType": "addInstance", + "progress": 0, + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_addInstance_post", + "startTime": "2013-09-03T01:29:03.145-07:00", + "status": "PENDING", + "targetId": "16862638289615591831", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", + "user": "user@gserviceaccount.com" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_delete.json 1970-01-01 00:00:00.000000000 
+0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_delete.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "13500662190763995965", + "insertTime": "2013-09-03T00:51:06.799-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_targetPools_lctargetpool_delete", + "operationType": "delete", + "progress": 0, + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_delete", + "startTime": "2013-09-03T00:51:06.840-07:00", + "status": "PENDING", + "targetId": "13598380121688918358", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", + "user": "user@gserviceaccount.com" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "creationTimestamp": "2013-09-03T00:51:05.300-07:00", + "healthChecks": [ + "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck" + ], + "id": "13598380121688918358", + "instances": [ + "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-000", + "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-001" + ], + "kind": 
"compute#targetPool", + "name": "lctargetpool", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "14738174613993796821", + "insertTime": "2013-09-03T01:28:32.889-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post", + "operationType": "removeHealthCheck", + "progress": 0, + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post", + "startTime": "2013-09-03T01:28:32.942-07:00", + "status": "PENDING", + "targetId": "16862638289615591831", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", + "user": "user@gserviceaccount.com" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_removeInstance_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_removeInstance_post.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_removeInstance_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_removeInstance_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "1815686149437875016", + "insertTime": "2013-09-03T01:28:53.049-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_targetPools_lctargetpool_removeInstance_post", + "operationType": "removeInstance", + "progress": 0, + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_lctargetpool_removeInstance_post", + "startTime": "2013-09-03T01:28:53.109-07:00", + "status": "PENDING", + "targetId": "16862638289615591831", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", + "user": "user@gserviceaccount.com" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_libcloud-lb-demo-lb-tp.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_libcloud-lb-demo-lb-tp.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_libcloud-lb-demo-lb-tp.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_libcloud-lb-demo-lb-tp.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "creationTimestamp": "2013-09-02T22:25:45.817-07:00", + "healthChecks": [ + "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/libcloud-lb-demo-healthcheck" + ], + "id": "16862638289615591831", + "instances": [ + 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-002", + "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-001", + "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-000" + ], + "kind": "compute#targetPool", + "name": "libcloud-lb-demo-lb-tp", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/libcloud-lb-demo-lb-tp" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_post.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "id": "7487852523793007955", + "insertTime": "2013-09-03T00:51:05.064-07:00", + "kind": "compute#operation", + "name": "operation-regions_us-central1_targetPools_post", + "operationType": "insert", + "progress": 0, + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/operations/operation-regions_us-central1_targetPools_post", + "startTime": "2013-09-03T00:51:05.115-07:00", + "status": "PENDING", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool", + "user": "user@gserviceaccount.com" +} \ No newline at end of file diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_www-pool.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_www-pool.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_www-pool.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_www-pool.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,17 @@ +{ + "creationTimestamp": "2013-08-19T14:43:25.289-07:00", + "description": "", + "healthChecks": [ + "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/basic-check" + ], + "id": "09965129111508633746", + "instances": [ + "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/www1", + "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/www2", + "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/www3" + ], + "kind": "compute#targetPool", + "name": "www-pool", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/www-pool" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,169 @@ +{ + "id": "projects/project_name/zones/europe-west1-a/instances", + "items": [ + { + "canIpForward": false, + "creationTimestamp": "2013-12-13T10:43:58.782-08:00", + "disks": [ + { + "boot": true, + 
"deviceName": "libcloud-demo-europe-multiple-nodes-000", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-multiple-nodes-000", + "type": "PERSISTENT" + } + ], + "id": "10947706194464948790", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-europe-multiple-nodes-000", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "192.158.28.252", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "networkIP": "10.240.122.85" + } + ], + "scheduling": { + "automaticRestart": true, + "onHostMaintenance": "MIGRATE" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-multiple-nodes-000", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" + }, + { + "canIpForward": false, + "creationTimestamp": "2013-12-13T10:43:12.706-08:00", + "disks": [ + { + "boot": true, + "deviceName": "libcloud-demo-europe-np-node", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-np-node", + "type": "PERSISTENT" + }, + { + "deviceName": "libcloud-demo-europe-attach-disk", + "index": 1, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-attach-disk", + "type": "PERSISTENT" + } + ], + "id": "3421745795082776097", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-europe-np-node", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "23.251.128.10", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "networkIP": "10.240.221.125" + } + ], + "scheduling": { + "automaticRestart": true, + "onHostMaintenance": "MIGRATE" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-np-node", + "status": "RUNNING", + "tags": { + "fingerprint": "W7t6ZyTyIrc=", + "items": [ + "libcloud" + ] + }, + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" + }, + { + "canIpForward": false, + "creationTimestamp": "2013-12-13T10:43:37.267-08:00", + "disks": [ + { + "boot": true, + "deviceName": "libcloud-demo-europe-boot-disk", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/disks/libcloud-demo-europe-boot-disk", + "type": "PERSISTENT" + } + ], + "id": "517678477070693411", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "libcloud-demo-europe-persist-node", + "networkInterfaces": [ + { + "accessConfigs": 
[ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "23.251.128.32", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "networkIP": "10.240.240.204" + } + ], + "scheduling": { + "automaticRestart": true, + "onHostMaintenance": "MIGRATE" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-persist-node", + "status": "RUNNING", + "tags": { + "fingerprint": "EbZdwVRtKyg=", + "items": [ + "libcloud", + "newtag" + ] + }, + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" + } + ], + "kind": "compute#instanceList", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/instances" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_instances_post.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "1510575454210533141", + "insertTime": "2013-06-26T20:57:34.366-07:00", + "kind": "compute#operation", + "name": "operation-zones_europe-west1-a_instances_post", + "operationType": "insert", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/operations/operation-zones_europe-west1-a_instances_post", + "startTime": "2013-06-26T20:57:34.453-07:00", + "status": "PENDING", + "targetId": "14308265828754333159", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/instances/libcloud-demo-europe-np-node", + "user": 
"897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_machineTypes_n1-standard-1.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_machineTypes_n1-standard-1.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_machineTypes_n1-standard-1.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_europe-west1-a_machineTypes_n1-standard-1.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "creationTimestamp": "2012-06-07T13:48:14.670-07:00", + "description": "1 vCPU, 3.75 GB RAM", + "guestCpus": 1, + "id": "11077240422128681563", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a/machineTypes/n1-standard-1", + "zone": "europe-west1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones.json 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,85 @@ +{ + "id": "projects/project_name/zones", + "items": [ + { + "creationTimestamp": "2013-02-05T16:19:23.254-08:00", + "description": "europe-west1-a", + "id": "13416642339679437530", + "kind": "compute#zone", + "maintenanceWindows": [ + { + "beginTime": "2014-01-18T12:00:00.000-08:00", + "description": "maintenance zone", + "endTime": "2014-02-02T12:00:00.000-08:00", + "name": "2014-01-18-planned-outage" + } + ], + "name": "europe-west1-a", + 
"region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/europe-west1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-a", + "status": "UP" + }, + { + "creationTimestamp": "2013-02-05T16:19:23.254-08:00", + "description": "europe-west1-b", + "id": "20623650177407096", + "kind": "compute#zone", + "maintenanceWindows": [ + { + "beginTime": "2014-03-15T12:00:00.000-07:00", + "description": "maintenance zone", + "endTime": "2014-03-30T12:00:00.000-07:00", + "name": "2014-03-15-planned-outage" + } + ], + "name": "europe-west1-b", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/europe-west1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/europe-west1-b", + "status": "UP" + }, + { + "creationTimestamp": "2013-02-05T16:19:23.269-08:00", + "description": "us-central1-a", + "id": "13462829244527433283", + "kind": "compute#zone", + "name": "us-central1-a", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", + "status": "UP" + }, + { + "creationTimestamp": "2013-02-05T16:19:23.269-08:00", + "description": "us-central1-b", + "id": "1045862591201432620", + "kind": "compute#zone", + "name": "us-central1-b", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b", + "status": "UP" + }, + { + "creationTimestamp": "2013-02-05T16:19:23.257-08:00", + "deprecated": { + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b", + "state": "DEPRECATED" + }, + "description": "us-central2-a", + "id": "1001467574647549152", + "kind": "compute#zone", + "maintenanceWindows": [ + { + "beginTime": "2013-12-31T12:00:00.000-08:00", + "description": 
"maintenance zone", + "endTime": "2014-07-01T12:00:00.000-07:00", + "name": "2013-12-31-planned-outage" + } + ], + "name": "us-central2-a", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central2", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central2-a", + "status": "UP" + } + ], + "kind": "compute#zoneList", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,30 @@ +{ + "id": "projects/project_name/zones/us-central1-a/disks", + "items": [ + { + "creationTimestamp": "2013-12-13T10:45:42.139-08:00", + "id": "08045379695757218000", + "kind": "compute#disk", + "name": "lcdisk", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", + "sizeGb": "1", + "status": "READY", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" + }, + { + "creationTimestamp": "2013-12-13T10:45:20.308-08:00", + "description": "Image: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "id": "0265567475385851075", + "kind": "compute#disk", + "name": "node-name", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/node-name", + "sizeGb": "10", + "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", + "sourceImageId": "17312518942796567788", + "status": "READY", + "zone": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" + } + ], + "kind": "compute#diskList", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_createSnapshot_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_createSnapshot_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_createSnapshot_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_createSnapshot_post.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "0158330665043557584", + "insertTime": "2013-12-16T13:03:51.000-08:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_disks_lcdisk_createSnapshot_post", + "operationType": "createSnapshot", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_disks_lcdisk_createSnapshot_post", + "startTime": "2013-12-16T13:03:51.042-08:00", + "status": "PENDING", + "targetId": "07494414044179227172", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", + "user": "487551519631-t6qvu2na6p4u9ptm46bsdujf0ohbdro7@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk_delete.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "06887337364510109333", + "insertTime": "2013-06-26T10:06:11.835-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_disks_lcdisk_delete", + "operationType": "delete", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_disks_lcdisk_delete", + "startTime": "2013-06-26T10:06:12.006-07:00", + "status": "PENDING", + "targetId": "16109451798967042451", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "creationTimestamp": "2013-06-26T10:06:04.007-07:00", + "id": "16109451798967042451", + "kind": "compute#disk", + "name": "lcdisk", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", + "sizeGb": "1", + "status": "READY", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_post.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_post.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "id": "0211151278250678078", + "insertTime": "2013-06-26T16:48:17.403-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_disks_post", + "operationType": "insert", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_disks_post", + "startTime": "2013-06-26T16:48:17.479-07:00", + "status": "PENDING", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,55 @@ +{ + "id": "projects/project_name/zones/us-central1-a/instances", + "items": [ + { + "canIpForward": false, + "creationTimestamp": "2013-12-13T10:45:23.351-08:00", + "disks": [ + { + "boot": true, + "deviceName": "persistent-disk-0", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/node-name", + "type": "PERSISTENT" + } + ], + "id": "4006034190819017667", + "kind": "compute#instance", + "machineType": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "node-name", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "23.236.58.15", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "networkIP": "10.240.72.75" + } + ], + "scheduling": { + "automaticRestart": true, + "onHostMaintenance": "MIGRATE" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", + "status": "RUNNING", + "tags": { + "fingerprint": "42WmSpB8rSM=" + }, + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" + } + ], + "kind": "compute#instanceList", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000_delete.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "3319596145594427549", + "insertTime": "2013-06-26T16:13:12.903-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_lcnode-000_delete", + "operationType": "delete", + "progress": 0, + "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_lcnode-000_delete", + "startTime": "2013-06-26T16:13:12.948-07:00", + "status": "PENDING", + "targetId": "5390075309006132922", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/lcnode-000", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-000.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,48 @@ +{ + "canIpForward": false, + "creationTimestamp": "2013-12-13T10:54:07.687-08:00", + "disks": [ + { + "boot": true, + "deviceName": "lcnode-000", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcnode-000", + "type": "PERSISTENT" + } + ], + "id": "17170905942674172532", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "lcnode-000", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "173.255.114.35", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": 
"https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "networkIP": "10.240.160.66" + } + ], + "scheduling": { + "automaticRestart": true, + "onHostMaintenance": "MIGRATE" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/lcnode-000", + "status": "RUNNING", + "tags": { + "fingerprint": "42WmSpB8rSM=" + }, + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001_delete.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "17469711273432628502", + "insertTime": "2013-06-26T16:13:40.579-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_lcnode-001_delete", + "operationType": "delete", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_lcnode-001_delete", + "startTime": "2013-06-26T16:13:40.620-07:00", + "status": "PENDING", + "targetId": "16630486471904253898", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/lcnode-001", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_lcnode-001.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,48 @@ +{ + "canIpForward": false, + "creationTimestamp": "2013-12-13T10:54:08.639-08:00", + "disks": [ + { + "boot": true, + "deviceName": "lcnode-001", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcnode-001", + "type": "PERSISTENT" + } + ], + "id": "09356229693786319079", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "lcnode-001", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "173.255.117.19", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "networkIP": "10.240.168.208" + } + ], + "scheduling": { + "automaticRestart": true, + "onHostMaintenance": "MIGRATE" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/lcnode-001", + "status": "RUNNING", + "tags": { + "fingerprint": "42WmSpB8rSM=" + }, + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_attachDisk_post.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_attachDisk_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_attachDisk_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_attachDisk_post.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "7455886659787654716", + "insertTime": "2013-06-26T16:48:27.691-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_attachDisk_post", + "operationType": "attachDisk", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_attachDisk_post", + "startTime": "2013-06-26T16:48:27.762-07:00", + "status": "PENDING", + "targetId": "1845312225624811608", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_delete.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "6999931397447918763", + "insertTime": "2013-06-26T10:05:40.350-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_delete", + "operationType": "delete", + "progress": 
0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_delete", + "startTime": "2013-06-26T10:05:40.405-07:00", + "status": "PENDING", + "targetId": "07410051435384876224", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_detachDisk_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_detachDisk_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_detachDisk_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_detachDisk_post.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "3921383727105838816", + "insertTime": "2013-06-26T16:48:35.357-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_detachDisk_post", + "operationType": "detachDisk", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_detachDisk_post", + "startTime": "2013-06-26T16:48:35.398-07:00", + "status": "PENDING", + "targetId": "1845312225624811608", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,48 @@ +{ + "canIpForward": false, + "creationTimestamp": "2013-12-13T10:45:23.351-08:00", + "disks": [ + { + "boot": true, + "deviceName": "persistent-disk-0", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/node-name", + "type": "PERSISTENT" + } + ], + "id": "4006034190819017667", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "42WmSpB8rSM=", + "kind": "compute#metadata" + }, + "name": "node-name", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "23.236.58.15", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "networkIP": "10.240.72.75" + } + ], + "scheduling": { + "automaticRestart": true, + "onHostMaintenance": "MIGRATE" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", + "status": "RUNNING", + "tags": { + "fingerprint": "42WmSpB8rSM=" + }, + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_reset_post.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_reset_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_reset_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_reset_post.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "10507122129283663728", + "insertTime": "2013-06-26T15:03:02.766-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_reset_post", + "operationType": "reset", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_reset_post", + "startTime": "2013-06-26T15:03:02.813-07:00", + "status": "PENDING", + "targetId": "1845312225624811608", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_setDiskAutoDelete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_setDiskAutoDelete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_setDiskAutoDelete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_setDiskAutoDelete.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "kind": "compute#operation", + "id": "14265294323024381703", + "name": "operation-volume-auto-delete", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", + 
"operationType": "setDiskAutoDelete", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/dev-test", + "targetId": "4313186599918690450", + "status": "PENDING", + "user": "user@developer.gserviceaccount.com", + "progress": 0, + "insertTime": "2014-03-13T21:50:57.612-07:00", + "startTime": "2014-03-13T21:50:57.717-07:00", + "endTime": "2014-03-13T21:50:58.047-07:00", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-volume-auto-delete" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_setTags_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_setTags_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_setTags_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_node-name_setTags_post.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "id": "8115150846190320932", + "insertTime": "2013-06-26T21:20:03.962-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_node-name_setTags_post", + "operationType": "setTags", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_node-name_setTags_post", + "startTime": "2013-06-26T21:20:04.103-07:00", + "status": "PENDING", + "targetId": "1845312225624811608", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/node-name", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_post.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_post.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_instances_post.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "id": "1858155812259649243", + "insertTime": "2013-06-26T16:12:51.492-07:00", + "kind": "compute#operation", + "name": "operation-zones_us-central1-a_instances_post", + "operationType": "insert", + "progress": 0, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-zones_us-central1-a_instances_post", + "startTime": "2013-06-26T16:12:51.537-07:00", + "status": "PENDING", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/lcnode-001", + "user": "897001307951@developer.gserviceaccount.com", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "creationTimestamp": "2013-02-05T16:19:23.269-08:00", + "description": "us-central1-a", + "id": "13462829244527433283", + "kind": "compute#zone", + "name": "us-central1-a", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", + "status": "UP" +} 
\ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,424 @@ +{ + "id": "projects/project_name/zones/us-central1-a/machineTypes", + "items": [ + { + "creationTimestamp": "2013-04-25T13:32:49.088-07:00", + "description": "1 vCPU (shared physical core) and 0.6 GB RAM", + "guestCpus": 1, + "id": "1133568312750571513", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 614, + "name": "f1-micro", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/f1-micro", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2013-04-25T13:32:45.550-07:00", + "description": "1 vCPU (shared physical core) and 1.7 GB RAM", + "guestCpus": 1, + "id": "1500265464823777597", + "imageSpaceGb": 0, + "kind": "compute#machineType", + "maximumPersistentDisks": 4, + "maximumPersistentDisksSizeGb": "3072", + "memoryMb": 1740, + "name": "g1-small", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/g1-small", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:46:10.572-08:00", + "description": "2 vCPUs, 1.8 GB RAM", + "guestCpus": 2, + "id": "16898271314080235997", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2", + "selfLink": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-2", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:47:07.825-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-2", + "state": "DEPRECATED" + }, + "description": "2 vCPUs, 1.8 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "15178384466070744001", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 1843, + "name": "n1-highcpu-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-2-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:48:06.087-08:00", + "description": "4 vCPUs, 3.6 GB RAM", + "guestCpus": 4, + "id": "04759000181765218034", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-4", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:49:07.563-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-4", + "state": "DEPRECATED" + }, + "description": "4 vCPUS, 3.6 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "01151097524490134507", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3686, + "name": "n1-highcpu-4-d", + "scratchDisks": [ + { + 
"diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-4-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:50:15.128-08:00", + "description": "8 vCPUs, 7.2 GB RAM", + "guestCpus": 8, + "id": "01206886442411821831", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-8", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:51:04.549-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-8", + "state": "DEPRECATED" + }, + "description": "8 vCPUS, 7.2 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "02507333096579477005", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7373, + "name": "n1-highcpu-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highcpu-8-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:40:06.129-08:00", + "description": "2 vCPUs, 13 GB RAM", + "guestCpus": 2, + "id": "05438694236916301519", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-2", + "zone": "us-central1-a" + }, + { + "creationTimestamp": 
"2012-11-16T11:40:59.630-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-2", + "state": "DEPRECATED" + }, + "description": "2 vCPUs, 13 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "00770157291441082211", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 13312, + "name": "n1-highmem-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-2-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:42:08.983-08:00", + "description": "4 vCPUs, 26 GB RAM", + "guestCpus": 4, + "id": "11556032176405786676", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-4", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:43:17.400-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-4", + "state": "DEPRECATED" + }, + "description": "4 vCPUs, 26 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "05095504563332567951", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 26624, + "name": "n1-highmem-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-4-d", + "zone": 
"us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:44:25.985-08:00", + "description": "8 vCPUs, 52 GB RAM", + "guestCpus": 8, + "id": "01717932668777642040", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-8", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-11-16T11:45:08.195-08:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-8", + "state": "DEPRECATED" + }, + "description": "8 vCPUs, 52 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", + "guestCpus": 8, + "id": "07181827135536388552", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 53248, + "name": "n1-highmem-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-highmem-8-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:14.670-07:00", + "description": "1 vCPU, 3.75 GB RAM", + "guestCpus": 1, + "id": "11077240422128681563", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:34.258-07:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "state": "DEPRECATED" + }, + "description": "1 vCPU, 3.75 GB RAM, 1 scratch disk (420 GB)", + "guestCpus": 1, + "id": "10583029372018866711", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1-d", + "scratchDisks": [ + { + "diskGb": 420 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:48:56.867-07:00", + "description": "2 vCPUs, 7.5 GB RAM", + "guestCpus": 2, + "id": "17936898073622676356", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-2", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:49:19.448-07:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-2", + "state": "DEPRECATED" + }, + "description": "2 vCPUs, 7.5 GB RAM, 1 scratch disk (870 GB)", + "guestCpus": 2, + "id": "06313284160910191442", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 7680, + "name": "n1-standard-2-d", + "scratchDisks": [ + { + "diskGb": 870 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-2-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:49:40.050-07:00", + "description": "4 vCPUs, 15 GB RAM", 
+ "guestCpus": 4, + "id": "09494636486174545828", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-4", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:50:05.677-07:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-4", + "state": "DEPRECATED" + }, + "description": "4 vCPUs, 15 GB RAM, 1 scratch disk (1770 GB)", + "guestCpus": 4, + "id": "00523085164784013586", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 15360, + "name": "n1-standard-4-d", + "scratchDisks": [ + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-4-d", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:50:42.334-07:00", + "description": "8 vCPUs, 30 GB RAM", + "guestCpus": 8, + "id": "04084282969223214132", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 30720, + "name": "n1-standard-8", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-8", + "zone": "us-central1-a" + }, + { + "creationTimestamp": "2012-06-07T13:51:19.936-07:00", + "deprecated": { + "deprecated": "2013-12-02T20:00:00-08:00", + "replacement": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-8", + "state": "DEPRECATED" + }, + "description": "8 vCPUs, 30 GB RAM, 2 scratch disks (1770 GB, 1770 GB)", 
+ "guestCpus": 8, + "id": "00035824420671580077", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 30720, + "name": "n1-standard-8-d", + "scratchDisks": [ + { + "diskGb": 1770 + }, + { + "diskGb": 1770 + } + ], + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-8-d", + "zone": "us-central1-a" + } + ], + "kind": "compute#machineTypeList", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes_n1-standard-1.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes_n1-standard-1.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes_n1-standard-1.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-a_machineTypes_n1-standard-1.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "creationTimestamp": "2012-06-07T13:48:14.670-07:00", + "description": "1 vCPU, 3.75 GB RAM", + "guestCpus": 1, + "id": "11077240422128681563", + "imageSpaceGb": 10, + "kind": "compute#machineType", + "maximumPersistentDisks": 16, + "maximumPersistentDisksSizeGb": "10240", + "memoryMb": 3840, + "name": "n1-standard-1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/machineTypes/n1-standard-1", + "zone": "us-central1-a" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_volume_auto_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_volume_auto_delete.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_volume_auto_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_volume_auto_delete.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "kind": "compute#operation", + "id": "14265294323024381703", + "name": "operation-volume-auto-delete", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", + "operationType": "setDiskAutoDelete", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/dev-test", + "targetId": "4313186599918690450", + "status": "DONE", + "user": "user@developer.gserviceaccount.com", + "progress": 100, + "insertTime": "2014-03-13T21:50:57.612-07:00", + "startTime": "2014-03-13T21:50:57.717-07:00", + "endTime": "2014-03-13T21:50:58.047-07:00", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-volume-auto-delete" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-000.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-000.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-000.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-000.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,57 @@ +{ + "canIpForward": false, + "creationTimestamp": "2013-12-13T10:51:24.339-08:00", + "disks": [ + { + "boot": true, + "deviceName": "libcloud-lb-demo-www-000", + "index": 0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/disks/libcloud-lb-demo-www-000", + 
"type": "PERSISTENT" + } + ], + "id": "08447900841145802741", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "IZjMMp0A_8k=", + "items": [ + { + "key": "startup-script", + "value": "apt-get -y update && apt-get -y install apache2 && hostname > /var/www/index.html" + } + ], + "kind": "compute#metadata" + }, + "name": "libcloud-lb-demo-www-000", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "23.236.58.15", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "networkIP": "10.240.104.11" + } + ], + "scheduling": { + "automaticRestart": true, + "onHostMaintenance": "MIGRATE" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-000", + "status": "RUNNING", + "tags": { + "fingerprint": "XI0he92M8l8=", + "items": [ + "libcloud-lb-demo-www" + ] + }, + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-001.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-001.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-001.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-001.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,57 @@ +{ + "canIpForward": false, + "creationTimestamp": "2013-12-13T10:51:25.165-08:00", + "disks": [ + { + "boot": true, + "deviceName": "libcloud-lb-demo-www-001", + "index": 
0, + "kind": "compute#attachedDisk", + "mode": "READ_WRITE", + "source": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/disks/libcloud-lb-demo-www-001", + "type": "PERSISTENT" + } + ], + "id": "11523404878553997348", + "kind": "compute#instance", + "machineType": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/machineTypes/n1-standard-1", + "metadata": { + "fingerprint": "09vSzO6KXcw=", + "items": [ + { + "key": "startup-script", + "value": "apt-get -y update && apt-get -y install apache2 && hostname > /var/www/index.html" + } + ], + "kind": "compute#metadata" + }, + "name": "libcloud-lb-demo-www-001", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "23.236.58.59", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "https://www.googleapis.com/compute/v1/projects/project_name/global/networks/default", + "networkIP": "10.240.94.107" + } + ], + "scheduling": { + "automaticRestart": true, + "onHostMaintenance": "MIGRATE" + }, + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b/instances/libcloud-lb-demo-www-001", + "status": "RUNNING", + "tags": { + "fingerprint": "XI0he92M8l8=", + "items": [ + "libcloud-lb-demo-www" + ] + }, + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-b" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-002.json libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-002.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-002.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gce/zones_us-central1-b_instances_libcloud-lb-demo-www-002.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,13 @@ +{ + 
"error": { + "code": 404, + "errors": [ + { + "domain": "global", + "message": "The resource 'projects/project-name/zones/us-central1-b/instances/libcloud-lb-demo-www-002' was not found", + "reason": "notFound" + } + ], + "message": "The resource 'projects/project-name/zones/us-central1-b/instances/libcloud-lb-demo-www-002' was not found" + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/image_list.json libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/image_list.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/image_list.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/image_list.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,180 @@ +{ + "list": [ + { + "billingtokens": [ + { + "id": 46, + "name": "CentOS 5.3 32bit", + "price": 0 + } + ], + "description": "CentOS 5.3 (32-bit) w/ None", + "friendlyName": "CentOS 5.3 (32-bit) w/ None", + "id": 1531, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", + "name": "centos5.3_32_base", + "object": "serverimage", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789046453 + }, + { + "billingtokens": [ + { + "id": 47, + "name": "CentOS 5.3 64bit", + "price": 0 + } + ], + "description": "CentOS 5.3 (64-bit) w/ None", + "friendlyName": "CentOS 5.3 (64-bit) w/ None", + "id": 1532, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-939ef909-84b8-4a2f-ad56-02ccd7da05ff.img", + "name": "centos5.3_64_base", + "object": "serverimage", + "os": { + 
"description": "CentOS 5.3 (64-bit)", + "id": 17, + "name": "CentOS 5.3 (64-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789076417 + }, + { + "billingtokens": [ + { + "id": 48, + "name": "RHEL 5.4 32bit", + "price": 0 + } + ], + "description": "RHEL 5.4 (32-bit) w/ None", + "friendlyName": "RHEL 5.4 (32-bit) w/ None", + "id": 1533, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-4c88cb92-dd7b-4bb1-95b6-7cc93eb1d2aa.img", + "name": "rhel5.4_32_base", + "object": "serverimage", + "os": { + "description": "RHEL 5.4 (32-bit)", + "id": 18, + "name": "RHEL 5.4 (32-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789076417 + }, + { + "billingtokens": [ + { + "id": 49, + "name": "RHEL 5.4 64bit", + "price": 0 + } + ], + "description": "RHEL 5.4 (64-bit) w/ None", + "friendlyName": "RHEL 5.4 (64-bit) w/ None", + "id": 1534, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-2bd8ddb3-cc53-4a76-8188-0dce7537a422.img", + "name": "rhel5.4_64_base", + "object": "serverimage", + "os": { + "description": "RHEL 5.4 (64-bit)", + "id": 19, + "name": "RHEL 5.4 (64-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + 
}, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789076417 + } + ], + "method": "/grid/image/list", + "status": "success", + "summary": { + "numpages": 0, + "returned": 59, + "start": 0, + "total": 59 + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/image_save.json libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/image_save.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/image_save.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/image_save.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,62 @@ +{ + "list": [ + { + "architecture": { + "description": "32 bit OS", + "id": 1, + "name": "32-bit", + "object": "option" + }, + "billingtokens": [ + { + "id": 46, + "name": "CentOS 5.3 32bit", + "object": "billingtoken", + "price": 0 + } + ], + "createdTime": 1289119839685, + "description": "", + "friendlyName": "testimage", + "id": 5050, + "isActive": true, + "isPublic": false, + "location": "123/GSI-3ee65927-f80d-43df-92df-6c7e352f009c.img", + "name": "GSI-3ee65927-f80d-43df-92df-6c7e352f009c", + "object": "serverimage", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "owner": { + "id": 123, + "name": "name", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is being saved", + "id": 1, + "name": "Saving", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1289119839685 + } + ], + "method": "/grid/image/save", + "status": "success", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/ip_list_empty.json libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/ip_list_empty.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/ip_list_empty.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/ip_list_empty.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,12 @@ +{ + "list": [ + ], + "method": "/grid/ip/list", + "status": "success", + "summary": { + "numpages": 0, + "returned": 0, + "start": 0, + "total": 0 + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/ip_list.json libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/ip_list.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/ip_list.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/ip_list.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,69 @@ +{ + "list": [ + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 5348099, + "ip": "192.168.75.66", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "192.168.75.64/255.255.255.240" + }, + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 5348100, + "ip": "192.168.75.67", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "192.168.75.64/255.255.255.240" + }, + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 5348101, + "ip": "192.168.75.68", + "object": "ip", + "public": false, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "192.168.75.64/255.255.255.240" + } + ], + "method": "/grid/ip/list", + "status": "success", + "summary": { + "numpages": 0, + "returned": 3, + "start": 
0, + "total": 3 + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/lookup_list_ip_datacenter.json libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/lookup_list_ip_datacenter.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/lookup_list_ip_datacenter.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/lookup_list_ip_datacenter.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,24 @@ +{ + "list": [ + { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + { + "description": "US East 1 Datacenter", + "id": 2, + "name": "US-East-1", + "object": "option" + } + ], + "method": "/common/lookup/list", + "status": "success", + "summary": { + "numpages": 0, + "returned": 2, + "start": 0, + "total": 2 + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/password_list.json libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/password_list.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/password_list.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/password_list.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,102 @@ +{ + "list": [ + { + "password": "bebebe", + "object": "password", + "username": "root", + "server": { + "id": 90967, + "image": { + "billingtokens": [ + { + "id": 46, + "name": "CentOS 5.3 32bit", + "price": 0 + } + ], + "description": "CentOS 5.3 (32-bit) w/ None", + "friendlyName": "CentOS 5.3 (32-bit) w/ None", + "id": 1531, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", + "name": "centos5.3_32_base", + "object": "serverimage", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for 
adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789046453 + }, + "ip": { + "id": 1659927, + "ip": "192.168.0.202", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "192.168.0.192/255.255.255.240" + }, + "isSandbox": false, + "name": "test1", + "object": "server", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "ram": { + "description": "Server with 512MB RAM", + "id": 1, + "name": "512MB", + "object": "option" + }, + "state": { + "description": "Server is in active state.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + } + } + } + ], + "method": "/grid/server/list", + "status": "success", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/server_add.json libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/server_add.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/server_add.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/server_add.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,96 @@ +{ + "list": [ + { + "image": { + "billingtokens": [ + { + "id": 46, + "name": "CentOS 5.3 32bit", + "price": 0 + } + ], + "description": "CentOS 5.3 (32-bit) w/ None", + "friendlyName": "CentOS 5.3 (32-bit) w/ None", + "id": 1531, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", + "name": "centos5.3_32_base", + "object": "serverimage", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + 
"name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789046453 + }, + "ip": { + "id": 1659927, + "ip": "192.168.0.202", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "192.168.0.192/255.255.255.240" + }, + "isSandbox": false, + "name": "test1", + "object": "server", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "ram": { + "description": "Server with 512MB RAM", + "id": 1, + "name": "512MB", + "object": "option" + }, + "state": { + "description": "Server is in active state.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + } + } + ], + "method": "/grid/server/add", + "status": "success", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/server_delete.json libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/server_delete.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/server_delete.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/server_delete.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,97 @@ +{ + "list": [ + { + "id": 90967, + "image": { + "billingtokens": [ + { + "id": 46, + "name": "CentOS 5.3 32bit", + "price": 0 + } + ], + "description": "CentOS 5.3 (32-bit) w/ None", + "friendlyName": "CentOS 5.3 (32-bit) w/ None", + "id": 1531, + 
"isActive": true, + "isPublic": true, + "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", + "name": "centos5.3_32_base", + "object": "serverimage", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789046453 + }, + "ip": { + "id": 1659927, + "ip": "192.168.0.202", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "192.168.0.192/255.255.255.240" + }, + "isSandbox": false, + "name": "test1", + "object": "server", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "ram": { + "description": "Server with 512MB RAM", + "id": 1, + "name": "512MB", + "object": "option" + }, + "state": { + "description": "Server is in active state.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + } + } + ], + "method": "/grid/server/delete", + "status": "success", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/server_edit.json libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/server_edit.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/server_edit.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/server_edit.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,97 @@ +{ + "list": [ + { + "id": 90967, 
+ "image": { + "billingtokens": [ + { + "id": 46, + "name": "CentOS 5.3 32bit", + "price": 0 + } + ], + "description": "CentOS 5.3 (32-bit) w/ None", + "friendlyName": "CentOS 5.3 (32-bit) w/ None", + "id": 1531, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", + "name": "centos5.3_32_base", + "object": "serverimage", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789046453 + }, + "ip": { + "id": 1659927, + "ip": "192.168.0.202", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "192.168.0.192/255.255.255.240" + }, + "isSandbox": false, + "name": "test1", + "object": "server", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "ram": { + "description": "Server with 512MB RAM", + "id": 1, + "name": "512MB", + "object": "option" + }, + "state": { + "description": "Server is in active state.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + } + } + ], + "method": "/grid/server/edit", + "status": "success", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/server_list.json libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/server_list.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/server_list.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/server_list.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,98 @@ +{ + "list": [ + { + "id": 90967, + "image": { + "billingtokens": [ + { + "id": 46, + "name": "CentOS 5.3 32bit", + "price": 0 + } + ], + "description": "CentOS 5.3 (32-bit) w/ None", + "friendlyName": "CentOS 5.3 (32-bit) w/ None", + "id": 1531, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", + "name": "centos5.3_32_base", + "object": "serverimage", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789046453 + }, + "ip": { + "id": 1659927, + "ip": "192.168.0.202", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "192.168.0.192/255.255.255.240" + }, + "isSandbox": false, + "name": "test1", + "description": "test server", + "object": "server", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "ram": { + "description": "Server with 512MB RAM", + "id": 1, + "name": "512MB", + "object": "option" + }, + "state": { + "description": "Server is in active state.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + } + } + ], + "method": "/grid/server/list", + "status": 
"success", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/server_power_fail.json libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/server_power_fail.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/server_power_fail.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/server_power_fail.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,97 @@ +{ + "list": [ + { + "id": 90967, + "image": { + "billingtokens": [ + { + "id": 46, + "name": "CentOS 5.3 32bit", + "price": 0 + } + ], + "description": "CentOS 5.3 (32-bit) w/ None", + "friendlyName": "CentOS 5.3 (32-bit) w/ None", + "id": 1531, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", + "name": "centos5.3_32_base", + "object": "serverimage", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789046453 + }, + "ip": { + "id": 1659927, + "ip": "192.168.0.202", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "192.168.0.192/255.255.255.240" + }, + "isSandbox": false, + "name": "test1", + "object": "server", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "ram": { + "description": "Server with 512MB RAM", + "id": 1, + "name": "512MB", + "object": "option" + }, + "state": { + "description": "Server is in 
active state.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + } + } + ], + "method": "/grid/server/power", + "status": "failure", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/server_power.json libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/server_power.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/gogrid/server_power.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/gogrid/server_power.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,97 @@ +{ + "list": [ + { + "id": 90967, + "image": { + "billingtokens": [ + { + "id": 46, + "name": "CentOS 5.3 32bit", + "price": 0 + } + ], + "description": "CentOS 5.3 (32-bit) w/ None", + "friendlyName": "CentOS 5.3 (32-bit) w/ None", + "id": 1531, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", + "name": "centos5.3_32_base", + "object": "serverimage", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789046453 + }, + "ip": { + "id": 1659927, + "ip": "192.168.0.202", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "192.168.0.192/255.255.255.240" + }, + "isSandbox": false, + "name": "test1", + "object": "server", + "os": { + "description": "CentOS 5.3 (32-bit)", + 
"id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "ram": { + "description": "Server with 512MB RAM", + "id": 1, + "name": "512MB", + "object": "option" + }, + "state": { + "description": "Server is in active state.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + } + } + ], + "method": "/grid/server/power", + "status": "success", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/create_node.json libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/create_node.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/create_node.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/create_node.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,3 @@ +{ + "id": "62291" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/get_node.json libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/get_node.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/get_node.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/get_node.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "host": "659", + "rescue": "0", + "fqdn": "server1.vr-cluster.org", + "mbpkgid": "62291", + "locked": "0", + "os_id": "1613", + "os": "Debian 6 i386 PV", + "ip": "208.111.45.250", + "installed": "0", + "state": "DOWN", + "package": "VR512", + "ipv6": "2607:f740:10::f98", + "city": "MAA - Chennai (Madras), India", + "status": "TERMINATED" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/list_images.json libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/list_images.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/list_images.json 1970-01-01 
00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/list_images.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,98 @@ +[ + { + "id": "1739", + "os": "Gentoo 2012 (0619) i386", + "description": "Gentoo 2012 0619 i386", + "type": "gentoo", + "subtype": "pv", + "size": "840mb", + "created": "2012-06-27 13:21:03", + "updated": "2012-06-27 13:28:50", + "bits": "32", + "tech": "pv" + }, + { + "id": "1613", + "os": "CentOS 5.8 x64", + "description": "CentOS 5.8 x64", + "type": "centos", + "subtype": "pv", + "size": "1.2gb", + "created": "2012-03-31 01:34:54", + "updated": "2012-05-23 16:15:29", + "bits": "32", + "tech": "pv" + }, + { + "id": "1676", + "os": "Ubuntu Server 12.04 LTS 64 bit", + "description": "Ubuntu 12.04 LTS x64", + "type": "ubuntu", + "subtype": "pv", + "size": "800mb", + "created": "2012-05-04 06:20:59", + "updated": "2012-05-04 06:31:09", + "bits": "64", + "tech": "pv" + }, + { + "id": "1667", + "os": "Ubuntu Server 12.04 LTS 32 bit", + "description": "Ubuntu 12.04 i386", + "type": "ubuntu", + "subtype": "pv", + "size": "700mb", + "created": "2012-05-04 06:18:10", + "updated": "2012-05-04 06:31:08", + "bits": "32", + "tech": "pv" + }, + { + "id": "1640", + "os": "CentOS 6.2 x64", + "description": "CentOS 6.2 x64", + "type": "centos", + "subtype": "pv", + "size": "1.2gb", + "created": "2012-03-31 01:36:44", + "updated": "2012-03-31 01:39:25", + "bits": "64", + "tech": "pv" + }, + { + "id": "1631", + "os": "CentOS 6.2 i386", + "description": "CentOS 6.2 i386", + "type": "centos", + "subtype": "pv", + "size": "1.1gb", + "created": "2012-03-31 01:36:15", + "updated": "2012-03-31 01:38:50", + "bits": "32", + "tech": "pv" + }, + { + "id": "1622", + "os": "CentOS 5.8 i386", + "description": "CentOS 5.8 i386", + "type": "centos", + "subtype": "pv", + "size": "1.1gb", + "created": "2012-03-31 01:35:30", + "updated": "2012-03-31 01:38:49", + "bits": "32", + "tech": "pv" + }, + { + "id": "721", + "os": "Ubuntu 11.04 Server 
x64 PV", + "description": "

Ubuntu 11.04 base server installation.<\/p>", + "type": "ubuntu", + "subtype": "pv", + "size": "600mb", + "created": "2011-05-01 06:21:08", + "updated": "2011-05-01 13:21:08", + "bits": "64", + "tech": "pv" + } +] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/list_locations.json libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/list_locations.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/list_locations.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/list_locations.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,46 @@ +[ + { + "id": "3", + "name": "SJC - San Jose, CA" + }, + { + "id": "13", + "name": "IAD2- Reston, VA" + }, + { + "id": "21", + "name": "LAX3 - Los Angeles, CA" + }, + { + "id": "31", + "name": "CHI - Chicago, IL" + }, + { + "id": "41", + "name": "NYC - New York, NY" + }, + { + "id": "61", + "name": "MAA - Chennai (Madras), India" + }, + { + "id": "71", + "name": "LON - London, United Kingdom" + }, + { + "id": "72", + "name": "AMS2 - Amsterdam, NL" + }, + { + "id": "82", + "name": "FRA - Paris, France" + }, + { + "id": "83", + "name": "HK - Hong Kong, HK" + }, + { + "id": "101", + "name": "DFW - Dallas, TX" + } +] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/list_nodes.json libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/list_nodes.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/list_nodes.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/list_nodes.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,72 @@ +[{ + "host": "659", + "rescue": "0", + "fqdn": "server1.vr-cluster.org", + "mbpkgid": "62291", + "locked": "0", + "os": "Debian 6 i386 PV", + "ip": "208.111.45.250", + "installed": "0", + "state": "DOWN", + "package": "VR512", + "ipv6": "2607:f740:10::f98", + "city": "MAA - Chennai (Madras), India", + "status": "TERMINATED", + "os_id" 
: "1613", + "location_id" : "3", + "plan_id" : "51" +}, +{ + "host": "902", + "rescue": "0", + "fqdn": "newbuild.vr.com", + "mbpkgid": "62327", + "locked": "0", + "os": "CentOS 5.8 x64", + "ip": "208.111.39.118", + "installed": "0", + "state": "DOWN", + "package": "VR512", + "ipv6": "2607:f740:0:3f::f0d", + "city": "SJC - San Jose, CA", + "status": "TERMINATED", + "os_id" : "1613", + "location_id" : "3", + "plan_id" : "51" +}, +{ + "host": "1010", + "rescue": "0", + "fqdn": "3test.build.com", + "mbpkgid": "62300", + "locked": "0", + "os": "CentOS 6.2 x64", + "ip": "208.111.40.179", + "installed": "0", + "state": "DOWN", + "package": "VR512", + "ipv6": "2607:f740:c::f4f", + "city": "LAX3 - Los Angeles, CA", + "status": "TERMINATED", + "os_id" : "1613", + "location_id" : "3", + "plan_id" : "51" +}, +{ + "host": "1028", + "rescue": "0", + "fqdn": "libcloud2.node.com", + "mbpkgid": "74567", + "locked": "0", + "os": "CentOS 5.8 x64", + "ip": "209.177.157.99", + "installed": "1", + "state": "UP", + "package": "VR512", + "ipv6": "2607:f740:b::eff", + "city": "IAD2- Reston, VA", + "status": "RUNNING", + "os_id" : "1613", + "location_id" : "3", + "plan_id" : "51" +}] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/list_sizes.json libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/list_sizes.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/list_sizes.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/list_sizes.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,128 @@ +[ + { + "plan_id": "31", + "plan": "VR256", + "ram": "256MB", + "disk": "10GB", + "transfer": "200GB", + "price": "10.00", + "available": "1167" + }, + { + "plan_id": "41", + "plan": "VR384", + "ram": "384MB", + "disk": "15GB", + "transfer": "300GB", + "price": "15.00", + "available": "768" + }, + { + "plan_id": "51", + "plan": "VR512", + "ram": "512MB", + "disk": "20GB", + "transfer": "400GB", + "price": 
"20.00", + "available": "620" + }, + { + "plan_id": "61", + "plan": "VR768", + "ram": "768MB", + "disk": "30GB", + "transfer": "600GB", + "price": "30.00", + "available": "403" + }, + { + "plan_id": "71", + "plan": "VR1024", + "ram": "1024MB", + "disk": "40GB", + "transfer": "800GB", + "price": "40.00", + "available": "304" + }, + { + "plan_id": "81", + "plan": "VR1280", + "ram": "1280MB", + "disk": "50GB", + "transfer": "1000GB", + "price": "50.00", + "available": "234" + }, + { + "plan_id": "91", + "plan": "VR1536", + "ram": "1536MB", + "disk": "60GB", + "transfer": "1200GB", + "price": "60.00", + "available": "190" + }, + { + "plan_id": "101", + "plan": "VR2048", + "ram": "2048MB", + "disk": "80GB", + "transfer": "1600GB", + "price": "80.00", + "available": "138" + }, + { + "plan_id": "128", + "plan": "VRBL1G", + "ram": "1024MB", + "disk": "50GB", + "transfer": "1000GB", + "price": "150.00", + "available": "34" + }, + { + "plan_id": "111", + "plan": "VR4048", + "ram": "4048MB", + "disk": "160GB", + "transfer": "3200GB", + "price": "160.00", + "available": "60" + }, + { + "plan_id": "137", + "plan": "VRBL2G", + "ram": "2048MB", + "disk": "100GB", + "transfer": "2000GB", + "price": "200.00", + "available": "16" + }, + { + "plan_id": "146", + "plan": "VRBL4G", + "ram": "4048MB", + "disk": "150GB", + "transfer": "3000GB", + "price": "300.00", + "available": "8" + }, + { + "plan_id": "119", + "plan": "VR8096", + "ram": "8096MB", + "disk": "320GB", + "transfer": "6400GB", + "price": "320.00", + "available": "11" + }, + { + "plan_id": "155", + "plan": "VRBL8G", + "ram": "8096MB", + "disk": "200GB", + "transfer": "5000GB", + "price": "400.00", + "available": "4" + } +] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/node_destroy.json libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/node_destroy.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/node_destroy.json 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/node_destroy.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,3 @@ +{ + "status" : "success" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/node_reboot.json libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/node_reboot.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/node_reboot.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/node_reboot.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,3 @@ +{ + "status" : "success" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/node_start.json libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/node_start.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/node_start.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/node_start.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,3 @@ +{ + "status" : "success" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/node_stop.json libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/node_stop.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/hostvirtual/node_stop.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/hostvirtual/node_stop.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,3 @@ +{ + "status" : "success" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/allocate_address.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/allocate_address.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/allocate_address.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/allocate_address.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +

RESERVED14129279520001223PRIMARY0user@domain.com
\ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/attach_volume.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/attach_volume.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/attach_volume.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/attach_volume.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/create_volume.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/create_volume.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/create_volume.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/create_volume.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +3929314120001208user@domain.comtest-volumeraw25602012-08-20T12:25:02.792Z2011-08-12T00:00:00.000ZUSD0.001CNT020 \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/create.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/create.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/create.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/create.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +28558128558RationalInsight4woodser@us.ibm.com11LARGEMyPublicKey02010-04-19T10:03:34.327-04:002010-04-26T10:03:43.610-04:00SUSE Linux Enterprise10 SP2OS diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/delete_address.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/delete_address.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/delete_address.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/delete_address.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ + \ No newline at end of file diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/delete.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/delete.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/delete.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/delete.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/destroy_image.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/destroy_image.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/destroy_image.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/destroy_image.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/destroy_volume.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/destroy_volume.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/destroy_volume.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/destroy_volume.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/detach_volume.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/detach_volume.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/detach_volume.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/detach_volume.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/images.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/images.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/images.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/images.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,2 
@@ + +2fd2d0478b132490897526b9b4433a334Rational Build Forge Agent11SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2Rational Build Forge provides an adaptive process execution framework that automates, orchestrates, manages, and tracks all the processes between each handoff within the assembly line of software development, creating an automated software factory.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{A233F5A0-05A5-F21D-3E92-3793B722DFBD}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{A233F5A0-05A5-F21D-3E92-3793B722DFBD}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-04-25T00:00:00.000-04:00384e900960c3d4b648fa6d4670aed2cd1SUSE 10 SP211SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2SuSE v10.2 Base OS Imagehttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{07F112A1-84A7-72BF-B8FD-B36011E0E433}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{07F112A1-84A7-72BF-B8FD-B36011E0E433}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-04-25T00:00:00.000-04:0015a72d3e7bb1cb4942ab0da2968e2e77bbWebSphere Application Server and Rational Agent Controller11SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2WebSphere Application Server and Rational Agent Controller enables a performance based foundation to build, reuse, run, integrate and manage Service Oriented Architecture (SOA) applications and services.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{86E8E71D-29A3-86DE-8A26-792C5E839D92}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{86E8E71D-29A3-86DE-8A26-792C5E839D92}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-04-25T00:00:00.000-04:00117da905ba0fdf4d8b8f94e7f4ef43c1beRational Insight11SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2Rational Insight helps organizations reduce time to market, improve quality, and take greater control of software and systems development and delivery. 
It provides objective dashboards and best practice metrics to identify risks, status, and trends.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{4F774DCF-1469-EAAB-FBC3-64AE241CF8E8}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{4F774DCF-1469-EAAB-FBC3-64AE241CF8E8}/1.0/GettingStarted.htmlLARGE2009-04-25T00:00:00.000-04:0018edf7ad43f75943b1b0c0f915dba8d86cDB2 Express-C11SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2DB2 Express-C is an entry-level edition of the DB2 database server for the developer community. It has standard relational functionality and includes pureXML, and other features of DB2 for Linux, Unix, and Windows (LUW).https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{E69488DE-FB79-63CD-E51E-79505A1309BD}/2.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{E69488DE-FB79-63CD-E51E-79505A1309BD}/2.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-04-25T00:00:00.000-04:0021c03be6800bf043c0b44c584545e04099Informix Dynamic Server Developer Edition11SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2Informix Dynamic Server (IDS) Developer Edition is a development version of the IDS Enterprise Edition. 
IDS is designed to meet the database server needs of small-size to large-size enterprise businesses.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{9B0C8F66-9639-CA0A-0A94-7928D7DAD6CB}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{9B0C8F66-9639-CA0A-0A94-7928D7DAD6CB}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-04-25T00:00:00.000-04:00229b2b6482ba374a6ab4bb3585414a910aWebSphere sMash with AppBuilder11SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2WebSphere sMashĀ® provides a web platform that includes support for dynamic scripting in PHP and Groovy.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{88E74AC6-9CCB-2710-7E9B-936DA2CE496C}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{88E74AC6-9CCB-2710-7E9B-936DA2CE496C}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-04-25T00:00:00.000-04:001000150416662e71fae44bdba4d7bb502a09c5e7DB2 Enterprise V9.7 (32-bit, 90-day trial)11leonsp@ca.ibm.comPUBLICi386SuSE v10.2DB2 Enterprise V9.7 (32-bit, 90-day trial)https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{38F2AB86-9F03-E463-024D-A9ABC3AE3831}/2.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{38F2AB86-9F03-E463-024D-A9ABC3AE3831}/2.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-11-09T17:01:28.000-05:00100020639da8863714964624b8b13631642c785bRHEL 5.4 Base OS11youngdj@us.ibm.comPUBLICi386Redhat Enterprise Linux (32-bit)/5.4Red Hat Enterprise Linux 5.4 Base OShttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{34904879-E794-A2D8-2D7C-2E8D6AD6AE77}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{34904879-E794-A2D8-2D7C-2E8D6AD6AE77}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-11-18T13:51:12.000-05:0010002573e5f09a64667e4faeaf3ac661600ec6caRational Build Forge11leighw@us.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2Rational Build 
Forge provides an adaptive process execution framework that automates, orchestrates, manages, and tracks all the processes between each handoff within the assembly line of software development, creating an automated software factory.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{22E039C6-108E-B626-ECC9-E2C9B62479FF}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{22E039C6-108E-B626-ECC9-E2C9B62479FF}/1.0/GettingStarted.htmlMEDIUMLARGE2009-12-08T16:34:37.000-05:00100030563e276d758ed842caafe77770d60dedeaRational Asset Manager 7.2.0.111gmendel@us.ibm.comPUBLICi386Redhat Enterprise Linux (32-bit)/5.4Rational Asset Manager helps to create, modify, govern, find and reuse development assets, including SOA and systems development assets. It facilitates the reuse of all types of software development related assets, potentially saving development time.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{296C6DDF-B87B-327B-3E5A-F2C50C353A69}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{296C6DDF-B87B-327B-3E5A-F2C50C353A69}/1.0/GettingStarted.htmlMEDIUMLARGE2009-12-14T14:30:57.000-05:0010003854e3067f999edf4914932295cfb5f79d59WebSphere Portal/WCM 6.1.511mlamb@us.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2IBMĀ® WebSphereĀ® Portal Server enables you to quickly consolidate applications and content into role-based applications, complete with search, personalization, and security capabilities.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{279F3E12-A7EF-0768-135B-F08B66DF8F71}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{279F3E12-A7EF-0768-135B-F08B66DF8F71}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2010-01-12T18:06:29.000-05:00100038640112efd8f1e144998f2a70a165d00bd3Rational Quality Manager11brownms@gmail.comPUBLICi386Redhat Enterprise Linux (32-bit)/5.4Rational Quality Manager 
provides a collaborative application lifecycle management (ALM) environment for test planning, construction, and execution.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{9DA927BA-2CEF-1686-71B0-2BAC468B7445}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{9DA927BA-2CEF-1686-71B0-2BAC468B7445}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2010-01-15T09:40:12.000-05:00100038653fbf6936e5cb42b5959ad9837add054fIBM Mashup Center with IBM Lotus Widget Factory11mgilmore@us.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2IBM Mashup Center is an end-to-end enterprise mashup platform, supporting rapid assembly of dynamic web applications with the management, security, and governance capabilities.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{0F867D03-588B-BA51-4E18-4CE9D11AECFC}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{0F867D03-588B-BA51-4E18-4CE9D11AECFC}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2010-01-15T10:44:24.000-05:0010003780425e2dfef95647498561f98c4de356abRational Team Concert11sonia_dimitrov@ca.ibm.comPUBLICi386Redhat Enterprise Linux (32-bit)/5.4Rational Team Concert is a collaborative software delivery environment that empowers project teams to simplify, automate and govern software delivery.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{679CA6F5-1E8E-267B-0C84-F7B0B41DF1DC}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{679CA6F5-1E8E-267B-0C84-F7B0B41DF1DC}/1.0/GettingStarted.htmlMEDIUMLARGE2010-01-19T14:13:58.000-05:0010003785c4867b72f2fc43fe982e76c76c32efaaLotus Forms Turbo 3.5.111rlintern@ca.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2Lotus Forms Turbo requires no training and is designed to help customers address basic form software requirements such as surveys, applications, feedback, orders, request for submission, and more - without involvement from the IT 
department.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{846AD7D3-9A0F-E02C-89D2-BE250CAE2318}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{846AD7D3-9A0F-E02C-89D2-BE250CAE2318}/1.0/GettingStarted.htmlLARGE2010-01-22T13:27:08.000-05:0010005598Rational Requirements Composer11mutdosch@us.ibm.comPUBLICi386Redhat Enterprise Linux (32-bit)/5.4Rational Requirements Composer helps teams define and use requirements effectively across the project lifecycle.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{28C7B870-2C0A-003F-F886-B89F5B413B77}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{28C7B870-2C0A-003F-F886-B89F5B413B77}/1.0/GettingStarted.htmlMEDIUMLARGE2010-02-08T11:43:18.000-05:0010007509Rational Software Architecture11danberg@us.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2Rational Software Architect for WebSphere with the Cloud Client plug-ins created on 2/22/10 8:06 PMhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{2C6FB6D2-CB87-C4A0-CDE0-5AAF03E214B2}/1.1/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{2C6FB6D2-CB87-C4A0-CDE0-5AAF03E214B2}/1.1/GettingStarted.htmlLARGE2010-02-22T20:03:18.000-05:0010008319WebSphere Feature Pack for OSGi Apps and JPA 2.011radavenp@us.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2IBM WebSphere Application Server V7.0 Fix Pack 7, Feature Pack for OSGi Applications and Java Persistence API 2.0 Open Beta, and Feature Pack for Service Component Architecture (SCA) V1.0.1 Fix Pack V1.0.1.1https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{A397B7CD-A1C7-1956-7AEF-6AB495E37958}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{A397B7CD-A1C7-1956-7AEF-6AB495E37958}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2010-03-14T21:06:38.000-04:0010008273Rational Software Architect for 
WebSphere11danberg@us.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2Rational Software Architect for WebSphere with the Cloud Client plug-ins created on 3/15/10 12:21 PMhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{839D92BB-DEA5-9820-8E2E-AE5D0A6DEAE3}/1.1/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{839D92BB-DEA5-9820-8E2E-AE5D0A6DEAE3}/1.1/GettingStarted.htmlLARGE2010-03-15T12:17:26.000-04:0010008404Rational Application Developer11khiamt@ca.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2An Eclipse-based IDE with visual development features that helps Java developers rapidly design, develop, assemble, test, profile and deploy high quality Java/J2EE, Portal, Web/Web 2.0, Web services and SOA applications. (03/16/2010)https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{6A957586-A17A-4927-7C71-0FDE280DB66B}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{6A957586-A17A-4927-7C71-0FDE280DB66B}/1.0/GettingStarted.htmlMEDIUMLARGE2010-03-16T00:10:30.000-04:00 \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/instances_deleted.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/instances_deleted.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/instances_deleted.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/instances_deleted.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +26557126557Insight Instancewoodser@us.ibm.com11LARGEPublic keyvm519.developer.ihost.com129.33.196.12852010-04-06T15:40:24.745-04:002010-04-19T04:00:00.000-04:00SUSE Linux Enterprise10 SP2OS28194128194RSAwoodser@us.ibm.com10007509LARGEasdff22010-04-15T15:23:04.753-04:002010-04-22T15:23:13.658-04:00SUSE Linux Enterprise10 SP2OS \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/instances.xml 
libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/instances.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/instances.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/instances.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +26557126557Insight Instancewoodser@us.ibm.com11LARGEPublic keyvm519.developer.ihost.com129.33.196.12852010-04-06T15:40:24.745-04:002010-04-19T04:00:00.000-04:00SUSE Linux Enterprise10 SP2OS28193128193RAD instancewoodser@us.ibm.com10008404MEDIUMasdff22010-04-15T15:20:10.317-04:002010-04-22T15:20:19.564-04:00SUSE Linux Enterprise10 SP2OS28194128194RSAwoodser@us.ibm.com10007509LARGEasdff22010-04-15T15:23:04.753-04:002010-04-22T15:23:13.658-04:00SUSE Linux Enterprise10 SP2OS \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/list_addresses.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/list_addresses.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/list_addresses.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/list_addresses.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +
RESERVED170.225.160.218vhost021814129279520001223PRIMARY2user@dmain.com
\ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/list_storage_offerings.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/list_storage_offerings.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/list_storage_offerings.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/list_storage_offerings.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +2000120861Small256256EXT3RAW2011-08-12T00:00:00.000ZUSD0.00001UHR2000120882Small256256EXT3RAW2011-08-12T00:00:00.000ZUSD0.00001UHR2000120961Medium512512EXT3RAW2011-08-12T00:00:00.000ZUSD0.00001UHR2000120982Medium512512EXT3RAW2011-08-12T00:00:00.000ZUSD0.00001UHR2000120841Small256256EXT3RAW2011-08-12T00:00:00.000ZUSD0.00001UHR2000120941Medium512512EXT3RAW2011-08-12T00:00:00.000ZUSD0.00001UHR20001208121Small256256EXT3RAW2011-08-12T00:00:00.000ZUSD0.00001UHR20001209121Medium512512EXT3RAW2011-08-12T00:00:00.000ZUSD0.00001UHR2000121061Large20482048EXT3RAW2011-08-12T00:00:00.000ZUSD0.00001UHR2000121082Large20482048EXT3RAW2011-08-12T00:00:00.000ZUSD0.00001UHR2000121041Large20482048EXT3RAW2011-08-12T00:00:00.000ZUSD0.00001UHR20001210121Large20482048EXT3RAW2011-08-12T00:00:00.000ZUSD0.00001UHR20001208101Small256256EXT3RAW2011-08-12T00:00:00.000ZUSD0.00001UHR20001209101Medium512512EXT3RAW2011-08-12T00:00:00.000ZUSD0.00001UHR20001208141Small256256EXT3RAW2011-08-12T00:00:00.000ZUSD0.00001UHR20001209141Medium512512EXT3RAW2011-08-12T00:00:00.000ZUSD0.00001UHR20001210101Large20482048EXT3RAW2011-08-12T00:00:00.000ZUSD0.00001UHR20001210141Large20482048EXT3RAW2011-08-12T00:00:00.000ZUSD0.00001UHR \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/list_volumes.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/list_volumes.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/list_volumes.xml 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/list_volumes.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +39281141200012100user@domain.comlibcloudvolraw204842012-08-19T13:46:50.000Z2011-08-12T00:00:00.000ZUSD0.001CNT020 \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/locations.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/locations.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/locations.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/locations.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +41Raleigh, U.S.AThis data center is located in Raleigh, North Carolina, U.S.A. The services provided are: Guest Instances, Image Capture, Persistent Storage, Reserved IP, Private VLAN/VPN.RTPext3rawArea2gpfs2 of SONAS1Area1gpfs1 of SONAS1161Ehningen, GermanyThis data center is located in Ehningen(near Baden-Wurttemberg), Germany. The services provided are: Guest Instances, Image Capture, Persistent Storage, Reserved IP, Private VLAN/VPN.EHNext3rawArea2gpfs2 of SONAS1Area1gpfs1 of SONAS1182Boulder1, U.S.AThis data center is located in Boulder(near Denver), Colorado, U.S.A. The services provided are: Guest Instances, Image Capture, Persistent Storage, Reserved IP, Private VLAN/VPN.us-co-dc1ext3raw1101Markham, CanadaThis data center is located in Markham(near Toronto), Ontario, Canada. The services provided are: Guest Instances, Image Capture, Persistent Storage, Reserved IP, Private VLAN/VPN.ca-on-dc1ext3raw1121Makuhari, JapanThis data center is located in Makuhari(near Tokoyo), Japan. The services provided are: Guest Instances, Image Capture, Persistent Storage, Reserved IP, Private VLAN/VPN.ap-jp-dc1ext3raw1141Singapore, SingaporeThis data center is located in Singapore. 
The services provided are: Guest Instances, Image Capture, Persistent Storage, Reserved IP, Private VLAN/VPN.ap-sg-dc1ext3raw1 \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/reboot_active.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/reboot_active.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/reboot_active.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/reboot_active.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/sizes.xml libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/sizes.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/ibm_sce/sizes.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ibm_sce/sizes.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/joyent/my_datasets.json libcloud-0.15.1/libcloud/test/compute/fixtures/joyent/my_datasets.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/joyent/my_datasets.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/joyent/my_datasets.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +[{"id":"f953e97e-4991-11e1-9ea4-27c6e7e8afda","urn":"sdc:sdc:nodejs:1.3.3","name":"nodejs","os":"smartos","type":"smartmachine","description":"Node.js git-deploy PaaS dataset","default":false,"requirements":{},"version":"1.3.3","created":"2012-02-14T03:54:01+00:00"},{"id":"71101322-43a5-11e1-8f01-cf2a3031a7f4","urn":"sdc:sdc:ubuntu-10.04:1.0.1","name":"ubuntu-10.04","os":"linux","type":"virtualmachine","description":"Ubuntu 10.04 VM 
1.0.1","default":false,"requirements":{},"version":"1.0.1","created":"2012-02-22T18:27:32+00:00"},{"id":"7a4f84be-df6d-11e0-a504-3f6609d83831","urn":"sdc:admin:windows2008r2:1.5.0","name":"windows2008r2","os":"windows","type":"virtualmachine","description":"Windows 2008r2 Enterprise Edition","default":false,"requirements":{"max_memory":32768,"min_memory":4096},"version":"1.5.0","created":"2012-02-16T19:31:01+00:00"},{"id":"e4cd7b9e-4330-11e1-81cf-3bb50a972bda","urn":"sdc:sdc:centos-6:1.0.1","name":"centos-6","os":"linux","type":"virtualmachine","description":"Centos 6 VM 1.0.1","default":false,"requirements":{},"version":"1.0.1","created":"2012-02-15T20:04:18+00:00"},{"id":"988c2f4e-4314-11e1-8dc3-2bc6d58f4be2","urn":"sdc:sdc:centos-5.7:1.2.1","name":"centos-5.7","os":"linux","type":"virtualmachine","description":"Centos 5.7 VM 1.2.1","default":false,"requirements":{},"version":"1.2.1","created":"2012-02-14T05:53:49+00:00"},{"id":"e6ac6784-44b3-11e1-8555-87c3dd87aafe","urn":"sdc:sdc:debian-6.03:1.0.0","name":"debian-6.03","os":"linux","type":"virtualmachine","description":"Debian 6.03 VM 1.0.0","default":false,"requirements":{},"version":"1.0.0","created":"2012-02-14T05:21:53+00:00"},{"id":"3f8a3d02-43e4-11e1-9565-7f82a075e289","urn":"sdc:sdc:fedora-14:1.0.1","name":"fedora-14","os":"linux","type":"virtualmachine","description":"Fedora 14 VM 1.0.1","default":false,"requirements":{},"version":"1.0.1","created":"2012-02-14T05:20:52+00:00"},{"id":"d239389c-7535-11e1-b60a-6f75edc139df","urn":"sdc:sdc:mongodb:1.2.4","name":"mongodb","os":"smartos","type":"smartmachine","description":"MongoDB SmartMachine","default":false,"requirements":{},"version":"1.2.4","created":"2012-03-23T22:33:31+00:00"},{"id":"98f38e14-6f83-11e1-bc32-2b9d0a8b6759","urn":"sdc:sdc:mongodb:1.1.1","name":"mongodb","os":"smartos","type":"smartmachine","description":"MongoDB 
SmartMachine","default":false,"requirements":{},"version":"1.1.1","created":"2012-03-16T16:54:14+00:00"},{"id":"64d81cee-689e-11e1-a130-232647306089","urn":"local:admin:stingray-standard-1gbps:1.2.0","name":"stingray-standard-1gbps","os":"smartos","type":"smartmachine","description":"Riverbed Stingray v8.1 (formerly Zeus) Traffic Manager Hi-Throughput SmartMachine","default":false,"requirements":{},"version":"1.2.0","created":"2012-03-09T01:49:40+00:00"},{"id":"29a2fb18-689d-11e1-a2a5-47b01f708bb0","urn":"local:admin:stingray-enterprise-1gbps:1.2.0","name":"stingray-enterprise-1gbps","os":"smartos","type":"smartmachine","description":"Riverbed Stingray v8.1 (formerly Zeus) Traffic Manager Enterprise Hi-Throughput SmartMachine","default":false,"requirements":{},"version":"1.2.0","created":"2012-03-09T01:49:12+00:00"},{"id":"27750b5c-689c-11e1-a67e-6331aba2c777","urn":"local:admin:stingray-enterprise-200mbps:1.2.0","name":"stingray-enterprise-200mbps","os":"smartos","type":"smartmachine","description":"Riverbed Stingray v8.1 (formerly Zeus) Traffic Manager Enterprise SmartMachine","default":false,"requirements":{},"version":"1.2.0","created":"2012-03-09T01:49:19+00:00"},{"id":"56ffd3bc-689b-11e1-837f-735e255247ac","urn":"local:admin:stingray-standard-200mbps:1.2.0","name":"stingray-standard-200mbps","os":"smartos","type":"smartmachine","description":"Riverbed Stingray v8.1 (formerly Zeus) Traffic Manager SmartMachine","default":false,"requirements":{},"version":"1.2.0","created":"2012-03-09T01:49:49+00:00"},{"id":"c79e581a-689a-11e1-91f3-932bbde56874","urn":"local:admin:stingray-lb-200mbps:1.2.0","name":"stingray-lb-200mbps","os":"smartos","type":"smartmachine","description":"Riverbed Stingray v8.1 (formerly Zeus) Load Balancer 
SmartMachine","default":false,"requirements":{},"version":"1.2.0","created":"2012-03-09T01:49:26+00:00"},{"id":"4ca85e3a-689a-11e1-a5df-1b5ffe7065e9","urn":"local:admin:stingray-simple-lb-200mbps:1.2.0","name":"stingray-simple-lb-200mbps","os":"smartos","type":"smartmachine","description":"Riverbed Stingray v8.1 (formerly Zeus) Simple Load Balancer SmartMachine","default":false,"requirements":{},"version":"1.2.0","created":"2012-03-09T01:49:33+00:00"},{"id":"8c4c0f30-66df-11e1-a3f4-0f8e0a382b60","urn":"sdc:sdc:percona:1.3.9","name":"percona","os":"smartos","type":"smartmachine","description":"Percona SmartMachine","default":false,"requirements":{},"version":"1.3.9","created":"2012-03-05T16:41:01+00:00"},{"id":"618d80f8-66d5-11e1-998e-e384c47940f0","urn":"sdc:sdc:mongodb:1.0.9","name":"mongodb","os":"smartos","type":"smartmachine","description":"MongoDB SmartMachine","default":false,"requirements":{},"version":"1.0.9","created":"2012-03-05T15:29:21+00:00"},{"id":"a54da3a0-6319-11e1-a3d9-9fdedd2f9e17","urn":"sdc:sdc:riak:1.5.6","name":"riak","os":"smartos","type":"smartmachine","description":"Riak SmartMachine template","default":false,"requirements":{},"version":"1.5.6","created":"2012-03-01T16:18:13+00:00"},{"id":"81641caa-6321-11e1-a79a-731161c6d519","urn":"local:admin:riakeds:1.5.6","name":"riakeds","os":"smartos","type":"smartmachine","description":"Riak EDS SmartMachine template","default":false,"requirements":{},"version":"1.5.6","created":"2012-03-01T19:52:37+00:00"},{"id":"f4bc70ca-5e2c-11e1-8380-fb28785857cb","urn":"sdc:sdc:smartosplus64:3.1.0","name":"smartosplus64","os":"smartos","type":"smartmachine","description":"Generic multi-purpose SmartMachine template","default":false,"requirements":{},"version":"3.1.0","created":"2012-03-02T15:30:58+00:00"},{"id":"a963d5d0-5e29-11e1-a4d7-a31977b1e6dd","urn":"sdc:sdc:smartosplus:3.1.0","name":"smartosplus","os":"smartos","type":"smartmachine","description":"Generic multi-purpose SmartMachine 
template","default":false,"requirements":{},"version":"3.1.0","created":"2012-03-02T15:24:10+00:00"},{"id":"31bc4dbe-5e06-11e1-907c-5bed6b255fd1","urn":"sdc:sdc:smartos64:1.5.4","name":"smartos64","os":"smartos","type":"smartmachine","description":"Base template to build other templates on","default":false,"requirements":{},"version":"1.5.4","created":"2012-03-02T15:20:17+00:00"},{"id":"489754f2-5e01-11e1-8ff8-f770c2116b0d","urn":"sdc:sdc:smartos:1.5.4","name":"smartos","os":"smartos","type":"smartmachine","description":"Base template to build other templates on","default":false,"requirements":{},"version":"1.5.4","created":"2012-03-02T15:16:12+00:00"},{"id":"e05dbcac-1d44-11e1-b8ab-bf1bc04c2d65","urn":"sdc:sdc:smartosplus64:3.0.7","name":"smartosplus64","os":"smartos","type":"smartmachine","description":"Generic multi-purpose SmartMachine template","default":false,"requirements":{},"version":"3.0.7","created":"2012-02-13T19:18:56+00:00"},{"id":"fcc5996a-1d34-11e1-899e-7bd98b87947a","urn":"sdc:sdc:smartosplus:3.0.7","name":"smartosplus","os":"smartos","type":"smartmachine","description":"Generic multi-purpose SmartMachine template","default":false,"requirements":{},"version":"3.0.7","created":"2012-02-13T19:22:05+00:00"},{"id":"5fef6eda-05f2-11e1-90fc-13dac5e4a347","urn":"sdc:sdc:percona:1.2.2","name":"percona","os":"smartos","type":"smartmachine","description":"Percona SmartMachine","default":false,"requirements":{},"version":"1.2.2","created":"2012-02-13T19:23:12+00:00"},{"id":"34359ccc-21d2-2e4e-87e8-69fb36412008","urn":"sdc:sdc:windows2008r2standard:1.5.1","name":"windows2008r2standard","os":"windows","type":"virtualmachine","description":"windows2008r2standard VM image","default":false,"requirements":{"max_memory":32768,"min_memory":4096},"version":"1.5.1","created":"2012-03-13T18:25:53+00:00"},{"id":"a9380908-ea0e-11e0-aeee-4ba794c83c33","urn":"sdc:sdc:percona:1.0.7","name":"percona","os":"smartos","type":"smartmachine","description":"Percona 
SmartMachine","default":false,"requirements":{},"version":"1.0.7","created":"2012-02-13T19:24:17+00:00"},{"id":"df3589dc-df9a-11e0-a3a3-07ceee3e7d54","urn":"sdc:sdc:smartosplus64:3.0.4","name":"smartosplus64","os":"smartos","type":"smartmachine","description":"Generic multi-purpose SmartMachine template","default":false,"requirements":{},"version":"3.0.4","created":"2012-02-13T19:27:27+00:00"},{"id":"aded640a-df98-11e0-b050-1f55ff3ddfa7","urn":"sdc:sdc:smartosplus:3.0.4","name":"smartosplus","os":"smartos","type":"smartmachine","description":"Generic multi-purpose SmartMachine template","default":false,"requirements":{},"version":"3.0.4","created":"2012-02-13T19:30:28+00:00"},{"id":"3fcf35d2-dd79-11e0-bdcd-b3c7ac8aeea6","urn":"sdc:sdc:mysql:1.4.1","name":"mysql","os":"smartos","type":"smartmachine","description":"MySQL SmartMachine","default":false,"requirements":{},"version":"1.4.1","created":"2012-02-13T19:32:51+00:00"},{"id":"141194fa-dd77-11e0-8539-27dd8d8264b8","urn":"sdc:sdc:smartos64:1.4.7","name":"smartos64","os":"smartos","type":"smartmachine","description":"Base template to build other templates on","default":false,"requirements":{},"version":"1.4.7","created":"2012-02-13T19:33:21+00:00"},{"id":"f8ea0bb8-dd75-11e0-87c3-af5352ad3bd6","urn":"sdc:sdc:smartos:1.4.7","name":"smartos","os":"smartos","type":"smartmachine","description":"Base template to build other templates on","default":false,"requirements":{},"version":"1.4.7","created":"2012-02-13T19:33:50+00:00"}] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/joyent/my_machines_create.json libcloud-0.15.1/libcloud/test/compute/fixtures/joyent/my_machines_create.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/joyent/my_machines_create.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/joyent/my_machines_create.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ 
+{"id":"2fb67f5f-53f2-40ab-9d99-b9ff68cfb2ab","name":"testlc","type":"virtualmachine","state":"provisioning","dataset":"sdc:sdc:ubuntu-10.04:1.0.1","ips":["165.225.129.129","10.112.1.130"],"memory":1024,"disk":30720,"metadata":{"root_authorized_keys":"ssh-rsa abcd== Joyent SSH\n"},"created":"2012-04-11T04:08:27+00:00","updated":"2012-04-11T04:08:27+00:00"} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/joyent/my_machines.json libcloud-0.15.1/libcloud/test/compute/fixtures/joyent/my_machines.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/joyent/my_machines.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/joyent/my_machines.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,2 @@ +[{"id":"2fb67f5f-53f2-40ab-9d99-b9ff68cfb2ab","name":"testlc1","type":"virtualmachine","state":"running","dataset":"sdc:sdc:ubuntu-10.04:1.0.1","ips":["165.225.129.129","10.112.1.130"],"memory":1024,"disk":30720,"metadata":{"root_authorized_keys":"ssh-rsa abc== JoyentSSH\n"},"created":"2012-04-11T04:08:32+00:00","updated":"2012-04-11T04:08:42+00:00"}, +{"id":"2fb67f5f-53f2-40ab-9d99-b9ff68cfb2ab","name":"testlc2","type":"virtualmachine","state":"running","dataset":"sdc:sdc:ubuntu-10.04:1.0.1","ips":["165.225.129.128","10.112.1.131"],"memory":1024,"disk":30720,"metadata":{"root_authorized_keys":"ssh-rsa abc== Joyent SSH\n", "credentials": {"root": "abc"}},"created":"2012-04-11T04:08:32+00:00","updated":"2012-04-11T04:08:42+00:00"}] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/joyent/my_packages.json libcloud-0.15.1/libcloud/test/compute/fixtures/joyent/my_packages.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/joyent/my_packages.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/joyent/my_packages.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +[{"name":"Large 16GB","memory":16384,"disk":491520,"vcpus":3,"swap":32768,"default":false},{"name":"XL 8GB High 
CPU","memory":8192,"disk":245760,"vcpus":4,"swap":16384,"default":false},{"name":"XL 32GB","memory":32768,"disk":778240,"vcpus":4,"swap":65536,"default":false},{"name":"XXL 48GB","memory":49152,"disk":1048576,"vcpus":8,"swap":98304,"default":false},{"name":"XXXL 64GB ","memory":65536,"disk":1572864,"vcpus":12,"swap":131072,"default":false},{"name":"Medium 1GB High-CPU","memory":1024,"disk":61440,"vcpus":2,"swap":2048,"default":false},{"name":"Small 1GB","memory":1024,"disk":30720,"vcpus":1,"swap":2048,"default":true},{"name":"Medium 2GB","memory":2048,"disk":61440,"vcpus":1,"swap":4096,"default":false},{"name":"Medium 4GB","memory":4096,"disk":122880,"vcpus":1,"swap":8192,"default":false},{"name":"Large 8GB","memory":8192,"disk":245760,"vcpus":2,"swap":16384,"default":false}] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "deployvirtualmachineresponse" : {"jobid":17164,"id":2602} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_deployfail2.json libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_deployfail2.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_deployfail2.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_deployfail2.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "deployvirtualmachineresponse" : {"jobid":17177,"id":2602} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_deployfail.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_deployfail.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_deployfail.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/deployVirtualMachine_deployfail.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "deployvirtualmachineresponse" : {"errorcode" : 431, "errortext" : "Unable to find service offering: 104"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/destroyVirtualMachine_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/destroyVirtualMachine_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/destroyVirtualMachine_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/destroyVirtualMachine_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "destroyvirtualmachineresponse" : {"jobid":17166} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"listavailableproducttypesresponse":{"count":112,"producttypes":[{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-829-12-38","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"Centos 5.4 32bit","templateid":"829","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-829-13-38","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB 
RAM","serviceofferingid":"13","templatedesc":"Centos 5.4 32bit","templateid":"829","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-829-14-39","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"Centos 5.4 32bit","templateid":"829","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-829-15-39","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"Centos 5.4 32bit","templateid":"829","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-829-16-40","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"Centos 5.4 32bit","templateid":"829","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-829-17-40","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"Centos 5.4 32bit","templateid":"829","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-829-18-41","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"Centos 5.4 32bit","templateid":"829","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-829-36-41","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"Centos 5.4 32bit","templateid":"829","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-830-12-38","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"Centos 5.4 
64bit","templateid":"830","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-830-13-38","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"Centos 5.4 64bit","templateid":"830","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-830-14-39","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"Centos 5.4 64bit","templateid":"830","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-830-15-39","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"Centos 5.4 64bit","templateid":"830","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-830-16-40","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"Centos 5.4 64bit","templateid":"830","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-830-17-40","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"Centos 5.4 64bit","templateid":"830","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-830-18-41","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"Centos 5.4 64bit","templateid":"830","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-830-36-41","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"Centos 5.4 
64bit","templateid":"830","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-867-12-38","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"Debian 6 Squeeze 32bit","templateid":"867","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-867-13-38","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"Debian 6 Squeeze 32bit","templateid":"867","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-867-14-39","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"Debian 6 Squeeze 32bit","templateid":"867","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-867-15-39","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"Debian 6 Squeeze 32bit","templateid":"867","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-867-16-40","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"Debian 6 Squeeze 32bit","templateid":"867","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-867-17-40","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"Debian 6 Squeeze 32bit","templateid":"867","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-867-18-41","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"Debian 6 Squeeze 
32bit","templateid":"867","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-867-36-41","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"Debian 6 Squeeze 32bit","templateid":"867","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-880-12-38","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"Debian 6 Squeeze 64bit","templateid":"880","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-880-13-38","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"Debian 6 Squeeze 64bit","templateid":"880","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-880-14-39","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"Debian 6 Squeeze 64bit","templateid":"880","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-880-15-39","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"Debian 6 Squeeze 64bit","templateid":"880","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-880-16-40","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"Debian 6 Squeeze 64bit","templateid":"880","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-880-17-40","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"Debian 6 Squeeze 
64bit","templateid":"880","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-880-18-41","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"Debian 6 Squeeze 64bit","templateid":"880","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-880-36-41","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"Debian 6 Squeeze 64bit","templateid":"880","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-881-12-38","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"Fedora release 13 64 bit","templateid":"881","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-881-13-38","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"Fedora release 13 64 bit","templateid":"881","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-881-14-39","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"Fedora release 13 64 bit","templateid":"881","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-881-15-39","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"Fedora release 13 64 bit","templateid":"881","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-881-16-40","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"Fedora release 13 64 
bit","templateid":"881","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-881-17-40","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"Fedora release 13 64 bit","templateid":"881","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-881-18-41","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"Fedora release 13 64 bit","templateid":"881","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-881-36-41","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"Fedora release 13 64 bit","templateid":"881","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-877-12-38","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"Ubuntu 11.04 32bit","templateid":"877","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-877-13-38","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"Ubuntu 11.04 32bit","templateid":"877","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-877-14-39","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"Ubuntu 11.04 32bit","templateid":"877","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-877-15-39","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"Ubuntu 11.04 
32bit","templateid":"877","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-877-16-40","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"Ubuntu 11.04 32bit","templateid":"877","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-877-17-40","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"Ubuntu 11.04 32bit","templateid":"877","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-877-18-41","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"Ubuntu 11.04 32bit","templateid":"877","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-877-36-41","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"Ubuntu 11.04 32bit","templateid":"877","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-878-12-38","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"Ubuntu 11.04 64bit","templateid":"878","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-878-13-38","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"Ubuntu 11.04 64bit","templateid":"878","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-878-14-39","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"Ubuntu 11.04 
64bit","templateid":"878","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-878-15-39","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"Ubuntu 11.04 64bit","templateid":"878","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-878-16-40","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"Ubuntu 11.04 64bit","templateid":"878","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-878-17-40","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"Ubuntu 11.04 64bit","templateid":"878","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-878-18-41","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"Ubuntu 11.04 64bit","templateid":"878","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-878-36-41","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"Ubuntu 11.04 64bit","templateid":"878","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-988-12-25","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"WIN 2003 R2 32bit","templateid":"988","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-988-13-25","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"WIN 2003 R2 
32bit","templateid":"988","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-988-14-26","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"WIN 2003 R2 32bit","templateid":"988","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-988-15-26","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"WIN 2003 R2 32bit","templateid":"988","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-988-16-27","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"WIN 2003 R2 32bit","templateid":"988","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-988-17-27","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"WIN 2003 R2 32bit","templateid":"988","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-988-18-28","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"WIN 2003 R2 32bit","templateid":"988","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-988-36-28","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"WIN 2003 R2 32bit","templateid":"988","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-986-12-25","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"WIN 2003 ENT 
32bit","templateid":"986","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-986-13-25","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"WIN 2003 ENT 32bit","templateid":"986","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-986-14-26","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"WIN 2003 ENT 32bit","templateid":"986","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-986-15-26","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"WIN 2003 ENT 32bit","templateid":"986","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-986-16-27","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"WIN 2003 ENT 32bit","templateid":"986","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-986-17-27","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"WIN 2003 ENT 32bit","templateid":"986","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-986-18-28","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"WIN 2003 ENT 32bit","templateid":"986","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-986-36-28","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"WIN 2003 ENT 
32bit","templateid":"986","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-989-12-25","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"WIN 2008 ENT 32bit","templateid":"989","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-989-13-25","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"WIN 2008 ENT 32bit","templateid":"989","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-989-14-26","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"WIN 2008 ENT 32bit","templateid":"989","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-989-15-26","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"WIN 2008 ENT 32bit","templateid":"989","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-989-16-27","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"WIN 2008 ENT 32bit","templateid":"989","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-989-17-27","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"WIN 2008 ENT 32bit","templateid":"989","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-989-18-28","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"WIN 2008 ENT 
32bit","templateid":"989","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-989-36-28","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"WIN 2008 ENT 32bit","templateid":"989","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-990-12-25","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"WIN 2008 R2 64bit","templateid":"990","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-990-13-25","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"WIN 2008 R2 64bit","templateid":"990","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-990-14-26","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"WIN 2008 R2 64bit","templateid":"990","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-990-15-26","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"WIN 2008 R2 64bit","templateid":"990","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-990-16-27","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"WIN 2008 R2 64bit","templateid":"990","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-990-17-27","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"WIN 2008 R2 
64bit","templateid":"990","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-990-18-28","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"WIN 2008 R2 64bit","templateid":"990","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-990-36-28","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"WIN 2008 R2 64bit","templateid":"990","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-991-12-25","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"WIN 2008 R2 Mssql","templateid":"991","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"25","productid":"1-991-13-25","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"WIN 2008 R2 Mssql","templateid":"991","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-991-14-26","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"WIN 2008 R2 Mssql","templateid":"991","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"26","productid":"1-991-15-26","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"WIN 2008 R2 Mssql","templateid":"991","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-991-16-27","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"WIN 2008 R2 
Mssql","templateid":"991","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"27","productid":"1-991-17-27","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"WIN 2008 R2 Mssql","templateid":"991","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-991-18-28","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"WIN 2008 R2 Mssql","templateid":"991","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"28","productid":"1-991-36-28","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"WIN 2008 R2 Mssql","templateid":"991","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-1111-12-38","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"Ubuntu 10.04 32bit","templateid":"1111","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-1111-13-38","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"Ubuntu 10.04 32bit","templateid":"1111","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-1111-14-39","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"Ubuntu 10.04 32bit","templateid":"1111","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-1111-15-39","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"Ubuntu 10.04 
32bit","templateid":"1111","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-1111-16-40","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"Ubuntu 10.04 32bit","templateid":"1111","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-1111-17-40","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"Ubuntu 10.04 32bit","templateid":"1111","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-1111-18-41","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"Ubuntu 10.04 32bit","templateid":"1111","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-1111-36-41","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"Ubuntu 10.04 32bit","templateid":"1111","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-1109-12-38","productstate":"available","serviceofferingdesc":"standard 1vCore 1GB RAM","serviceofferingid":"12","templatedesc":"Ubuntu 10.04 64bit","templateid":"1109","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"100GB","diskofferingid":"38","productid":"1-1109-13-38","productstate":"available","serviceofferingdesc":"standard 1vCore 2GB RAM","serviceofferingid":"13","templatedesc":"Ubuntu 10.04 64bit","templateid":"1109","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-1109-14-39","productstate":"available","serviceofferingdesc":"standard 2vCore 2GB RAM","serviceofferingid":"14","templatedesc":"Ubuntu 10.04 
64bit","templateid":"1109","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"200GB","diskofferingid":"39","productid":"1-1109-15-39","productstate":"available","serviceofferingdesc":"standard 2vCore 4GB RAM","serviceofferingid":"15","templatedesc":"Ubuntu 10.04 64bit","templateid":"1109","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-1109-16-40","productstate":"available","serviceofferingdesc":"standard 4vCore 4GB RAM","serviceofferingid":"16","templatedesc":"Ubuntu 10.04 64bit","templateid":"1109","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"300GB","diskofferingid":"40","productid":"1-1109-17-40","productstate":"available","serviceofferingdesc":"standard 4vCore 8GB RAM","serviceofferingid":"17","templatedesc":"Ubuntu 10.04 64bit","templateid":"1109","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-1109-18-41","productstate":"available","serviceofferingdesc":"standard 8vCore 8GB RAM","serviceofferingid":"18","templatedesc":"Ubuntu 10.04 64bit","templateid":"1109","zonedesc":"MOCKDONG","zoneid":"1"},{"diskofferingdesc":"400GB","diskofferingid":"41","productid":"1-1109-36-41","productstate":"available","serviceofferingdesc":"standard 8vCore 16GB RAM","serviceofferingid":"36","templatedesc":"Ubuntu 10.04 64bit","templateid":"1109","zonedesc":"MOCKDONG","zoneid":"1"}]}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_nodisk.json libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_nodisk.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_nodisk.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_nodisk.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1 @@ +{"listavailableproducttypesresponse": {"count": 2, "producttypes": [{"diskofferingdesc": "100GB", 
"templatedesc": "Centos 5.4 32bit", "serviceofferingdesc": "premium 2vCore 2GB RAM", "productstate": "available", "product": "Standard", "zoneid": "eceb5d65-6571-4696-875f-5a17949f3317", "zonedesc": "KOR-Central A", "templateid": "d2241244-0b6c-444d-b075-72cfcd562826", "diskofferingid": "cc85e4dd-bfd9-4cec-aa22-cf226c1da92f", "serviceofferingid": "94341d94-ccd4-4dc4-9ccb-05c0c632d0b4", "productid": "eceb5d65-6571-4696-875f-5a17949f3317-d2241244-0b6c-444d-b075-72cfcd562826-94341d94-ccd4-4dc4-9ccb-05c0c632d0b4-cc85e4dd-bfd9-4cec-aa22-cf226c1da92f"}, {"diskofferingdesc": "20GB", "templatedesc": "Centos 5.4 32bit", "serviceofferingdesc": "premium 2vCore 2GB RAM", "productstate": "available", "product": "Standard", "zoneid": "eceb5d65-6571-4696-875f-5a17949f3317", "zonedesc": "KOR-Central A", "templateid": "d2241244-0b6c-444d-b075-72cfcd562826", "serviceofferingid": "94341d94-ccd4-4dc4-9ccb-05c0c632d0b4", "productid": "eceb5d65-6571-4696-875f-5a17949f3317-d2241244-0b6c-444d-b075-72cfcd562826-94341d94-ccd4-4dc4-9ccb-05c0c632d0b4-0"}]}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_notemplates.json libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_notemplates.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_notemplates.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/listAvailableProductTypes_notemplates.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listavailableproducttypesresponse" : { "count" : 0, "producttypes" : [] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/listPortForwardingRules_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/listPortForwardingRules_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/listPortForwardingRules_default.json 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/listPortForwardingRules_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listportforwardingrulesresponse" : {"count": 1, "portforwardingrule": [{"protocol": "tcp", "virtualmachineid": "7d8de712-aa7a-4901-a8b1-fd223f0ca459", "ipaddress": "178.170.71.253", "cidrlist": "", "tags": [], "ipaddressid": "50cd9456-d4db-4a48-8cf5-950dba8d2fdb", "virtualmachinedisplayname": "yoyo", "privateendport": "22", "state": "Active", "publicendport": "22", "privateport": "22", "virtualmachinename": "yoyo", "publicport": "22", "id": "4644652a-7573-4e50-aafb-48a171c9bcb2"}]}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/listPublicIpAddresses_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/listPublicIpAddresses_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/listPublicIpAddresses_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/listPublicIpAddresses_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listpublicipaddressesresponse" : { "publicipaddress" : [ {"id":34000,"ipaddress":"1.1.1.49","allocated":"2011-06-23T05:20:39+0000","zoneid":1,"zonename":"Sydney","issourcenat":false,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"}, {"id":33999,"ipaddress":"1.1.1.48","allocated":"2011-06-23T05:20:34+0000","zoneid":1,"zonename":"Sydney","issourcenat":false,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"}, 
{"id":33998,"ipaddress":"1.1.1.47","allocated":"2011-06-23T05:20:30+0000","zoneid":1,"zonename":"Sydney","issourcenat":false,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"}, {"id":33970,"ipaddress":"1.1.1.19","allocated":"2011-06-20T04:08:34+0000","zoneid":1,"zonename":"Sydney","issourcenat":true,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/listVirtualMachines_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/listVirtualMachines_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/listVirtualMachines_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/listVirtualMachines_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listvirtualmachinesresponse" : { "virtualmachine" : [ {"id":2600,"name":"test","displayname":"test","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:06:42+0000","state":"Running","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"1.78%","networkkbsread":2,"networkkbswrite":2,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3891,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.2.1","ipaddress":"1.1.1.116","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}, 
{"id":2601,"name":"test","displayname":"test","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:09:44+0000","state":"Starting","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"jobid":17147,"jobstatus":0,"nic":[{"id":3892,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.2.1","ipaddress":"1.1.1.203","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/listZones_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/listZones_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/listZones_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/listZones_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listzonesresponse" : { "zone" : [ {"id":1,"name":"Sydney","networktype":"Advanced","securitygroupsenabled":false} ] } } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17164.json libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17164.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17164.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17164.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : 
{"jobid":17164,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":2602,"name":"fred","displayname":"fred","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:48:31+0000","state":"Running","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3893,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.1.2","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}}} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17165.json libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17165.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17165.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17165.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17165,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":2602,"name":"fred","displayname":"fred","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:48:31+0000","state":"Running","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro 
PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"0.14%","networkkbsread":2,"networkkbswrite":1,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3893,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.1.2","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}}} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17166.json libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17166.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17166.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17166.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17166,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":2602,"name":"fred","displayname":"fred","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:48:31+0000","state":"Destroyed","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"0.13%","networkkbsread":2,"networkkbswrite":1,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3893,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.1.2","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}}} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17177.json libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17177.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17177.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_17177.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17177,"jobstatus":2} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_attachvolumejob.json libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_attachvolumejob.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_attachvolumejob.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_attachvolumejob.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"accountid":"be7d76b3-8823-49c0-86e1-29efd9ea1eb0","userid":"a8bd3087-edc1-4e94-8470-6830404b7292","cmd":"com.cloud.api.commands.AttachVolumeCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"volume":{"id":"60338035-92fb-4d27-98d4-b60ad4b38b87","name":"vol-0","zoneid":"58624957-a150-46a3-acbf-4088776161e5","zonename":"EQ-AMS2-Z01","type":"DATADISK","deviceid":5,"virtualmachineid":"ab2c18f6-00a6-43f8-9fe0-efecb3165dd7","vmname":"ab2c18f6-00a6-43f8-9fe0-efecb3165dd7","vmdisplayname":"gre-kickstart","vmstate":"Running","size":10737418240,"created":"2012-06-05T08:47:54+0200","state":"Ready","account":"admin","domainid":"bfc35f83-8589-4e93-9150-d57e8479f772","domain":"ROOT","storagetype":"shared","hypervisor":"KVM","diskofferingid":"6345e3b7-227e-4209-8f8c-1f94219696e6","diskofferingname":"Disk offering 1","diskofferingdisplaytext":"Disk offering 1 display name","storage":"Shared Storage CL01","attached":"2012-06-05T09:17:38+0200","destroyed":false,"isextractable":false}},"created":"2012-06-05T09:17:38+0200","jobid":"e07d6b9b-2b6c-45bd-840b-3c4c3d890168"} } diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_createvolumejob.json libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_createvolumejob.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_createvolumejob.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/queryAsyncJobResult_createvolumejob.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"accountid":"be7d76b3-8823-49c0-86e1-29efd9ea1eb0","userid":"a8bd3087-edc1-4e94-8470-6830404b7292","cmd":"com.cloud.api.commands.CreateVolumeCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"volume":{"id":"60338035-92fb-4d27-98d4-b60ad4b38b87","name":"vol-0","zoneid":"58624957-a150-46a3-acbf-4088776161e5","zonename":"EQ-AMS2-Z01","type":"DATADISK","size":10737418240,"created":"2012-06-05T08:47:54+0200","state":"Allocated","account":"admin","domainid":"bfc35f83-8589-4e93-9150-d57e8479f772","domain":"ROOT","storagetype":"shared","hypervisor":"None","diskofferingid":"6345e3b7-227e-4209-8f8c-1f94219696e6","diskofferingname":"Disk offering","diskofferingdisplaytext":"Disk offering display name","storage":"none","destroyed":false,"isextractable":false}},"created":"2012-06-05T08:47:54+0200","jobid":"35416f6d-1b5b-4ceb-a7d4-aab0deede71b"} } diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/rebootVirtualMachine_default.json libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/rebootVirtualMachine_default.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/ktucloud/rebootVirtualMachine_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/ktucloud/rebootVirtualMachine_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "rebootvirtualmachineresponse" : {"jobid":17165} } diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/linode/_avail_datacenters.json libcloud-0.15.1/libcloud/test/compute/fixtures/linode/_avail_datacenters.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/linode/_avail_datacenters.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/linode/_avail_datacenters.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,36 @@ +{ + "ERRORARRAY": [], + "DATA": [ + { + "LOCATION": "Dallas, TX, USA", + "DATACENTERID": 2, + "ABBR": "dallas" + }, + { + "LOCATION": "Fremont, CA, USA", + "DATACENTERID": 3, + "ABBR": "fremont" + }, + { + "LOCATION": "Atlanta, GA, USA", + "DATACENTERID": 4, + "ABBR": "atlanta" + }, + { + "LOCATION": "Newark, NJ, USA", + "DATACENTERID": 6, + "ABBR": "newark" + }, + { + "LOCATION": "London, England, UK", + "DATACENTERID": 7, + "ABBR": "london" + }, + { + "LOCATION": "Tokyo, JP", + "DATACENTERID": 8, + "ABBR": "tokyo" + } + ], + "ACTION": "avail.datacenters" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/linode/_avail_distributions.json libcloud-0.15.1/libcloud/test/compute/fixtures/linode/_avail_distributions.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/linode/_avail_distributions.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/linode/_avail_distributions.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,246 @@ +{ + "ERRORARRAY": [], + "DATA": [ + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 112, + "IS64BIT": 1, + "LABEL": "Arch Linux 2013.06", + "MINIMAGESIZE": 500, + "CREATE_DT": "2013-06-06 02:45:11.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 89, + "IS64BIT": 1, + "LABEL": "CentOS 6.2", + "MINIMAGESIZE": 800, + "CREATE_DT": "2011-07-19 11:38:20.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 78, + "IS64BIT": 1, + "LABEL": "Debian 6", + "MINIMAGESIZE": 550, + "CREATE_DT": "2011-02-08 16:54:31.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + 
"DISTRIBUTIONID": 109, + "IS64BIT": 1, + "LABEL": "Debian 7", + "MINIMAGESIZE": 660, + "CREATE_DT": "2013-05-08 11:31:32.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 114, + "IS64BIT": 1, + "LABEL": "Fedora 19", + "MINIMAGESIZE": 750, + "CREATE_DT": "2013-08-26 15:29:21.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 53, + "IS64BIT": 1, + "LABEL": "Gentoo", + "MINIMAGESIZE": 1000, + "CREATE_DT": "2009-04-04 00:00:00.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 115, + "IS64BIT": 1, + "LABEL": "openSUSE 12.3", + "MINIMAGESIZE": 1024, + "CREATE_DT": "2013-09-19 10:49:09.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 87, + "IS64BIT": 1, + "LABEL": "Slackware 13.37", + "MINIMAGESIZE": 600, + "CREATE_DT": "2011-06-05 15:11:59.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 65, + "IS64BIT": 1, + "LABEL": "Ubuntu 10.04 LTS", + "MINIMAGESIZE": 450, + "CREATE_DT": "2010-04-29 00:00:00.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 99, + "IS64BIT": 1, + "LABEL": "Ubuntu 12.04 LTS", + "MINIMAGESIZE": 600, + "CREATE_DT": "2012-04-26 17:25:16.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 111, + "IS64BIT": 1, + "LABEL": "Ubuntu 13.04", + "MINIMAGESIZE": 770, + "CREATE_DT": "2013-05-08 11:31:32.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 113, + "IS64BIT": 0, + "LABEL": "Arch Linux 2013.06 32bit", + "MINIMAGESIZE": 500, + "CREATE_DT": "2013-06-06 02:45:11.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 88, + "IS64BIT": 0, + "LABEL": "CentOS 6.2 32bit", + "MINIMAGESIZE": 800, + "CREATE_DT": "2011-07-19 11:38:20.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 77, + "IS64BIT": 0, + "LABEL": "Debian 6 32bit", + "MINIMAGESIZE": 550, + "CREATE_DT": "2011-02-08 16:54:31.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 108, + "IS64BIT": 0, + "LABEL": "Debian 7 32bit", + "MINIMAGESIZE": 660, + "CREATE_DT": "2013-05-08 11:31:32.0" + }, + { + 
"REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 72, + "IS64BIT": 0, + "LABEL": "Gentoo 32bit", + "MINIMAGESIZE": 1000, + "CREATE_DT": "2010-09-13 00:00:00.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 86, + "IS64BIT": 0, + "LABEL": "Slackware 13.37 32bit", + "MINIMAGESIZE": 600, + "CREATE_DT": "2011-06-05 15:11:59.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 64, + "IS64BIT": 0, + "LABEL": "Ubuntu 10.04 LTS 32bit", + "MINIMAGESIZE": 450, + "CREATE_DT": "2010-04-29 00:00:00.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 98, + "IS64BIT": 0, + "LABEL": "Ubuntu 12.04 LTS 32bit", + "MINIMAGESIZE": 600, + "CREATE_DT": "2012-04-26 17:25:16.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 110, + "IS64BIT": 0, + "LABEL": "Ubuntu 13.04 32bit", + "MINIMAGESIZE": 770, + "CREATE_DT": "2013-05-08 11:31:32.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 105, + "IS64BIT": 1, + "LABEL": "Arch Linux 2012.10", + "MINIMAGESIZE": 500, + "CREATE_DT": "2012-10-22 15:00:49.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 60, + "IS64BIT": 1, + "LABEL": "CentOS 5.6 64bit", + "MINIMAGESIZE": 950, + "CREATE_DT": "2009-08-17 00:00:00.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 100, + "IS64BIT": 1, + "LABEL": "Fedora 17", + "MINIMAGESIZE": 800, + "CREATE_DT": "2012-05-31 16:03:49.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 97, + "IS64BIT": 1, + "LABEL": "openSUSE 12.1", + "MINIMAGESIZE": 1000, + "CREATE_DT": "2012-04-13 11:43:30.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 107, + "IS64BIT": 1, + "LABEL": "Ubuntu 12.10", + "MINIMAGESIZE": 660, + "CREATE_DT": "2012-11-06 11:51:25.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 104, + "IS64BIT": 0, + "LABEL": "Arch Linux 2012.10 32bit", + "MINIMAGESIZE": 500, + "CREATE_DT": "2012-10-22 15:00:49.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 59, + "IS64BIT": 0, + "LABEL": "CentOS 5.6 32bit", + 
"MINIMAGESIZE": 950, + "CREATE_DT": "2009-08-17 00:00:00.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 101, + "IS64BIT": 0, + "LABEL": "Fedora 17 32bit", + "MINIMAGESIZE": 800, + "CREATE_DT": "2012-05-31 16:03:49.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 96, + "IS64BIT": 0, + "LABEL": "openSUSE 12.1 32bit", + "MINIMAGESIZE": 1000, + "CREATE_DT": "2012-04-13 11:43:30.0" + }, + { + "REQUIRESPVOPSKERNEL": 1, + "DISTRIBUTIONID": 106, + "IS64BIT": 0, + "LABEL": "Ubuntu 12.10 32bit", + "MINIMAGESIZE": 660, + "CREATE_DT": "2012-11-06 11:51:25.0" + } + ], + "ACTION": "avail.distributions" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/linode/_avail_kernels.json libcloud-0.15.1/libcloud/test/compute/fixtures/linode/_avail_kernels.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/linode/_avail_kernels.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/linode/_avail_kernels.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,146 @@ +{ + "ERRORARRAY": [], + "ACTION": "avail.kernels", + "DATA": [ + { + "LABEL": "Latest 2.6 Stable (2.6.18.8-linode19)", + "ISXEN": 1, + "KERNELID": 60 + }, + { + "LABEL": "2.6.18.8-linode19", + "ISXEN": 1, + "KERNELID": 103 + }, + { + "LABEL": "2.6.30.5-linode20", + "ISXEN": 1, + "KERNELID": 105 + }, + { + "LABEL": "Latest 2.6 Stable (2.6.18.8-x86_64-linode7)", + "ISXEN": 1, + "KERNELID": 107 + }, + { + "LABEL": "2.6.18.8-x86_64-linode7", + "ISXEN": 1, + "KERNELID": 104 + }, + { + "LABEL": "2.6.30.5-x86_64-linode8", + "ISXEN": 1, + "KERNELID": 106 + }, + { + "LABEL": "pv-grub-x86_32", + "ISXEN": 1, + "KERNELID": 92 + }, + { + "LABEL": "pv-grub-x86_64", + "ISXEN": 1, + "KERNELID": 95 + }, + { + "LABEL": "Recovery - Finnix (kernel)", + "ISXEN": 1, + "KERNELID": 61 + }, + { + "LABEL": "2.6.18.8-domU-linode7", + "ISXEN": 1, + "KERNELID": 81 + }, + { + "LABEL": "2.6.18.8-linode10", + "ISXEN": 1, + "KERNELID": 89 + }, + { + "LABEL": 
"2.6.18.8-linode16", + "ISXEN": 1, + "KERNELID": 98 + }, + { + "LABEL": "2.6.24.4-linode8", + "ISXEN": 1, + "KERNELID": 84 + }, + { + "LABEL": "2.6.25-linode9", + "ISXEN": 1, + "KERNELID": 88 + }, + { + "LABEL": "2.6.25.10-linode12", + "ISXEN": 1, + "KERNELID": 90 + }, + { + "LABEL": "2.6.26-linode13", + "ISXEN": 1, + "KERNELID": 91 + }, + { + "LABEL": "2.6.27.4-linode14", + "ISXEN": 1, + "KERNELID": 93 + }, + { + "LABEL": "2.6.28-linode15", + "ISXEN": 1, + "KERNELID": 96 + }, + { + "LABEL": "2.6.28.3-linode17", + "ISXEN": 1, + "KERNELID": 99 + }, + { + "LABEL": "2.6.29-linode18", + "ISXEN": 1, + "KERNELID": 101 + }, + { + "LABEL": "2.6.16.38-x86_64-linode2", + "ISXEN": 1, + "KERNELID": 85 + }, + { + "LABEL": "2.6.18.8-x86_64-linode1", + "ISXEN": 1, + "KERNELID": 86 + }, + { + "LABEL": "2.6.27.4-x86_64-linode3", + "ISXEN": 1, + "KERNELID": 94 + }, + { + "LABEL": "2.6.28-x86_64-linode4", + "ISXEN": 1, + "KERNELID": 97 + }, + { + "LABEL": "2.6.28.3-x86_64-linode5", + "ISXEN": 1, + "KERNELID": 100 + }, + { + "LABEL": "2.6.29-x86_64-linode6", + "ISXEN": 1, + "KERNELID": 102 + }, + { + "LABEL": "3.9.3-x86-linode52", + "ISXEN": 1, + "KERNELID": 137 + }, + { + "LABEL": "3.9.3-x86_64-linode33", + "ISXEN": 1, + "KERNELID": 138 + } + ] +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/linode/_avail_linodeplans.json libcloud-0.15.1/libcloud/test/compute/fixtures/linode/_avail_linodeplans.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/linode/_avail_linodeplans.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/linode/_avail_linodeplans.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,125 @@ +{ + "ERRORARRAY": [], + "DATA": [{ + "PRICE": 20.00, + "RAM": 2048, + "XFER": 3000, + "PLANID": 1, + "LABEL": "Linode 2048", + "AVAIL": { + "3": 500, + "2": 500, + "7": 500, + "6": 500, + "4": 500, + "8": 500 + }, + "DISK": 48 + }, { + "PRICE": 40.00, + "RAM": 4096, + "XFER": 4000, + "PLANID": 3, + 
"LABEL": "Linode 4096", + "AVAIL": { + "3": 500, + "2": 500, + "7": 500, + "6": 500, + "4": 500, + "8": 500 + }, + "DISK": 96 + }, { + "PRICE": 80.00, + "RAM": 8192, + "XFER": 8000, + "PLANID": 5, + "LABEL": "Linode 8192", + "AVAIL": { + "3": 500, + "2": 500, + "7": 500, + "6": 500, + "4": 500, + "8": 500 + }, + "DISK": 192 + }, { + "PRICE": 160.00, + "RAM": 16384, + "XFER": 16000, + "PLANID": 6, + "LABEL": "Linode 16384", + "AVAIL": { + "3": 500, + "2": 500, + "7": 500, + "6": 500, + "4": 500, + "8": 500 + }, + "DISK": 384 + }, { + "PRICE": 320.00, + "RAM": 32768, + "XFER": 20000, + "PLANID": 7, + "LABEL": "Linode 32768", + "AVAIL": { + "3": 500, + "2": 500, + "7": 500, + "6": 500, + "4": 500, + "8": 500 + }, + "DISK": 768 + }, { + "PRICE": 480.00, + "RAM": 49152, + "XFER": 20000, + "PLANID": 8, + "LABEL": "Linode 49152", + "AVAIL": { + "3": 500, + "2": 500, + "7": 500, + "6": 500, + "4": 500, + "8": 500 + }, + "DISK": 1152 + }, { + "PRICE": 640.00, + "RAM": 65536, + "XFER": 20000, + "PLANID": 9, + "LABEL": "Linode 65536", + "AVAIL": { + "3": 500, + "2": 500, + "7": 500, + "6": 500, + "4": 500, + "8": 500 + }, + "DISK": 1536 + }, { + "PRICE": 960.00, + "RAM": 98304, + "XFER": 20000, + "PLANID": 11, + "LABEL": "Linode 98304", + "AVAIL": { + "3": 500, + "2": 500, + "7": 500, + "6": 500, + "4": 500, + "8": 500 + }, + "DISK": 1920 + }], + "ACTION": "avail.linodeplans" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/linode/_batch.json libcloud-0.15.1/libcloud/test/compute/fixtures/linode/_batch.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/linode/_batch.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/linode/_batch.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,22 @@ +[ + { + "ERRORARRAY": [], + "DATA": [ + { + "IPADDRESSID": 5384, + "RDNS_NAME": "li22-54.members.linode.com", + "LINODEID": 8098, + "ISPUBLIC": 1, + "IPADDRESS": "66.228.43.47" + }, + { + "IPADDRESSID": 5575, + "RDNS_NAME": 
"li22-245.members.linode.com", + "LINODEID": 8098, + "ISPUBLIC": 1, + "IPADDRESS": "75.127.96.245" + } + ], + "ACTION": "linode.ip.list" + } +] diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/linode/_linode_ip_list.json libcloud-0.15.1/libcloud/test/compute/fixtures/linode/_linode_ip_list.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/linode/_linode_ip_list.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/linode/_linode_ip_list.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,20 @@ +{ + "ACTION": "linode.ip.list", + "DATA": [ + { + "IPADDRESS": "66.228.43.47", + "IPADDRESSID": 5384, + "ISPUBLIC": 1, + "LINODEID": 8098, + "RDNS_NAME": "li22-54.members.linode.com" + }, + { + "IPADDRESS": "75.127.96.245", + "IPADDRESSID": 5575, + "ISPUBLIC": 1, + "LINODEID": 8098, + "RDNS_NAME": "li22-245.members.linode.com" + } + ], + "ERRORARRAY": [] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/linode/_linode_list.json libcloud-0.15.1/libcloud/test/compute/fixtures/linode/_linode_list.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/linode/_linode_list.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/linode/_linode_list.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,33 @@ +{ + "ERRORARRAY": [], + "DATA": [ + { + "ALERT_CPU_ENABLED": 1, + "ALERT_BWIN_ENABLED": 1, + "ALERT_BWQUOTA_ENABLED": 1, + "BACKUPWINDOW": 0, + "ALERT_DISKIO_THRESHOLD": 1000, + "DISTRIBUTIONVENDOR": "Debian", + "WATCHDOG": 1, + "DATACENTERID": 6, + "STATUS": 1, + "ALERT_DISKIO_ENABLED": 1, + "CREATE_DT": "2012-05-04 19:31:30.0", + "TOTALHD": 49152, + "ALERT_BWQUOTA_THRESHOLD": 80, + "TOTALRAM": 2048, + "ALERT_BWIN_THRESHOLD": 5, + "LINODEID": 8098, + "ALERT_BWOUT_THRESHOLD": 5, + "ALERT_BWOUT_ENABLED": 1, + "BACKUPSENABLED": 1, + "ALERT_CPU_THRESHOLD": 90, + "PLANID": "1", + "BACKUPWEEKLYDAY": 0, + "LABEL": "api-node3", + "LPM_DISPLAYGROUP": "test", + "TOTALXFER": 3000 + } + ], + 
"ACTION": "linode.list" +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/meta/helloworld.txt libcloud-0.15.1/libcloud/test/compute/fixtures/meta/helloworld.txt --- libcloud-0.5.0/libcloud/test/compute/fixtures/meta/helloworld.txt 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/meta/helloworld.txt 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +Hello, World! \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/misc/dummy_rsa libcloud-0.15.1/libcloud/test/compute/fixtures/misc/dummy_rsa --- libcloud-0.5.0/libcloud/test/compute/fixtures/misc/dummy_rsa 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/misc/dummy_rsa 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAs0ya+QTUpUyAxbFWN81CbW23D7Fm8O1wxP3l0UPu9OO/dAES +irxNxbBEanTGb8HMdaLEdLBlXaYAIlf8+YhG+c9o7kKe8kCR3j4hJ3x0x/fTVSTf +mNQc7XIUaM9tuCGj/fO2zfn3fD5fztWAwssPm1+cyP3pAgvc/H03SNpdQG05ylZ+ +1I2QYymYtbjzGh9Nen6dN/aSDrZI7yIA1o3hsDoiY2Nb82l958UI3uJKaxGeBSpO +Mshutar3gWa/v9F6uqHDTmFEqQdvQGdCHHyWuz98jMVUc0kvWjdH5q5X95CBZFQM +uOQPNxn2aYjMaP7pU2jvfrU0sLpWT/tG8ZApJwIDAQABAoIBAECotZJr7YuW5TFl +3GPPP89aq5ObNDZaSjqgHUq0Ju5lW1uoL1mcwqcyA9ooNo+C4griIdq66jM1dALu +nCoYvQ/Ffl+94rgWFQSCf05QEYUzmCCyZXgltjDi3P1XIIgwiYVBaIErTdaeX8ql +MAQPWpd7iXzqJCc6w/zB4zgAl3Rt1Fb8GBFHlYf3VTpiU9LA5/IG04GoPk80OgiW +98lercisWT+nPrTMDu2GoEqqls8OkM9CcT5AgeXIpSF9nPmQgUQWXoqWkrZhD+eQ +mOxCqpqzwkW/JdsUaBqhPAJtK/eBHTPAfsOabQ5G6/Un1HejN0GTIR0GJzTSEOvi +blM3YuECgYEA53XL8c8S5ckc1BGfM22obY1daAVgFqsNpqVSV4OKKnRlcmtYaUXq +61vrQN/OG9Ogrr7FuL7HwavJnr3IbT8uET1/pUje/NQviERwSZWiNX++GUCSXUvq +hSe9LZb3ezTEkUROdGXOfl+TfI/bhojsk6egaqqKAVv8LR92cwzMD28CgYEAxk8T +x278NOLUn+F6ije186xpcI0JV+NydhXk40ViDHc7M2ycHNArc6iJzQIlQjkEDejK +yae3c3QMVtszwONSd6wPkPr9bLbiiT0UlG5gpGAGyEyYZjMQukg2e8ImnwMVMm2l +bJsrDI5CRq4G20CWPDqxzs8FTuX78tX4uewzJckCgYBmi1a2o8JAkZA3GDOLClsj 
+Zgzq5+7BPDlJCldntDxDQNwtDAfYZASHD2szi7P5uhGnOZfJmKRRVnV14ajpVaNo +OfHSXW2FX9BLM973itaZkyW6dFQkB104bvmuOAMez6sCnNuRUAVjEZ77AZUFjqYZ +aJt2hmWr4n/f0d+dax8A+wKBgEVV7LJ0KZZMIM9txKyC4gk6pPsHNNcX3TNQYGDe +J3P4VCicttCUMD0WFmgpceF/kd1TIWP0Uf35+z57EdNFJ9ZTwHWObAEQyI/3XTSw +ivWt5XEu5rIE9LpM+U+4CEzchRLGp2obrqeLLb0Mp7UNFfolA3otg8ucOcUj7v0C +ireRAoGAMM5MDDtWyduLH9srxC3PBKdD4Hi8dtzkQ9yAFYTJ0HB4vV7MmIZ2U2j7 +x2KTrPc/go/Jm7+UOmVa4LNkdRvXxVOlAxH85Hqr+n74mm/dWcS4dDWrZvL+Sn+l +GFa29M3Ix5SnlfFkZhijvTFLICC7XPTRj6uqVHscZVfENhAYGoU= +-----END RSA PRIVATE KEY----- diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/misc/dummy_rsa.pub libcloud-0.15.1/libcloud/test/compute/fixtures/misc/dummy_rsa.pub --- libcloud-0.5.0/libcloud/test/compute/fixtures/misc/dummy_rsa.pub 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/misc/dummy_rsa.pub 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCzTJr5BNSlTIDFsVY3zUJtbbcPsWbw7XDE/eXRQ+704790ARKKvE3FsERqdMZvwcx1osR0sGVdpgAiV/z5iEb5z2juQp7yQJHePiEnfHTH99NVJN+Y1BztchRoz224IaP987bN+fd8Pl/O1YDCyw+bX5zI/ekCC9z8fTdI2l1AbTnKVn7UjZBjKZi1uPMaH016fp039pIOtkjvIgDWjeGwOiJjY1vzaX3nxQje4kprEZ4FKk4yyG61qveBZr+/0Xq6ocNOYUSpB29AZ0IcfJa7P3yMxVRzSS9aN0fmrlf3kIFkVAy45A83GfZpiMxo/ulTaO9+tTSwulZP+0bxkCkn dummycomment diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/nephoscale/list_images.json libcloud-0.15.1/libcloud/test/compute/fixtures/nephoscale/list_images.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/nephoscale/list_images.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/nephoscale/list_images.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,243 @@ +{ + "success": true, + "total_count": 18, + "subcode": 0, + "message": "Your request was processed successfully.", + "data": [ + { + "max_cpu": 64, + "deployable_type": "both", + "is_active": true, + "friendly_name": "Linux CentOS 5.5 32-bit", + "uri": 
"https://api.nephoscale.com/image/server/3/", + "max_memory": 128, + "id": 3, + "is_default": true, + "create_time": "2010-12-20 14:25:36", + "architecture": "x86", + "base_type": "linux" + }, + { + "max_cpu": 64, + "deployable_type": "both", + "is_active": true, + "friendly_name": "Linux CentOS 5.5 64-bit", + "uri": "https://api.nephoscale.com/image/server/5/", + "max_memory": 128, + "id": 5, + "is_default": true, + "create_time": "2010-12-20 14:25:36", + "architecture": "x86_64", + "base_type": "linux" + }, + { + "max_cpu": 64, + "deployable_type": "both", + "is_active": true, + "friendly_name": "Windows Server 2008 64-bit", + "uri": "https://api.nephoscale.com/image/server/21/", + "max_memory": 128, + "id": 21, + "is_default": true, + "create_time": "2010-12-20 14:25:36", + "architecture": "x86_64", + "base_type": "windows" + }, + { + "max_cpu": 64, + "deployable_type": "both", + "is_active": true, + "friendly_name": "Linux Debian Server 5.05 32-bit", + "uri": "https://api.nephoscale.com/image/server/23/", + "max_memory": 128, + "id": 23, + "is_default": true, + "create_time": "2010-12-20 16:51:20", + "architecture": "x86", + "base_type": "linux" + }, + { + "max_cpu": 64, + "deployable_type": "both", + "is_active": true, + "friendly_name": "Linux Debian Server 5.05 64-bit", + "uri": "https://api.nephoscale.com/image/server/25/", + "max_memory": 128, + "id": 25, + "is_default": true, + "create_time": "2010-12-20 16:55:42", + "architecture": "x86_64", + "base_type": "linux" + }, + { + "max_cpu": 64, + "deployable_type": "both", + "is_active": true, + "friendly_name": "Windows Server 2003 Enterprise 64-bit", + "uri": "https://api.nephoscale.com/image/server/33/", + "max_memory": 128, + "id": 33, + "is_default": true, + "create_time": "2011-03-02 14:20:49", + "architecture": "x86_64", + "base_type": "windows" + }, + { + "max_cpu": 64, + "deployable_type": "both", + "is_active": true, + "friendly_name": "Linux CentOS 5.7 64-bit", + "uri": 
"https://api.nephoscale.com/image/server/41/", + "max_memory": 128, + "id": 41, + "is_default": true, + "create_time": "2011-09-19 17:30:04", + "architecture": "x86_64", + "base_type": "linux" + }, + { + "max_cpu": 64, + "deployable_type": "both", + "is_active": true, + "friendly_name": "Linux Ubuntu Server 10.04 LTS 32-bit", + "uri": "https://api.nephoscale.com/image/server/43/", + "max_memory": 128, + "id": 43, + "is_default": true, + "create_time": "2011-10-01 02:26:17", + "architecture": "x86", + "base_type": "linux" + }, + { + "max_cpu": 64, + "deployable_type": "both", + "is_active": true, + "friendly_name": "Linux CentOS 5.7 32-bit", + "uri": "https://api.nephoscale.com/image/server/45/", + "max_memory": 128, + "id": 45, + "is_default": true, + "create_time": "2011-10-05 19:41:30", + "architecture": "x86", + "base_type": "linux" + }, + { + "max_cpu": 64, + "deployable_type": "both", + "is_active": true, + "friendly_name": "Linux Ubuntu Server 10.04 LTS 64-bit", + "uri": "https://api.nephoscale.com/image/server/49/", + "max_memory": 128, + "id": 49, + "is_default": true, + "create_time": "2011-10-08 05:01:10", + "architecture": "x86_64", + "base_type": "linux" + }, + { + "max_cpu": 64, + "deployable_type": "both", + "is_active": true, + "friendly_name": "Linux Debian Server 6.0.3 64-bit", + "uri": "https://api.nephoscale.com/image/server/51/", + "max_memory": 128, + "id": 51, + "is_default": true, + "create_time": "2011-10-08 19:54:41", + "architecture": "x86_64", + "base_type": "linux" + }, + { + "max_cpu": 64, + "deployable_type": "both", + "is_active": true, + "friendly_name": "Linux Debian 5.0.9 64-bit", + "uri": "https://api.nephoscale.com/image/server/55/", + "max_memory": 128, + "id": 55, + "is_default": false, + "create_time": "2011-10-13 12:53:36", + "architecture": "x86_64", + "base_type": "linux" + }, + { + "max_cpu": 64, + "deployable_type": "both", + "is_active": true, + "friendly_name": "Linux Debian 5.0.9 32-bit", + "uri": 
"https://api.nephoscale.com/image/server/57/", + "max_memory": 128, + "id": 57, + "is_default": false, + "create_time": "2011-10-13 12:55:09", + "architecture": "x86", + "base_type": "linux" + }, + { + "max_cpu": 64, + "deployable_type": "both", + "is_active": true, + "friendly_name": "Linux CentOS 6.2 64-bit", + "uri": "https://api.nephoscale.com/image/server/59/", + "max_memory": 128, + "id": 59, + "is_default": true, + "create_time": "2011-10-15 17:11:34", + "architecture": "x86_64", + "base_type": "linux" + }, + { + "max_cpu": 64, + "deployable_type": "both", + "is_active": true, + "friendly_name": "Linux CentOS 5.8 64-bit", + "uri": "https://api.nephoscale.com/image/server/64/", + "max_memory": 128, + "id": 64, + "is_default": true, + "create_time": "2012-03-28 19:54:10", + "architecture": "x86_64", + "base_type": "linux" + }, + { + "max_cpu": 64, + "deployable_type": "both", + "is_active": true, + "friendly_name": "Linux Ubuntu Server 12.04 LTS 64-bit", + "uri": "https://api.nephoscale.com/image/server/75/", + "max_memory": 128, + "id": 75, + "is_default": true, + "create_time": "2012-05-18 08:41:03", + "architecture": "x86_64", + "base_type": "linux" + }, + { + "max_cpu": 64, + "deployable_type": "cloud", + "is_active": true, + "friendly_name": "VOD Cloud Storage Proxy (FTP:HTTP)", + "uri": "https://api.nephoscale.com/image/server/101/", + "max_memory": 128, + "id": 101, + "is_default": false, + "create_time": "2012-08-30 08:49:55", + "architecture": "x86_64", + "base_type": "linux" + }, + { + "max_cpu": 64, + "deployable_type": "both", + "is_active": true, + "friendly_name": "Debian 7.1 64-bit", + "uri": "https://api.nephoscale.com/image/server/177/", + "max_memory": 128, + "id": 177, + "is_default": true, + "create_time": "2013-09-10 16:12:10", + "architecture": "x86_64", + "base_type": "linux" + } + ], + "response": 200 +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/nephoscale/list_keys.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/nephoscale/list_keys.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/nephoscale/list_keys.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/nephoscale/list_keys.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,25 @@ +{ + "success": true, + "total_count": 2, + "subcode": 0, + "message": "Your request was processed successfully.", + "data": [ + { + "name": "mistio-ssh", + "key_group": 1, + "uri": "https://api.nephoscale.com/key/sshrsa/72209/", + "key_type": 2, + "create_time": "2013-10-02 07:24:37", + "id": 72209 + }, + { + "name": "mistio-testing", + "key_group": 4, + "uri": "https://api.nephoscale.com/key/password/72211/", + "key_type": 1, + "create_time": "2013-10-02 07:27:10", + "id": 72211 + } + ], + "response": 200 +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/nephoscale/list_locations.json libcloud-0.15.1/libcloud/test/compute/fixtures/nephoscale/list_locations.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/nephoscale/list_locations.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/nephoscale/list_locations.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,31 @@ +{ + "success": true, + "total_count": 2, + "subcode": 0, + "message": "Your request was processed successfully.", + "data": [ + { + "datacenter": { + "airport_code": "SJC", + "name": "SJC-1", + "uri": "https://api.nephoscale.com/datacenter/1/", + "id": 1 + }, + "uri": "https://api.nephoscale.com/datacenter/zone/86945/", + "id": 86945, + "name": "SJC-1" + }, + { + "datacenter": { + "airport_code": "RIC", + "name": "RIC-1", + "uri": "https://api.nephoscale.com/datacenter/3/", + "id": 3 + }, + "uri": "https://api.nephoscale.com/datacenter/zone/87729/", + "id": 87729, + "name": "RIC-1" + } + ], + "response": 200 +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/nephoscale/list_nodes.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/nephoscale/list_nodes.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/nephoscale/list_nodes.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/nephoscale/list_nodes.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,161 @@ +{ + "success": true, + "total_count": 2, + "subcode": 0, + "message": "Your request was processed successfully.", + "data": [ + { + "server_keys": [ + { + "key_type": 2, + "key_group": 1, + "id": 71757, + "uri": "https://api.nephoscale.com/key/sshrsa/71157/" + } + ], + "name": "mongodb-staging", + "zone": { + "uri": "https://api.nephoscale.com/datacenter/zone/88211/", + "id": 87729, + "name": "RIC-1" + }, + "image": { + "max_cpu": 64, + "deployable_type": "both", + "is_active": true, + "friendly_name": "Linux Ubuntu Server 10.04 LTS 64-bit", + "uri": "https://api.nephoscale.com/image/server/49/", + "max_memory": 128, + "id": 49, + "is_default": true, + "create_time": "2011-10-08 05:01:10", + "architecture": "x86_64", + "has_agent": true, + "base_type": "linux" + }, + "hostname": "mongodb-staging", + "podzone": "P1A2", + "uri": "https://api.nephoscale.com/server/cloud/87241/", + "ipaddresses": "198.89.117.16", + "power_status": "on", + "create_time": "2013-09-25 07:38:53", + "postinit_state": 1, + "console_keys": [ + { + "key_type": 1, + "key_group": 4, + "id": 71761, + "uri": "https://api.nephoscale.com/key/password/71761/" + } + ], + "memory": 512, + "service_type": { + "sku": { + "name": "CS05", + "description": "Cloud Server 0.5 GB RAM, 1 Core" + }, + "uri": "https://api.nephoscale.com/server/type/cloud/5/", + "friendly_name": "CS05 - 0.5GB, 1Core, 25GB", + "id": 5, + "billable_type": 1 + }, + "network_ports": [ + { + "macaddress": "00:16:3e:06:dc:41", + "devname": "eth0", + "network_domain": { + "domain_type": 0, + "name": "default_public_network_RIC" + } + }, + { + "macaddress": "00:16:3e:06:dc:45", + "devname": "eth1", + "network_domain": { + 
"domain_type": 1, + "name": "default_private_network_RIC" + } + } + ], + "id": 88241, + "is_console_enabled": true + }, + { + "server_keys": [ + { + "key_type": 2, + "key_group": 1, + "id": 72049, + "uri": "https://api.nephoscale.com/key/sshrsa/72049/" + } + ], + "name": "backup-server2", + "zone": { + "uri": "https://api.nephoscale.com/datacenter/zone/88751/", + "id": 87729, + "name": "RIC-1" + }, + "image": { + "max_cpu": 64, + "deployable_type": "both", + "is_active": true, + "friendly_name": "Linux Debian Server 6.0.3 64-bit", + "uri": "https://api.nephoscale.com/image/server/51/", + "max_memory": 128, + "id": 51, + "is_default": true, + "create_time": "2011-10-08 19:54:41", + "architecture": "x86_64", + "has_agent": true, + "base_type": "linux" + }, + "hostname": "backup-server2", + "podzone": "P1A2", + "uri": "https://api.nephoscale.com/server/cloud/88751/", + "ipaddresses": "198.89.112.115", + "power_status": "on", + "create_time": "2013-10-02 05:02:50", + "postinit_state": 1, + "console_keys": [ + { + "key_type": 1, + "key_group": 4, + "id": 72165, + "uri": "https://api.nephoscale.com/key/password/72165/" + } + ], + "memory": 512, + "service_type": { + "sku": { + "name": "CS05", + "description": "Cloud Server 0.5 GB RAM, 1 Core" + }, + "uri": "https://api.nephoscale.com/server/type/cloud/5/", + "friendly_name": "CS05 - 0.5GB, 1Core, 25GB", + "id": 5, + "billable_type": 1 + }, + "network_ports": [ + { + "macaddress": "00:16:3e:06:f5:2f", + "devname": "eth0", + "network_domain": { + "domain_type": 0, + "name": "default_public_network_RIC" + } + }, + { + "macaddress": "00:16:3e:06:f5:33", + "devname": "eth1", + "network_domain": { + "domain_type": 1, + "name": "default_private_network_RIC" + } + } + ], + "id": 88751, + "is_console_enabled": true + } + ], + "response": 200 +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/nephoscale/list_password_keys.json libcloud-0.15.1/libcloud/test/compute/fixtures/nephoscale/list_password_keys.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/nephoscale/list_password_keys.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/nephoscale/list_password_keys.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,18 @@ +{ + "success": true, + "total_count": 1, + "subcode": 0, + "message": "Your request was processed successfully.", + "data": [ + { + "name": "mistio-testing", + "key_group": 4, + "uri": "https://api.nephoscale.com/key/password/72211/", + "key_type": 1, + "create_time": "2013-10-02 07:27:10", + "password": "23d493j5", + "id": 72211 + } + ], + "response": 200 +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/nephoscale/list_sizes.json libcloud-0.15.1/libcloud/test/compute/fixtures/nephoscale/list_sizes.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/nephoscale/list_sizes.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/nephoscale/list_sizes.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,178 @@ +{ + "success": true, + "total_count": 13, + "subcode": 0, + "message": "Your request was processed successfully.", + "data": [ + { + "sku": { + "name": "CS16.16", + "description": "Cloud Server 16 GB RAM, 16 Cores" + }, + "storage": 800, + "ram": 16384, + "friendly_name": "CS16.16 - 16GB, 16Core, 800GB", + "uri": "https://api.nephoscale.com/server/type/cloud/1/", + "vcpus": 16, + "id": 1, + "billable_type": 1 + }, + { + "sku": { + "name": "CS1", + "description": "Cloud Server 1 GB RAM, 1 Core" + }, + "storage": 50, + "ram": 1024, + "friendly_name": "CS1 - 1GB, 1Core, 50GB", + "uri": "https://api.nephoscale.com/server/type/cloud/3/", + "vcpus": 1, + "id": 3, + "billable_type": 1 + }, + { + "sku": { + "name": "CS05", + "description": "Cloud Server 0.5 GB RAM, 1 Core" + }, + "storage": 25, + "ram": 512, + "friendly_name": "CS05 - 0.5GB, 1Core, 25GB", + "uri": "https://api.nephoscale.com/server/type/cloud/5/", + "vcpus": 1, + "id": 5, + "billable_type": 1 + }, + { + 
"sku": { + "name": "CS2.2", + "description": "Cloud Server 2 GB RAM, 2 Cores" + }, + "storage": 100, + "ram": 2048, + "friendly_name": "CS2.2 - 2GB, 2Core, 100GB", + "uri": "https://api.nephoscale.com/server/type/cloud/7/", + "vcpus": 2, + "id": 7, + "billable_type": 1 + }, + { + "sku": { + "name": "CS4.4", + "description": "Cloud Server 4 GB RAM, 4 Cores" + }, + "storage": 200, + "ram": 4096, + "friendly_name": "CS4.4 - 4GB, 4Core, 200GB", + "uri": "https://api.nephoscale.com/server/type/cloud/9/", + "vcpus": 4, + "id": 9, + "billable_type": 1 + }, + { + "sku": { + "name": "CS8.8", + "description": "Cloud Server 8 GB RAM, 8 Cores" + }, + "storage": 400, + "ram": 8192, + "friendly_name": "CS8.8 - 8GB, 8Core, 400GB", + "uri": "https://api.nephoscale.com/server/type/cloud/11/", + "vcpus": 8, + "id": 11, + "billable_type": 1 + }, + { + "sku": { + "name": "CS025", + "description": "Cloud Server 0.25 GB RAM" + }, + "storage": 15, + "ram": 256, + "friendly_name": "CS025 - 0.25GB, 10GB", + "uri": "https://api.nephoscale.com/server/type/cloud/27/", + "vcpus": 1, + "id": 27, + "billable_type": 1 + }, + { + "sku": { + "name": "CS2.1", + "description": "Cloud Server 2 GB RAM, 1 Core" + }, + "storage": 75, + "ram": 2048, + "friendly_name": "CS2.1 - 2GB, 1Core, 75GB", + "uri": "https://api.nephoscale.com/server/type/cloud/46/", + "vcpus": 1, + "id": 46, + "billable_type": 1 + }, + { + "sku": { + "name": "CS4.2", + "description": "Cloud Server 4 GB RAM, 2 Cores" + }, + "storage": 150, + "ram": 4096, + "friendly_name": "CS4.2 - 4GB, 2Core, 150GB", + "uri": "https://api.nephoscale.com/server/type/cloud/48/", + "vcpus": 2, + "id": 48, + "billable_type": 1 + }, + { + "sku": { + "name": "CS8.4", + "description": "Cloud Server 8 GB RAM, 4 Cores" + }, + "storage": 300, + "ram": 8192, + "friendly_name": "CS8.4 - 8GB, 4Core, 300GB", + "uri": "https://api.nephoscale.com/server/type/cloud/50/", + "vcpus": 4, + "id": 50, + "billable_type": 1 + }, + { + "sku": { + "name": "CS16.8", + 
"description": "Cloud Server 16 GB RAM, 8 Cores" + }, + "storage": 600, + "ram": 16384, + "friendly_name": "CS16.8 - 16GB, 8Core, 600GB", + "uri": "https://api.nephoscale.com/server/type/cloud/52/", + "vcpus": 8, + "id": 52, + "billable_type": 1 + }, + { + "sku": { + "name": "CS32.16", + "description": "Cloud Server 32 GB RAM, 16 Cores" + }, + "storage": 1200, + "ram": 32768, + "friendly_name": "CS32.16 - 32GB, 16Core, 1200GB", + "uri": "https://api.nephoscale.com/server/type/cloud/54/", + "vcpus": 16, + "id": 54, + "billable_type": 1 + }, + { + "sku": { + "name": "CS32.8", + "description": "Cloud Server 32 GB RAM, 8 Cores" + }, + "storage": 1000, + "ram": 32768, + "friendly_name": "CS32.8 - 32GB, 8Core, 1000GB", + "uri": "https://api.nephoscale.com/server/type/cloud/56/", + "vcpus": 8, + "id": 56, + "billable_type": 1 + } + ], + "response": 200 +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/nephoscale/list_ssh_keys.json libcloud-0.15.1/libcloud/test/compute/fixtures/nephoscale/list_ssh_keys.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/nephoscale/list_ssh_keys.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/nephoscale/list_ssh_keys.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,18 @@ +{ + "success": true, + "total_count": 1, + "subcode": 0, + "message": "Your request was processed successfully.", + "data": [ + { + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBs+gQwoeFNa+4pYz2AKz5Op7EqrzeP3YsyTKxx7P9gt4aSt5w8Z+lRn3p3CVG+th5i6lZqOxWgCZ1kp2KEKNbSsA2HWl3OwkY8IqHGSEeMrF+3A2Ncz88kUIAWzCswxPY4uqb/yA4EzEQDk7PJj7Q1DruObhOm7qyHT40n2KJ3TqHJQlV9XE3RcXSaQcwUt0YFXFMx8wkgy0NKqqSiQuH8RofyfnOABEzKAARGbcQjZWxh2ITzUmwMxUCBa0X5wvblgcE6/pRZN5Xq6NQr2XEU5Z48+mLy6asdasdwrM0v10Y7ojDL/TosK/8T5+d5yaRsvtBlBstDZhNWY31n5iCLxx user@mistio", + "name": "mistio-ssh", + "key_group": 1, + "uri": "https://api.nephoscale.com/key/sshrsa/72209/", + "key_type": 2, + "create_time": "2013-10-02 07:24:37", + "id": 72209 + } + ], 
+ "response": 200 +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/nephoscale/success_action.json libcloud-0.15.1/libcloud/test/compute/fixtures/nephoscale/success_action.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/nephoscale/success_action.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/nephoscale/success_action.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,11 @@ +{ + "subcode": 0, + "message": "Your request was processed successfully.", + "data": { + "id": 141229, + "resource_type": "/job", + "uri": "https://api.nephoscale.com/job/141229/" + }, + "response": 202, + "success": true +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/compute_15.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/compute_15.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/compute_15.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/compute_15.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,15 @@ + + + 15 + Compute 15 + ACTIVE + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/compute_25.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/compute_25.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/compute_25.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/compute_25.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,10 @@ + + + 25 + Compute 25 + none + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/compute_5.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/compute_5.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/compute_5.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/compute_5.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,15 @@ + + + 5 + 
Compute 5 + ACTIVE + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/computes.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/computes.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/computes.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/computes.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/disk_15.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/disk_15.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/disk_15.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/disk_15.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + 15 + Ubuntu 9.04 LAMP + 2048 + file:///images/ubuntu/jaunty.img + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/disk_5.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/disk_5.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/disk_5.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/disk_5.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + 5 + Ubuntu 9.04 LAMP + 2048 + file:///images/ubuntu/jaunty.img + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/network_15.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/network_15.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/network_15.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/network_15.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + 15 + Network 15 +
192.168.1.0
+ 256 +
diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/network_5.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/network_5.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/network_5.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/network_5.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + 5 + Network 5 +
192.168.0.0
+ 256 +
diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/networks.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/networks.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/networks.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/networks.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/storage.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/storage.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_1_4/storage.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_1_4/storage.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/compute_15.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/compute_15.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/compute_15.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/compute_15.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,25 @@ + + + 15 + Compute 15 + small + ACTIVE + + + DISK + hda + + + + 192.168.0.2 + 02:00:c0:a8:00:02 + + + + 192.168.1.2 + 02:00:c0:a8:01:02 + + + compute-15 + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/compute_25.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/compute_25.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/compute_25.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/compute_25.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,17 @@ + + + 25 + Compute 25 + none + none + + + 192.168.0.3 + 02:00:c0:a8:00:03 + + + + 192.168.1.3 + 02:00:c0:a8:01:03 + + diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/compute_5.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/compute_5.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/compute_5.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/compute_5.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,25 @@ + + + 5 + Compute 5 + small + ACTIVE + + + DISK + hda + + + + 192.168.0.1 + 02:00:c0:a8:00:01 + + + + 192.168.1.1 + 02:00:c0:a8:01:01 + + + compute-5 + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/compute_collection.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/compute_collection.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/compute_collection.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/compute_collection.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/network_15.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/network_15.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/network_15.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/network_15.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + 15 + Network 15 +
192.168.1.0
+ 256 +
diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/network_5.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/network_5.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/network_5.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/network_5.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + 5 + Network 5 +
192.168.0.0
+ 256 +
diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/network_collection.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/network_collection.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/network_collection.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/network_collection.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/storage_15.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/storage_15.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/storage_15.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/storage_15.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,8 @@ + + + 15 + Ubuntu 9.04 LAMP + OS + Ubuntu 9.04 LAMP Description + 2048 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/storage_5.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/storage_5.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/storage_5.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/storage_5.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,8 @@ + + + 5 + Ubuntu 9.04 LAMP + OS + Ubuntu 9.04 LAMP Description + 2048 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/storage_collection.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/storage_collection.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_2_0/storage_collection.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_2_0/storage_collection.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_0/network_15.xml 
libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_0/network_15.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_0/network_15.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_0/network_15.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,8 @@ + + + 15 + Network 15 +
192.168.1.0
+ 256 + NO +
diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_0/network_5.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_0/network_5.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_0/network_5.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_0/network_5.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,8 @@ + + + 5 + Network 5 +
192.168.0.0
+ 256 + YES +
diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_0/network_collection.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_0/network_collection.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_0/network_collection.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_0/network_collection.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_2/compute_5.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_2/compute_5.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_2/compute_5.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_2/compute_5.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,25 @@ + + + 5 + Compute 5 + small + ACTIVE + + + DISK + hda + + + + 192.168.0.1 + 02:00:c0:a8:00:01 + + + + 192.168.1.1 + 02:00:c0:a8:01:01 + + + compute-5 + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_2/instance_type_collection.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_2/instance_type_collection.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_2/instance_type_collection.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_2/instance_type_collection.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,18 @@ + + + + small + 1 + 1024 + + + medium + 4 + 4096 + + + large + 8 + 8192 + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_6/compute_15.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_6/compute_15.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_6/compute_15.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_6/compute_15.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,17 @@ + + + 15 + Compute 15 
Test + small + ACTIVE + + + FILE + hda + + + + 192.168.122.2 + 02:00:c0:a8:7a:02 + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_6/compute_5.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_6/compute_5.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_6/compute_5.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_6/compute_5.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,22 @@ + + + 5 + Compute 5 Test + small + ACTIVE + + + FILE + hda + + + + FILE + sda + + + + 192.168.122.2 + 02:00:c0:a8:7a:02 + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_6/disk_10.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_6/disk_10.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_6/disk_10.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_6/disk_10.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + 10 + Debian 7.1 LAMP + 2048 + file:///images/debian/wheezy.img + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_6/disk_15.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_6/disk_15.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_6/disk_15.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_6/disk_15.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + 15 + Debian Sid + 1024 + file:///images/debian/sid.img + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_6/storage_5.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_6/storage_5.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_6/storage_5.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_6/storage_5.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,13 @@ + + 5 + test-volume + + oneadmin + 
READY + DATABLOCK + Attached storage + 1000 + ext3 + NO + YES + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_collection.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_collection.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_collection.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_collection.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_large.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_large.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_large.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_large.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ + + + large + 8 + 8192 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_medium.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_medium.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_medium.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_medium.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ + + + medium + 4 + 4096 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_small.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_small.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_small.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opennebula_3_8/instance_type_small.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ + + + small + 1 + 1024 + diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/300_multiple_choices.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/300_multiple_choices.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/300_multiple_choices.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/300_multiple_choices.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ +{"choices": [{"status": "CURRENT", "media-types": [{"base": +"application/xml", "type": +"application/vnd.openstack.compute+xml;version=2"}, {"base": +"application/json", "type": +"application/vnd.openstack.compute+json;version=2"}], "id": "v2.0", +"links": [{"href": "http://50.56.213.226:8774/v2/v2.0/images/detail", +"rel": "self"}]}]} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/_v1_1__auth.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/_v1_1__auth.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/_v1_1__auth.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/_v1_1__auth.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,42 @@ +{ + "auth": { + "token": { + "id": "aaaaaaaaaaaa-bbb-cccccccccccccc", + "expires": "2031-11-23T21:00:14-06:00" + }, + "serviceCatalog": { + "cloudFilesCDN": [ + { + "region": "ORD", + "publicURL": "https://cdn2.clouddrive.com/v1/MossoCloudFS", + "v1Default": true + }, + { + "region": "LON", + "publicURL": "https://cdn2.clouddrive.com/v1/MossoCloudFS", + "v1Default": false + } + ], + "cloudFiles": [ + { + "region": "ORD", + "publicURL": "https://storage101.ord1.clouddrive.com/v1/MossoCloudFS", + "v1Default": true, + "internalURL": "https://snet-storage101.ord1.clouddrive.com/v1/MossoCloudFS" + }, + { + "region": "LON", + "publicURL": "https://storage101.lon1.clouddrive.com/v1/MossoCloudFS", + "v1Default": false, + "internalURL": "https://snet-storage101.lon1.clouddrive.com/v1/MossoCloudFS" + } + ], + "cloudServers": [ + 
{ + "publicURL": "https://servers.api.rackspacecloud.com/v1.0/slug", + "v1Default": true + } + ] + } + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/_v1_1__auth_mssing_token.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/_v1_1__auth_mssing_token.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/_v1_1__auth_mssing_token.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/_v1_1__auth_mssing_token.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"auth":{"token":{"expires":"2011-09-18T02:44:17.000-05:00"},"serviceCatalog":{"cloudFilesCDN":[{"region":"ORD","publicURL":"https:\/\/cdn2.clouddrive.com\/v1\/MossoCloudFS","v1Default":true}],"cloudFiles":[{"region":"ORD","publicURL":"https:\/\/storage101.ord1.clouddrive.com\/v1\/MossoCloudFS","v1Default":true,"internalURL":"https:\/\/snet-storage101.ord1.clouddrive.com\/v1\/MossoCloudFS"}],"cloudServers":[{"publicURL":"https:\/\/servers.api.rackspacecloud.com\/v1.0\/slug","v1Default":true}]}}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/_v1_1__auth_unauthorized.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/_v1_1__auth_unauthorized.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/_v1_1__auth_unauthorized.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/_v1_1__auth_unauthorized.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"unauthorized":{"message":"Username or api key is invalid","code":401}} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_flavors_detail.xml libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_flavors_detail.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_flavors_detail.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_flavors_detail.xml 
2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_images_detail.xml libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_images_detail.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_images_detail.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_images_detail.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_images_post.xml libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_images_post.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_images_post.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_images_post.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,3 @@ + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_limits.xml libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_limits.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_limits.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_limits.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_missing.xml libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_missing.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_missing.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_missing.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_multiple_nodes.xml libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_multiple_nodes.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_multiple_nodes.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_multiple_nodes.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_pending.xml libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_pending.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_pending.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_pending.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_same_uuid.xml libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_same_uuid.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_same_uuid.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_same_uuid.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_success.xml libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_success.xml --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_success.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_success.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_empty.xml libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_empty.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_empty.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_empty.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,2 @@ + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_metadata.xml libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_metadata.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_metadata.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail_metadata.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,16 @@ + + + + + somevalue + + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail.xml libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_detail.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_ips.xml libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_ips.xml --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_ips.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_ips.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_metadata.xml libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_metadata.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_metadata.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_metadata.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,15 @@ + + + + b + d + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_no_admin_pass.xml libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_no_admin_pass.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers_no_admin_pass.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers_no_admin_pass.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers.xml libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_servers.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_servers.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_groups_detail.xml libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_groups_detail.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_groups_detail.xml 1970-01-01 
00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_groups_detail.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_groups.xml libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_groups.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_groups.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_groups.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_group.xml libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_group.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_group.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/v1_slug_shared_ip_group.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/_v2_0__auth_deployment.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/_v2_0__auth_deployment.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/_v2_0__auth_deployment.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/_v2_0__auth_deployment.json 2013-12-12 09:01:49.000000000 +0000 @@ -0,0 +1,143 @@ +{ + "access": { + "token": { + "id": "aaaaaaaaaaaa-bbb-cccccccccccccc", + "expires": "2031-11-23T21:00:14.000-06:00" + }, + "serviceCatalog": [ + { + "endpoints": [ + { + "region": "ORD", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + "publicURL": "https://cdn2.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111", + "version": { + "versionInfo": "https://cdn2.clouddrive.com/v1/", + 
"versionList": "https://cdn2.clouddrive.com/", + "versionId": "1" + } + } + ], + "name": "cloudFilesCDN", + "type": "rax:object-cdn" + }, + { + "endpoints": [ + { + "region": "ORD", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + "publicURL": "https://storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111", + "internalURL": "https://snet-storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" + }, + { + "region": "LON", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + "publicURL": "https://storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111", + "internalURL": "https://snet-storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" + } + ], + "name": "cloudFiles", + "type": "object-store" + }, + { + "endpoints": [ + { + "tenantId": "slug", + "publicURL": "https://servers.api.rackspacecloud.com/v1.0/slug", + "version": { + "versionInfo": "https://servers.api.rackspacecloud.com/v1.0/", + "versionList": "https://servers.api.rackspacecloud.com/", + "versionId": "1.0" + } + } + ], + "name": "cloudServers", + "type": "compute" + }, + { + "endpoints": [ + { + "region": "RegionOne", + "tenantId": "slug", + "publicURL": "https://127.0.0.1/v2/slug", + "versionInfo": "https://127.0.0.1/v2/", + "versionList": "https://127.0.0.1/", + "versionId": "2" + } + ], + "name": "nova", + "type": "compute" + }, + { + "endpoints": [ + { + "region": "DFW", + "tenantId": "613469", + "publicURL": "https://dfw.servers.api.rackspacecloud.com/v2/slug", + "versionInfo": "https://dfw.servers.api.rackspacecloud.com/v2/", + "versionList": "https://dfw.servers.api.rackspacecloud.com/", + "versionId": "2" + }, + { + "region": "ORD", + "tenantId": "613469", + "publicURL": "https://ord.servers.api.rackspacecloud.com/v2/slug", + "versionInfo": "https://ord.servers.api.rackspacecloud.com/v2/", + "versionList": "https://ord.servers.api.rackspacecloud.com/", + 
"versionId": "2" + }, + { + "region": "IAD", + "tenantId": "613469", + "publicURL": "https://iad.servers.api.rackspacecloud.com/v2/slug", + "versionInfo": "https://iad.servers.api.rackspacecloud.com/v2/", + "versionList": "https://iad.servers.api.rackspacecloud.com/", + "versionId": "2" + }, + { + "region": "SYD", + "tenantId": "613469", + "publicURL": "https://syd.servers.api.rackspacecloud.com/v2/slug", + "versionInfo": "https://syd.servers.api.rackspacecloud.com/v2/", + "versionList": "https://syd.servers.api.rackspacecloud.com/", + "versionId": "2" + }, + { + "region": "HKG", + "tenantId": "613469", + "publicURL": "https://hkg.servers.api.rackspacecloud.com/v2/slug", + "versionInfo": "https://hkg.servers.api.rackspacecloud.com/v2/", + "versionList": "https://hkg.servers.api.rackspacecloud.com/", + "versionId": "2" + } + + + ], + "name": "cloudServersOpenStack", + "type": "compute" + }, + { + "endpoints": [ + { + "region": "DFW", + "tenantId": "slug", + "publicURL": "https://preprod.dfw.servers.api.rackspacecloud.com/v2/slug" + } + ], + "name": "cloudServersPreprod", + "type": "compute" + } + ], + "user": { + "id": "7", + "roles": [ + { + "id": "identity:default", + "description": "Default Role.", + "name": "identity:default" + } + ], + "name": "testuser" + } + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/_v2_0__auth.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/_v2_0__auth.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/_v2_0__auth.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/_v2_0__auth.json 2013-12-12 09:01:49.000000000 +0000 @@ -0,0 +1,142 @@ +{ + "access": { + "token": { + "id": "aaaaaaaaaaaa-bbb-cccccccccccccc", + "expires": "2031-11-23T21:00:14.000-06:00" + }, + "serviceCatalog": [ + { + "endpoints": [ + { + "region": "ORD", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + "publicURL": 
"https://cdn2.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111", + "version": { + "versionInfo": "https://cdn2.clouddrive.com/v1/", + "versionList": "https://cdn2.clouddrive.com/", + "versionId": "1" + } + } + ], + "name": "cloudFilesCDN", + "type": "rax:object-cdn" + }, + { + "endpoints": [ + { + "region": "ORD", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + "publicURL": "https://storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111", + "internalURL": "https://snet-storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" + }, + { + "region": "LON", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + "publicURL": "https://storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111", + "internalURL": "https://snet-storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" + } + ], + "name": "cloudFiles", + "type": "object-store" + }, + { + "endpoints": [ + { + "tenantId": "1337", + "publicURL": "https://servers.api.rackspacecloud.com/v1.0/1337", + "version": { + "versionInfo": "https://servers.api.rackspacecloud.com/v1.0/", + "versionList": "https://servers.api.rackspacecloud.com/", + "versionId": "1.0" + } + } + ], + "name": "cloudServers", + "type": "compute" + }, + { + "endpoints": [ + { + "region": "RegionOne", + "tenantId": "1337", + "publicURL": "https://127.0.0.1/v2/1337", + "versionInfo": "https://127.0.0.1/v2/", + "versionList": "https://127.0.0.1/", + "versionId": "2" + } + ], + "name": "nova", + "type": "compute" + }, + { + "endpoints": [ + { + "region": "DFW", + "tenantId": "613469", + "publicURL": "https://dfw.servers.api.rackspacecloud.com/v2/1337", + "versionInfo": "https://dfw.servers.api.rackspacecloud.com/v2/", + "versionList": "https://dfw.servers.api.rackspacecloud.com/", + "versionId": "2" + }, + { + "region": "ORD", + "tenantId": "613469", + "publicURL": 
"https://ord.servers.api.rackspacecloud.com/v2/1337", + "versionInfo": "https://ord.servers.api.rackspacecloud.com/v2/", + "versionList": "https://ord.servers.api.rackspacecloud.com/", + "versionId": "2" + }, + { + "region": "IAD", + "tenantId": "613469", + "publicURL": "https://iad.servers.api.rackspacecloud.com/v2/1337", + "versionInfo": "https://iad.servers.api.rackspacecloud.com/v2/", + "versionList": "https://iad.servers.api.rackspacecloud.com/", + "versionId": "2" + }, + { + "region": "SYD", + "tenantId": "613469", + "publicURL": "https://syd.servers.api.rackspacecloud.com/v2/1337", + "versionInfo": "https://syd.servers.api.rackspacecloud.com/v2/", + "versionList": "https://syd.servers.api.rackspacecloud.com/", + "versionId": "2" + }, + { + "region": "HKG", + "tenantId": "613469", + "publicURL": "https://hkg.servers.api.rackspacecloud.com/v2/1337", + "versionInfo": "https://hkg.servers.api.rackspacecloud.com/v2/", + "versionList": "https://hkg.servers.api.rackspacecloud.com/", + "versionId": "2" + } + + ], + "name": "cloudServersOpenStack", + "type": "compute" + }, + { + "endpoints": [ + { + "region": "DFW", + "tenantId": "1337", + "publicURL": "https://preprod.dfw.servers.api.rackspacecloud.com/v2/1337" + } + ], + "name": "cloudServersPreprod", + "type": "compute" + } + ], + "user": { + "id": "7", + "roles": [ + { + "id": "identity:default", + "description": "Default Role.", + "name": "identity:default" + } + ], + "name": "testuser" + } + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/_v2_0__auth_lon.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/_v2_0__auth_lon.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack/_v2_0__auth_lon.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack/_v2_0__auth_lon.json 2013-12-12 09:01:49.000000000 +0000 @@ -0,0 +1,103 @@ +{ + "access": { + "token": { + "id": "aaaaaaaaaaaa-bbb-cccccccccccccc", + "expires": 
"2031-11-23T21:00:14.000-06:00" + }, + "serviceCatalog": [ + { + "endpoints": [ + { + "region": "ORD", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + "publicURL": "https://cdn2.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111", + "version": { + "versionInfo": "https://cdn2.clouddrive.com/v1/", + "versionList": "https://cdn2.clouddrive.com/", + "versionId": "1" + } + } + ], + "name": "cloudFilesCDN", + "type": "rax:object-cdn" + }, + { + "endpoints": [ + { + "region": "ORD", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + "publicURL": "https://storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111", + "internalURL": "https://snet-storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" + } + ], + "name": "cloudFiles", + "type": "object-store" + }, + { + "endpoints": [ + { + "tenantId": "1337", + "publicURL": "https://servers.api.rackspacecloud.com/v1.0/1337", + "version": { + "versionInfo": "https://servers.api.rackspacecloud.com/v1.0/", + "versionList": "https://servers.api.rackspacecloud.com/", + "versionId": "1.0" + } + } + ], + "name": "cloudServers", + "type": "compute" + }, + { + "endpoints": [ + { + "region": "RegionOne", + "tenantId": "1337", + "publicURL": "https://127.0.0.1/v2/1337", + "versionInfo": "https://127.0.0.1/v2/", + "versionList": "https://127.0.0.1/", + "versionId": "2" + } + ], + "name": "nova", + "type": "compute" + }, + { + "endpoints": [ + { + "region": "LON", + "tenantId": "613469", + "publicURL": "https://lon.servers.api.rackspacecloud.com/v2/1337", + "versionInfo": "https://lon.servers.api.rackspacecloud.com/v2/", + "versionList": "https://lon.servers.api.rackspacecloud.com/", + "versionId": "2" + } + ], + "name": "cloudServersOpenStack", + "type": "compute" + }, + { + "endpoints": [ + { + "region": "LON", + "tenantId": "1337", + "publicURL": "https://preprod.lon.servers.api.rackspacecloud.com/v2/1337" + } + ], + "name": 
"cloudServersPreprod", + "type": "compute" + } + ], + "user": { + "id": "7", + "roles": [ + { + "id": "identity:default", + "description": "Default Role.", + "name": "identity:default" + } + ], + "name": "testuser" + } + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_flavors_7.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_flavors_7.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_flavors_7.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_flavors_7.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"flavor": {"rxtx_quota": 2500, "name": "15.5GB slice", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/flavors/7", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/7", "rel": "bookmark"}], "ram": 16384, "vcpus": 8, "rxtx_cap": 200, "swap": 0, "disk": 620, "id": 7}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_flavors_detail.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_flavors_detail.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_flavors_detail.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_flavors_detail.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"flavors": [{"rxtx_quota": 2500, "name": "15.5GB slice", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/flavors/7", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/7", "rel": "bookmark"}], "ram": 16384, "vcpus": 8, "rxtx_cap": 200, "swap": 0, "disk": 620, "id": 7}, {"rxtx_quota": 600, "name": "1GB slice", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/flavors/3", "rel": "self"}, {"href": 
"http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/3", "rel": "bookmark"}], "ram": 1024, "vcpus": 1, "rxtx_cap": 30, "swap": 0, "disk": 40, "id": 3}, {"rxtx_quota": 150, "name": "256 slice", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/flavors/1", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/1", "rel": "bookmark"}], "ram": 256, "vcpus": 1, "rxtx_cap": 10, "swap": 0, "disk": 10, "id": 1}, {"rxtx_quota": 1200, "name": "2GB slice", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/flavors/4", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/4", "rel": "bookmark"}], "ram": 2048, "vcpus": 2, "rxtx_cap": 60, "swap": 0, "disk": 80, "id": 4}, {"rxtx_quota": 2500, "name": "30GB slice", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/flavors/8", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/8", "rel": "bookmark"}], "ram": 30720, "vcpus": 8, "rxtx_cap": 400, "swap": 0, "disk": 1200, "id": 8}, {"rxtx_quota": 2500, "name": "4GB slice", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/flavors/5", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/5", "rel": "bookmark"}], "ram": 4096, "vcpus": 2, "rxtx_cap": 100, "swap": 0, "disk": 160, "id": 5}, {"rxtx_quota": 300, "name": "512 slice", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/flavors/2", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/2", "rel": "bookmark"}], "ram": 512, "vcpus": 1, "rxtx_cap": 20, "swap": 0, "disk": 20, "id": 2}, {"rxtx_quota": 2500, "name": "8GB slice", "links": 
[{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/flavors/6", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/6", "rel": "bookmark"}], "ram": 8192, "vcpus": 4, "rxtx_cap": 150, "swap": 0, "disk": 320, "id": 6}]} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_floating_ip.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_floating_ip.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_floating_ip.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_floating_ip.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"floating_ip": {"instance_id": null, "ip": "10.3.1.42", "fixed_ip": null, "id": "09ea1784-2f81-46dc-8c91-244b4df75bde", "pool": "public"}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_floating_ip_pools.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_floating_ip_pools.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_floating_ip_pools.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_floating_ip_pools.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"floating_ip_pools": [{"name": "public"}, {"name": "foobar"}]} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_floating_ips.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_floating_ips.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_floating_ips.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_floating_ips.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"floating_ips": [{"instance_id": null, "ip": "10.3.1.42", "fixed_ip": null, "id": "09ea1784-2f81-46dc-8c91-244b4df75bde", "pool": "public"}, {"instance_id": 
"fcfc96da-19e2-40fd-8497-f29da1b21143", "ip": "10.3.1.1", "fixed_ip": "172.16.21.4", "id": "04c5336a-0629-4694-ba30-04b0bdfa88a4", "pool": "public"}]} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_images_13.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_images_13.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_images_13.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_images_13.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"image": {"status": "ACTIVE", "updated": "2011-08-06T18:14:02Z", "name": "Windows 2008 SP2 x86 (B24)", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/13", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/13", "rel": "bookmark"}, {"href": "http://10.13.136.245:9292/rs-reach-project/images/13", "type": "application/vnd.openstack.image", "rel": "alternate"}], "created": "2011-08-06T18:13:11Z", "minDisk": "5", "progress": 100, "minRam": "256", "id": "13", "metadata": {"os_type": "windows"}}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_images_4949f9ee_2421_4c81_8b49_13119446008b.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_images_4949f9ee_2421_4c81_8b49_13119446008b.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_images_4949f9ee_2421_4c81_8b49_13119446008b.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_images_4949f9ee_2421_4c81_8b49_13119446008b.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"image": {"status": "SAVING", "updated": "2012-01-05T19:42:20Z", "name": "new_image", "links": [{"href": "http://127.0.0.1/v1.1/68/images/4949f9ee-2421-4c81-8b49-13119446008b", "rel": "self"}, {"href": "http://127.0.0.1/68/images/4949f9ee-2421-4c81-8b49-13119446008b", "rel": 
"bookmark"}, {"href": "http://10.13.136.213:9292/68/images/4949f9ee-2421-4c81-8b49-13119446008b", "type": "application/vnd.openstack.image", "rel": "alternate"}], "created": "2012-01-05T19:42:20Z", "minDisk": 0, "server": {"id": "a0fdd7b9-73e0-4e13-a01c-2bcb421be064", "links": [{"href": "http://127.0.0.1/v1.1/servers/a0fdd7b9-73e0-4e13-a01c-2bcb421be064", "rel": "self"}, {"href": "http://127.0.0.1/servers/a0fdd7b9-73e0-4e13-a01c-2bcb421be064", "rel": "bookmark"}]}, "progress": 25, "minRam": 0, "id": "4949f9ee-2421-4c81-8b49-13119446008b", "metadata": {"instance_uuid": "a0fdd7b9-73e0-4e13-a01c-2bcb421be064", "image_state": "creating", "user_id": "reach6", "instance_ref": "http://127.0.0.1/v1.1/servers/a0fdd7b9-73e0-4e13-a01c-2bcb421be064", "image_type": "snapshot", "backup_type": null}}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_images_detail.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_images_detail.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_images_detail.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_images_detail.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,366 @@ +{ + "images": [ + { + "status": "ACTIVE", + "updated": "2011-08-06T18:14:02Z", + "name": "Windows 2008 SP2 x86 (B24)", + "server" : { + "id": "52415800-8b69-11e0-9b19-734f335aa7b3", + "name": "test-server", + "links": [ + { + "rel": "self", + "href": "http://servers.api.openstack.org/v1.1/1234/servers/52415800-8b69-11e0-9b19-734f335aa7b3" + }, + { + "rel": "bookmark", + "href": "http://servers.api.openstack.org/1234/servers/52415800-8b69-11e0-9b19-734f335aa7b3" + } + ] + }, + "links": [ + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/13", + "rel": "self" + }, + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/13", + "rel": "bookmark" + }, + { + "href": 
"http://10.13.136.170:9292/rs-reach-project/images/13", + "type": "application/vnd.openstack.image", + "rel": "alternate" + } + ], + "created": "2011-08-06T18:13:11Z", + "minDisk": 0, + "progress": 100, + "minRam": 0, + "id": "13", + "metadata": { + "os_type": "windows" + } + }, + { + "status": "ACTIVE", + "updated": "2011-08-06T18:13:11Z", + "name": "Windows 2003 R2 x86 (B24)", + "links": [ + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/12", + "rel": "self" + }, + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/12", + "rel": "bookmark" + }, + { + "href": "http://10.13.136.170:9292/rs-reach-project/images/12", + "type": "application/vnd.openstack.image", + "rel": "alternate" + } + ], + "created": "2011-08-06T18:12:33Z", + "minDisk": 0, + "progress": 100, + "minRam": 0, + "id": "12", + "metadata": { + "os_type": "windows" + } + }, + { + "status": "ACTIVE", + "updated": "2011-08-06T16:27:56Z", + "name": "Windows 2008 SP2 x64 (B24)", + "links": [ + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/11", + "rel": "self" + }, + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/11", + "rel": "bookmark" + }, + { + "href": "http://10.13.136.170:9292/rs-reach-project/images/11", + "type": "application/vnd.openstack.image", + "rel": "alternate" + } + ], + "created": "2011-08-06T16:26:15Z", + "minDisk": 0, + "progress": 100, + "minRam": 0, + "id": "11", + "metadata": { + "os_type": "windows" + } + }, + { + "status": "ACTIVE", + "updated": "2011-08-06T16:26:14Z", + "name": "Windows 2008 R2 x64 (B24)", + "links": [ + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/10", + "rel": "self" + }, + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/10", + "rel": "bookmark" + }, + { + "href": 
"http://10.13.136.170:9292/rs-reach-project/images/10", + "type": "application/vnd.openstack.image", + "rel": "alternate" + } + ], + "created": "2011-08-06T16:24:51Z", + "minDisk": 0, + "progress": 100, + "minRam": 0, + "id": "10", + "metadata": { + "os_type": "windows" + } + }, + { + "status": "ACTIVE", + "updated": "2011-08-06T16:24:51Z", + "name": "Windows 2003 R2 x64 (B24)", + "links": [ + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/9", + "rel": "self" + }, + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/9", + "rel": "bookmark" + }, + { + "href": "http://10.13.136.170:9292/rs-reach-project/images/9", + "type": "application/vnd.openstack.image", + "rel": "alternate" + } + ], + "created": "2011-08-06T16:23:52Z", + "minDisk": 0, + "progress": 100, + "minRam": 0, + "id": "9", + "metadata": { + "os_type": "windows" + } + }, + { + "status": "ACTIVE", + "updated": "2011-08-05T22:58:29Z", + "name": "Ubuntu Natty (11.04)", + "links": [ + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/8", + "rel": "self" + }, + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/8", + "rel": "bookmark" + }, + { + "href": "http://10.13.136.170:9292/rs-reach-project/images/8", + "type": "application/vnd.openstack.image", + "rel": "alternate" + } + ], + "created": "2011-08-05T22:58:20Z", + "minDisk": 0, + "progress": 100, + "minRam": 0, + "id": "8", + "metadata": {} + }, + { + "status": "ACTIVE", + "updated": "2011-08-05T22:58:19Z", + "name": "Ubuntu Lucid (10.04)", + "links": [ + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/7", + "rel": "self" + }, + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/7", + "rel": "bookmark" + }, + { + "href": "http://10.13.136.170:9292/rs-reach-project/images/7", + "type": 
"application/vnd.openstack.image", + "rel": "alternate" + } + ], + "created": "2011-08-05T22:58:14Z", + "minDisk": 0, + "progress": 100, + "minRam": 0, + "id": "7", + "metadata": {} + }, + { + "status": "ACTIVE", + "updated": "2011-08-05T22:58:14Z", + "name": "Fedora 15", + "links": [ + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/6", + "rel": "self" + }, + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/6", + "rel": "bookmark" + }, + { + "href": "http://10.13.136.170:9292/rs-reach-project/images/6", + "type": "application/vnd.openstack.image", + "rel": "alternate" + } + ], + "created": "2011-08-05T22:58:01Z", + "minDisk": 0, + "progress": 100, + "minRam": 0, + "id": "6", + "metadata": {} + }, + { + "status": "ACTIVE", + "updated": "2011-08-05T22:58:00Z", + "name": "Fedora 14", + "links": [ + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/5", + "rel": "self" + }, + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/5", + "rel": "bookmark" + }, + { + "href": "http://10.13.136.170:9292/rs-reach-project/images/5", + "type": "application/vnd.openstack.image", + "rel": "alternate" + } + ], + "created": "2011-08-05T22:57:47Z", + "minDisk": 0, + "progress": 100, + "minRam": 0, + "id": "5", + "metadata": {} + }, + { + "status": "ACTIVE", + "updated": "2011-08-05T22:57:47Z", + "name": "Debian Squeeze (6.0)", + "links": [ + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/4", + "rel": "self" + }, + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/4", + "rel": "bookmark" + }, + { + "href": "http://10.13.136.170:9292/rs-reach-project/images/4", + "type": "application/vnd.openstack.image", + "rel": "alternate" + } + ], + "created": "2011-08-05T22:57:41Z", + "minDisk": 0, + "progress": 100, + "minRam": 0, + "id": "4", 
+ "metadata": {} + }, + { + "status": "ACTIVE", + "updated": "2011-08-05T22:57:40Z", + "name": "Debian Lenny (5.0)", + "links": [ + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/3", + "rel": "self" + }, + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/3", + "rel": "bookmark" + }, + { + "href": "http://10.13.136.170:9292/rs-reach-project/images/3", + "type": "application/vnd.openstack.image", + "rel": "alternate" + } + ], + "created": "2011-08-05T22:57:30Z", + "minDisk": 0, + "progress": 100, + "minRam": 0, + "id": "3", + "metadata": {} + }, + { + "status": "ACTIVE", + "updated": "2011-08-05T22:57:30Z", + "name": "CentOS 6.0", + "links": [ + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/2", + "rel": "self" + }, + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/2", + "rel": "bookmark" + }, + { + "href": "http://10.13.136.170:9292/rs-reach-project/images/2", + "type": "application/vnd.openstack.image", + "rel": "alternate" + } + ], + "created": "2011-08-05T22:57:20Z", + "minDisk": 0, + "progress": 100, + "minRam": 0, + "id": "2", + "metadata": {} + }, + { + "status": "ACTIVE", + "updated": "2011-08-05T22:56:20Z", + "name": "CentOS 5.6", + "links": [ + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/1", + "rel": "self" + }, + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/1", + "rel": "bookmark" + }, + { + "href": "http://10.13.136.170:9292/rs-reach-project/images/1", + "type": "application/vnd.openstack.image", + "rel": "alternate" + } + ], + "created": "2011-08-05T22:56:03Z", + "minDisk": 0, + "progress": 100, + "minRam": 0, + "id": "1", + "metadata": {} + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_create_import.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_create_import.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_create_import.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_create_import.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,8 @@ +{ + "keypair": { + "fingerprint": "97:10:a6:e7:92:65:7e:69:fe:e6:81:8f:39:3c:8f:5a", + "name": "key3", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCzTJr5BNSlTIDFsVY3zUJtbbcPsWbw7XDE/eXRQ+704790ARKKvE3FsERqdMZvwcx1osR0sGVdpgAiV/z5iEb5z2juQp7yQJHePiEnfHTH99NVJN+Y1BztchRoz224IaP987bN+fd8Pl/O1YDCyw+bX5zI/ekCC9z8fTdI2l1AbTnKVn7UjZBjKZi1uPMaH016fp039pIOtkjvIgDWjeGwOiJjY1vzaX3nxQje4kprEZ4FKk4yyG61qveBZr+/0Xq6ocNOYUSpB29AZ0IcfJa7P3yMxVRzSS9aN0fmrlf3kIFkVAy45A83GfZpiMxo/ulTaO9+tTSwulZP+0bxkCkn dummycomment\n", + "user_id": "dbdf4c6cab0c4ae78bef0bcdb03c2440" + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_create.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_create.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_create.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_create.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,9 @@ +{ + "keypair": { + "fingerprint": "80:f8:03:a7:8e:c1:c3:b1:7e:c5:8c:50:04:5e:1c:5b", + "name": "key0", + "private_key": "-----BEGIN RSA PRIVATE 
KEY-----\nMIICWwIBAAKBgQDPC4MDHBbUjeGZ4pK5svGxkFHJFdDatpMAYcW/fyDxsMbyiHnu\nUOxB0WJupUQd4tc7B8+MNOLzcZVQkUjIhhkb5qCbjcoOqzb59owtNCSi7TleaC6w\n15j1LJb3zdHVxEhGJ19I95DhOtiFRHp2Ik3bYV6p+uv0sQxfaqw3q5M3+QIDAQAB\nAoGAW2LqZfH9Bb7GSEUgnESmt8hKwSYW9KLHidCeFyNG6Ect2RlyMEWZsod4Gfxq\nb4KTm6Ob8XfagLeuv0wRQyklZUbyb4aurfn4hX0cpkxSPAVar8uG/0TJY1wswxfo\nkReZCq7CQFlt7w3Y1RHZyXo/inyAxohi393trVhIGAqdXp0CQQDt7/GeI5QWKjYj\nwe5kFTRowVJ+y61MP237Bz+YF5+pq28ikdLAMzdDOyd3LJTnBGJ/DK1ksfJDCSue\nEgdifYJrAkEA3sM1fRQB/PyyyCR1DcZGlOfc/OBCSG4aTMYOK+g0PnibKPj5wS6q\nuK8w1q+0CztpgKsmEtQ+H7H8Fva81S7wKwJANY7tNEuN6e9WgHYG00Byq6HYj/II\n8EDW4Mqg5ftrVSXhvkZUyi69IcUO/SRr4BR8l1yjKydjAPPvfYVRZDocQQJASHXr\nQkJt2yM/7IafZNmoP+ukIMW6CeF2wJ50IagoxmFo500FwOczNVwXYN5KjJTI3sfN\nXLaZdqnovHeKOTZJfQJAZ2HBnmgsLoFE6ONF492TXIs7JxJr8z4QUp1AXGUXcZmy\njuL3b9XW6K908Ev8uTSNzRo6TyGuYKGllp10K6A3bA==\n-----END RSA PRIVATE KEY-----\n", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDPC4MDHBbUjeGZ4pK5svGxkFHJFdDatpMAYcW/fyDxsMbyiHnuUOxB0WJupUQd4tc7B8+MNOLzcZVQkUjIhhkb5qCbjcoOqzb59owtNCSi7TleaC6w15j1LJb3zdHVxEhGJ19I95DhOtiFRHp2Ik3bYV6p+uv0sQxfaqw3q5M3+Q== Generated by Nova\n", + "user_id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_get_one.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_get_one.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_get_one.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_get_one.json 2013-12-12 09:01:49.000000000 +0000 @@ -0,0 +1 @@ +{"keypair": {"public_key": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDkRJ+f6VibLJJRPtuVM3g5ZCporVfoFJhRt8vcBbD4x/1h8OSBvWwuj9tKoZha0ijpGJIRW5HylRKYZPFL7gxlmqS9LM/lewx3c/fZItmP4kDYuXX2Dn9XwHFLS/bSy/JHVgnrHopHUH/2a57iUNe+QRrngEGz13N1S9If3EGDxIhZuO8S1BRLWK3SqtHjOQ6mWZOF6xAs3nwKaBNJTWVp6XUshzlcwWUA5nFysN9MVXX7t/J1qo+xcSAwt/ew8v6dZJcCQM+y30bQhPJzSN8LepN5tSTI4iEN0Y+LtNQDtCEYacr4qEFkAxj3CcSAeQVMaT/a7ps0xiHg9GnCbGsV Generated by Nova\n", "user_id": "1234", "name": "test-key-pair", "deleted": false, "created_at": "2013-12-07T15:17:16.000000", "updated_at": null, "fingerprint": "a9:55:e8:b8:49:45:7b:aa:a9:33:fb:97:86:79:2c:1b", "deleted_at": null, "id": 4567}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,18 @@ +{ + "keypairs": [ + { + "keypair": { + "fingerprint": "22:0e:d6:f7:bd:5e:ee:49:cf:1f:10:d5:9c:a8:35:64", + "name": "key1", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC/ePvJuMEOc90gidxWN+8lYekv+S6j8SJhcQRBjE5DVs/M+3VXyJTQc6fguUS9c7o8GZXpP/0dwbVa9y76HeZs6In+XE1egoUyz4zLHQ5jUepFeekChpSlo6yQWI2SHUxJOshqPLOEU1XlrwvN0h5FcXGVV0x6DJgLZuCRS7oIxQ== Generated by Nova\n" + } + }, + { + "keypair": { + "fingerprint": "5d:66:33:ae:99:0f:fb:cb:86:f2:bc:ae:53:99:b6:ed", + "name": "key2", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCz5sy4u8KwAPAMPr+4bEMlU6BwpSD6eZVokwMclojqIz9nKAvQD9AEw/6ok9Xsn0oixBrCoW2HYsXIiUziufzheoGsZIzuj3D7Rpbtrft53FtICe5UtQrOo3WJb8bvbzpDDd7xYlb9PpQTXoxInzjgBW+Ox6OODx2NazTk7PHZDQ== Generated by Nova\n" + } + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_not_found.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_not_found.json --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_not_found.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_keypairs_not_found.json 2013-12-12 09:01:49.000000000 +0000 @@ -0,0 +1 @@ +{"itemNotFound": {"message": "The resource could not be found.", "code": 404}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_networks.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_networks.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_networks.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_networks.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1 @@ +{"networks": [{"cidr": "127.0.0.0/24", "id": "f13e5051-feea-416b-827a-1a0acc2dad14", "label": "test1"}, {"id": "00000000-0000-0000-0000-000000000000", "label": "public"}, {"id": "11111111-1111-1111-1111-111111111111", "label": "private"}]} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_networks_POST.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_networks_POST.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_networks_POST.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_networks_POST.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1 @@ +{"network": {"cidr": "127.0.0.0/24", "id": "ef2143d4-2353-4e3c-b577-0de372411f42", "label": "test1"}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_quota_sets_aTenantId.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_quota_sets_aTenantId.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_quota_sets_aTenantId.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_quota_sets_aTenantId.json 2013-08-30 
12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"quota_set": {"metadata_items": 10, "injected_file_content_bytes": 1000, "injected_files": 10, "volumes": 0, "instances": 25, "gigabytes": 500, "cores": 50, "ram": 102400, "id": "aTenantId", "floating_ips": 10}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_security_group_rules_create.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_security_group_rules_create.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_security_group_rules_create.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_security_group_rules_create.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "security_group_rule": { + "from_port": 14, + "group": {}, + "id": 2, + "ip_protocol": "tcp", + "ip_range": { + "cidr": "0.0.0.0/0" + }, + "parent_group_id": 6, + "to_port": 16 + } +} + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_security_groups_create.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_security_groups_create.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_security_groups_create.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_security_groups_create.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "security_group": { + "description": "Test Security Group", + "id": 6, + "name": "test", + "rules": [], + "tenant_id": "68" + } +} + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_security_groups.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_security_groups.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_security_groups.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_security_groups.json 2013-08-30 12:21:18.000000000 +0000 
@@ -0,0 +1,31 @@ +{ + "security_groups": [ + { + "description": "default", + "id": 2, + "name": "default", + "rules": [], + "tenant_id": "68" + }, + { + "description": "FTP Client-Server - Open 20-21 ports", + "id": 4, + "name": "ftp", + "rules": [ + { + "from_port": 20, + "group": {}, + "id": 1, + "ip_protocol": "tcp", + "ip_range": { + "cidr": "0.0.0.0/0" + }, + "parent_group_id": 4, + "to_port": 21 + } + ], + "tenant_id": "68" + } + ] +} + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots_create.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots_create.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots_create.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots_create.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,11 @@ +{ + "snapshot": { + "id": "3fbbcccf-d058-4502-8844-6feeffdf4cb5", + "display_name": "snap-001", + "display_description": "Daily backup", + "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", + "status": "available", + "size": 30, + "created_at": "2012-02-29T03:50:07Z" + } +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots_create_rackspace.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots_create_rackspace.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots_create_rackspace.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots_create_rackspace.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,11 @@ +{ + "snapshot": { + "id": "3fbbcccf-d058-4502-8844-6feeffdf4cb5", + "displayName": "snap-001", + "displayDescription": "Daily backup", + "volumeId": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", + "status": "available", + "size": 30, + "createdAt": "2012-02-29T03:50:07Z" + } +} \ No newline at 
end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,22 @@ +{ + "snapshots": [ + { + "id": "3fbbcccf-d058-4502-8844-6feeffdf4cb5", + "display_name": "snap-001", + "display_description": "Daily backup", + "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", + "status": "available", + "size": 30, + "created_at": "2012-02-29T03:50:07Z" + }, + { + "id": "e479997c-650b-40a4-9dfe-77655818b0d2", + "display_name": "snap-002", + "display_description": "Weekly backup", + "volume_id": "76b8950a-8594-4e5b-8dce-0dfa9c696358", + "status": "available", + "size": 25, + "created_at": "2012-03-19T01:52:47Z" + } + ] +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots_rackspace.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots_rackspace.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots_rackspace.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_snapshots_rackspace.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,22 @@ +{ + "snapshots": [ + { + "id": "3fbbcccf-d058-4502-8844-6feeffdf4cb5", + "displayName": "snap-001", + "displayDescription": "Daily backup", + "volumeId": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", + "status": "available", + "size": 30, + "createdAt": "2012-02-29T03:50:07Z" + }, + { + "id": "e479997c-650b-40a4-9dfe-77655818b0d2", + "displayName": "snap-002", + "displayDescription": "Weekly backup", + "volumeId": "76b8950a-8594-4e5b-8dce-0dfa9c696358", + "status": "available", + "size": 25, + "createdAt": 
"2012-03-19T01:52:47Z" + } + ] +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,22 @@ +{ + "volume": { + "attachments": [ + { + "device": "/dev/vdb", + "id": "cd76a3a1-c4ce-40f6-9b9f-07a61508938d", + "serverId": "12065", + "volumeId": "cd76a3a1-c4ce-40f6-9b9f-07a61508938d" + } + ], + "availabilityZone": "nova", + "createdAt": "2013-06-24T11:20:13.000000", + "displayDescription": "", + "displayName": "test_volume_2", + "id": "cd76a3a1-c4ce-40f6-9b9f-07a61508938d", + "metadata": {}, + "size": 2, + "snapshotId": null, + "status": "in-use", + "volumeType": "None" + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_create.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_create.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_create.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes_create.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,17 @@ +{ + "volume": { + "attachments": [ + {} + ], + "availabilityZone": "nova", + "createdAt": "2013-06-28T12:22:39.616660", + "displayDescription": null, + "displayName": "test", + "id": "43b7db44-0497-40fa-b817-c906f13bbea3", + "metadata": {}, + "size": 1, + "snapshotId": null, + "status": "creating", + "volumeType": "None" + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_os_volumes.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,39 @@ +{ + "volumes": [ + { + "attachments": [ + { + "device": "/dev/vdb", + "id": "cd76a3a1-c4ce-40f6-9b9f-07a61508938d", + "serverId": "12065", + "volumeId": "cd76a3a1-c4ce-40f6-9b9f-07a61508938d" + } + ], + "availabilityZone": "nova", + "createdAt": "2013-06-24T11:20:13.000000", + "displayDescription": "", + "displayName": "test_volume_2", + "id": "cd76a3a1-c4ce-40f6-9b9f-07a61508938d", + "metadata": {}, + "size": 2, + "snapshotId": null, + "status": "available", + "volumeType": "None" + }, + { + "attachments": [ + {} + ], + "availabilityZone": "nova", + "createdAt": "2013-06-21T12:39:02.000000", + "displayDescription": "some description", + "displayName": "test_volume", + "id": "cfcec3bc-b736-4db5-9535-4c24112691b5", + "metadata": {}, + "size": 50, + "snapshotId": null, + "status": "available", + "volumeType": "None" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/README libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/README --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/README 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/README 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ +The json responses contained in this directory are copied directly from the +OpenStack 1.1 documentation at +http://docs.openstack.org/trunk/openstack-compute/developer/openstack-compute-api-1.1/ +as of this writing. + +The only exception is _os_quota_sets_aTenantId.json, which was captured (and +perturbed) via packet capture. 
diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12063_metadata_two_keys.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12063_metadata_two_keys.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12063_metadata_two_keys.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12063_metadata_two_keys.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ +{ + "metadata" : { + "Server Label" : "Web Head 1", + "Image Version" : "2.1" + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12064.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12064.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12064.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12064.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"server": {"status": "ACTIVE", "updated": "2011-10-11T00:44:20Z", "hostId": "a024053a6201e6c6c12660aab3d8fd879e332e663a5e1fdbc02a0307", "user_id": "rs-reach", "name": "lc-test", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/servers/12064", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/servers/12064", "rel": "bookmark"}], "addresses": {"public": [{"version": 4, "addr": "50.57.94.30"}, {"version": 6, "addr": "2001:4801:7808:52:16:3eff:fe77:32e3"}], "private": [{"version": 4, "addr": "10.182.64.29"}, {"version": 6, "addr": "fec0:4801:7808:52:16:3eff:fe6e:b7e2"}]}, "tenant_id": "rs-reach-project", "image": {"id": "7", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/7", "rel": "bookmark"}]}, "created": "2011-10-11T00:45:02Z", "uuid": "ec53630b-e4fb-442a-a748-c376f5c4345b", "accessIPv4": "", "accessIPv6": "", "key_name": 
null, "progress": 100, "flavor": {"id": "2", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/2", "rel": "bookmark"}]}, "config_drive": "", "id": 12064, "metadata": {}}} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12064_updated_name_bob.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12064_updated_name_bob.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12064_updated_name_bob.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12064_updated_name_bob.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"server": {"status": "ACTIVE", "updated": "2011-10-11T01:22:04Z", "hostId": "a024053a6201e6c6c12660aab3d8fd879e332e663a5e1fdbc02a0307", "user_id": "rs-reach", "name": "Bob", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/servers/12064", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/servers/12064", "rel": "bookmark"}], "addresses": {"public": [{"version": 4, "addr": "50.57.94.30"}, {"version": 6, "addr": "2001:4801:7808:52:16:3eff:fe77:32e3"}], "private": [{"version": 4, "addr": "10.182.64.29"}, {"version": 6, "addr": "fec0:4801:7808:52:16:3eff:fe6e:b7e2"}]}, "tenant_id": "rs-reach-project", "image": {"id": "7", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/7", "rel": "bookmark"}]}, "created": "2011-10-11T00:45:02Z", "uuid": "ec53630b-e4fb-442a-a748-c376f5c4345b", "accessIPv4": "", "accessIPv6": "", "key_name": null, "progress": 100, "flavor": {"id": "2", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/2", "rel": "bookmark"}]}, "config_drive": "", "id": 12064, "metadata": {}}} diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12065_os_volume_attachments.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12065_os_volume_attachments.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12065_os_volume_attachments.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12065_os_volume_attachments.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,8 @@ +{ + "volumeAttachment": { + "device": "/dev/vdb", + "id": "cd76a3a1-c4ce-40f6-9b9f-07a61508938d", + "serverId": "12065", + "volumeId": "cd76a3a1-c4ce-40f6-9b9f-07a61508938d" + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12086_console_output.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12086_console_output.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12086_console_output.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_12086_console_output.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,3 @@ +{ + "output": "FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_1c01300f-ef97-4937-8f03-ac676d6234be_os-security-groups.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_1c01300f-ef97-4937-8f03-ac676d6234be_os-security-groups.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_1c01300f-ef97-4937-8f03-ac676d6234be_os-security-groups.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_1c01300f-ef97-4937-8f03-ac676d6234be_os-security-groups.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,31 @@ +{ + "security_groups": [ + { + "description": "default", + "id": 2, + "name": "default", + 
"rules": [], + "tenant_id": "68" + }, + { + "description": "FTP Client-Server - Open 20-21 ports", + "id": 4, + "name": "ftp", + "rules": [ + { + "from_port": 20, + "group": {}, + "id": 1, + "ip_protocol": "tcp", + "ip_range": { + "cidr": "0.0.0.0/0" + }, + "parent_group_id": 4, + "to_port": 21 + } + ], + "tenant_id": "68" + } + ] +} + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,52 @@ +{ + "server": { + "status": "BUILD", + "updated": "2011-11-30T16:39:19Z", + "hostId": "", + "user_id": "reach6", + "name": "racktest", + "links": [ + { + "href": "http://127.0.0.1/v1.1/68/servers/39d04103-984b-4a52-b4ec-ffec452e284c", + "rel": "self" + }, + { + "href": "http://127.0.0.1/68/servers/39d04103-984b-4a52-b4ec-ffec452e284c", + "rel": "bookmark" + } + ], + "created": "2011-11-30T16:39:18Z", + "tenant_id": "68", + "image": { + "id": "fcf5582a-ad13-4d98-90a6-742116f1793c", + "links": [ + { + "href": "http://127.0.0.1/68/images/fcf5582a-ad13-4d98-90a6-742116f1793c", + "rel": "bookmark" + } + ] + }, + "addresses": {}, + "accessIPv4": "", + "accessIPv6": "", + "key_name": "devstack", + "progress": null, + "flavor": { + "id": "1", + "links": [ + { + "href": "http://127.0.0.1/68/flavors/1", + "rel": "bookmark" + } + ] + }, + "config_drive": "", + "id": "26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe", + "metadata": { + "My Server Name" : "Apache1" + }, + "OS-DCF:diskConfig": "AUTO", + "OS-EXT-AZ:availability_zone": "testaz" + + } +} diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_create_disk_config.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_create_disk_config.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_create_disk_config.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_create_disk_config.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,17 @@ +{ + "server": { + "OS-DCF:diskConfig": "MANUAL", + "id": "26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe", + "links": [ + { + "href": "http://127.0.0.1/v1.1/68/servers/26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe", + "rel": "self" + }, + { + "href": "http://127.0.0.1/68/servers/26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe", + "rel": "bookmark" + } + ], + "adminPass": "racktestvJq7d3" + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_create.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_create.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_create.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_create.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "server": { + "id": "26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe", + "links": [ + { + "href": "http://127.0.0.1/v1.1/68/servers/26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe", + "rel": "self" + }, + { + "href": "http://127.0.0.1/68/servers/26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe", + "rel": "bookmark" + } + ], + "adminPass": "racktestvJq7d3" + } +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail_EMPTY.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail_EMPTY.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail_EMPTY.json 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail_EMPTY.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,3 @@ +{ + "servers": [] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail_ERROR_STATE.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail_ERROR_STATE.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail_ERROR_STATE.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail_ERROR_STATE.json 2013-12-12 09:01:49.000000000 +0000 @@ -0,0 +1,66 @@ +{ + "servers": [ + { + "status": "ERROR", + "updated": "2013-12-05T21:07:07Z", + "hostId": "2a4a12656a7a57c10188e4ea37f9e09dfb99e3d628f4064f97761e09", + "addresses": { + "pool": [ + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:be:f5:87", + "version": 4, + "addr": "192.168.3.4", + "OS-EXT-IPS:type": "fixed" + } + ] + }, + "links": [ + { + "href": "http://192.168.0.1:8774/v2/dd3eca3de72846948f5d6d975660d325/servers/2d05bd68-3fbb-4b47-9f38-c690a5d93e45", + "rel": "self" + }, + { + "href": "http://192.168.0.1:8774/dd3eca3de72846948f5d6d975660d325/servers/2d05bd68-3fbb-4b47-9f38-c690a5d93e45", + "rel": "bookmark" + } + ], + "key_name": "my_key", + "image": "", + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "error", + "OS-SRV-USG:launched_at": null, + "flavor": { + "id": "4", + "links": [ + { + "href": "http://192.168.0.1:8774/dd3eca3de72846948f5d6d975660d325/flavors/4", + "rel": "bookmark" + } + ] + }, + "id": "2d05bd68-3fbb-4b47-9f38-c690a5d93e45", + "OS-SRV-USG:terminated_at": null, + "OS-EXT-AZ:availability_zone": "nova", + "user_id": "a75c583fa46148eaa020d3e88ab53802", + "name": "test_vm", + "created": "2013-12-02T18:40:36Z", + "tenant_id": "dd3eca3de72846948f5d6d975660d325", + "OS-DCF:diskConfig": "MANUAL", + "os-extended-volumes:volumes_attached": [ + { + "id": "0056485c-ada5-4e44-9905-5b09a18b0139" + } + ], + 
"accessIPv4": "", + "accessIPv6": "", + "fault": { + "message": "The server has either erred or is incapable of performing the requested operation. (HTTP 500) (Request-ID: req-5ec1e01c-bc04-43e7-957d-c810d4357908)", + "code": 500, + "created": "2013-12-05T21:07:07Z" + }, + "OS-EXT-STS:power_state": 0, + "config_drive": "", + "metadata": {} + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1 @@ +{"servers": [{"status": "BUILD", "updated": "2011-10-11T00:50:04Z", "hostId": "912566d83a13fbb357ea3f13c629363d9f7e1ba3f925b49f3d2ab725", "user_id": "rs-reach", "name": "lc-test-2", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/servers/12065", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/servers/12065", "rel": "bookmark"}], "addresses": {"public": [{"version": 4, "addr": "50.57.94.35"}, {"version": 6, "addr": "2001:4801:7808:52:16:3eff:fe47:788a"}], "private": [{"version": 4, "addr": "10.182.64.34"}, {"version": 6, "addr": "fec0:4801:7808:52:16:3eff:fe60:187d"}], "mynetwork": [{"version": 4, "addr": "12.16.18.28"}]}, "tenant_id": "rs-reach-project", "image": {"id": "7", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/7", "rel": "bookmark"}]}, "created": "2011-10-11T00:51:39Z", "uuid": "02786501-714e-40af-8342-9c17eccb166d", "accessIPv4": "", "accessIPv6": "", "key_name": null, "progress": 25, "flavor": {"id": "2", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/2", "rel": "bookmark"}]}, 
"config_drive": "", "id": 12065, "metadata": {}, "OS-DCF:diskConfig": "AUTO"}, {"status": "ACTIVE", "updated": "2011-10-11T00:44:20Z", "hostId": "a024053a6201e6c6c12660aab3d8fd879e332e663a5e1fdbc02a0307", "user_id": "rs-reach", "name": "lc-test", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/servers/12064", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/servers/12064", "rel": "bookmark"}], "addresses": {"public": [{"version": 4, "addr": "50.57.94.30"}, {"version": 6, "addr": "2001:4801:7808:52:16:3eff:fe77:32e3"}], "private": [{"version": 4, "addr": "10.182.64.29"}, {"version": 6, "addr": "fec0:4801:7808:52:16:3eff:fe6e:b7e2"}]}, "tenant_id": "rs-reach-project", "image": {"id": "7", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/7", "rel": "bookmark"}]}, "created": "2011-10-11T00:45:02Z", "uuid": "ec53630b-e4fb-442a-a748-c376f5c4345b", "accessIPv4": "", "accessIPv6": "", "key_name": null, "progress": 100, "flavor": {"id": "2", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/2", "rel": "bookmark"}]}, "config_drive": "", "id": 12064, "metadata": {}, "OS-DCF:diskConfig": "AUTO"}]} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,78 @@ +{ + "server": { + "id": "52415800-8b69-11e0-9b19-734f565bc83b", + "tenantId": "1234", + "userId": "5678", + "name": "new-server-test", + "created": "2010-11-11T12:00:00Z", + "hostId": "e4d909c290d0fb1ca068ffaddf22cbd0", + "accessIPv4" : "67.23.10.138", + "accessIPv6" : "::babe:67.23.10.138", + 
"progress": 0, + "status": "BUILD", + "adminPass": "GFf1j9aP", + "image" : { + "id": "52415800-8b69-11e0-9b19-734f6f006e54", + "name": "CentOS 5.2", + "links": [ + { + "rel": "self", + "href": "http://servers.api.openstack.org/v1.1/1234/images/52415800-8b69-11e0-9b19-734f6f006e54" + }, + { + "rel": "bookmark", + "href": "http://servers.api.openstack.org/1234/images/52415800-8b69-11e0-9b19-734f6f006e54" + } + ] + }, + "flavor" : { + "id": "52415800-8b69-11e0-9b19-734f1195ff37", + "name": "256 MB Server", + "links": [ + { + "rel": "self", + "href": "http://servers.api.openstack.org/v1.1/1234/flavors/52415800-8b69-11e0-9b19-734f1195ff37" + }, + { + "rel": "bookmark", + "href": "http://servers.api.openstack.org/1234/flavors/52415800-8b69-11e0-9b19-734f1195ff37" + } + ] + }, + "metadata": { + "My Server Name": "Apache1" + }, + "addresses": { + "public" : [ + { + "version": 4, + "addr": "67.23.10.138" + }, + { + "version": 6, + "addr": "::babe:67.23.10.138" + } + ], + "private" : [ + { + "version": 4, + "addr": "10.176.42.19" + }, + { + "version": 6, + "addr": "::babe:10.176.42.19" + } + ] + }, + "links": [ + { + "rel": "self", + "href": "http://servers.api.openstack.org/v1.1/1234/servers/52415800-8b69-11e0-9b19-734fcece0043" + }, + { + "rel": "bookmark", + "href": "http://servers.api.openstack.org/1234/servers/52415800-8b69-11e0-9b19-734fcece0043" + } + ] + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_pause.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_pause.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_pause.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_pause.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,3 @@ +{ + "pause": null +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_resume.json 
libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_resume.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_resume.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_resume.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,3 @@ +{ + "resume": null +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_suspend.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_suspend.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_suspend.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_suspend.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,3 @@ +{ + "suspend": null +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_unpause.json libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_unpause.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/openstack_v1.1/_servers_unpause.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/openstack_v1.1/_servers_unpause.json 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,3 @@ +{ + "unpause": null +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,12 @@ + + + + NA1 + US - East + Ashburn + Virginia + US + 
https://opsource-na1.cloud-vpn.net/ + true + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,11 @@ + + + + 53b4c05b-341e-4ac3-b688-bdd74e53ca9b + test-net1 + test-net1 description + NA1 + 10.162.1.0 + false + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + Delete Server + ERROR + Operation in progress on Server with Id 11 + REASON_392 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + Delete Server + SUCCESS + Server "Delete" issued + REASON_0 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + Power Off Server + ERROR + Operation in progress on Server with Id 11 + REASON_392 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + Power Off Server + SUCCESS + Server "Power Off" issued + REASON_0 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml --- 
libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + Restart Server + ERROR + Operation in progress on Server with Id 11 + REASON_392 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ + + Restart Server + SUCCESS + Server "Restart" issued + REASON_0 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + Graceful Shutdown Server + ERROR + Operation in progress on Server with Id 11 + REASON_392 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml 
libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ + + Graceful Shutdown Server + SUCCESS + Server "Graceful Shutdown" issued + REASON_0 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + Start Server + ERROR + Operation in progress on Server with Id 11 + REASON_392 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + Start Server + SUCCESS + Server "Start" issued + REASON_0 + diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,46 @@ + + + + abadbc7e-9e10-46ca-9d4a-194bcc6b6c16 + testnode01 + this is testnode01 description + + 2 + 2048 + 10 + 20 + + UNIX + REDHAT5/64 + + + 44ed8b72-ebea-11df-bdc1-001517c46384 + 53b4c05b-341e-4ac3-b688-bdd78e43ca9e + 10.162.1.1 + 200.16.132.7 + 10-162-1-1 + true + 2011-03-02T17:16:09.882Z + + + dbadbc8e-9e10-56ca-5d4a-155bcc5b5c15 + testnode02 + this is testnode02 description + + 4 + 4096 + 10 + 20 + + UNIX + REDHAT5/64 + + + 44ed8b72-ebea-11df-bdc1-001517c46384 + 53b4c05b-341e-4ac3-b688-bdd78e43ca9e + 10.162.1.2 + 10-162-1-2 + true + 2011-03-02T17:16:10.882Z + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,26 @@ + + + + e75ead52-692f-4314-8725-c8a4f4d13a87 + test2 + test2 node + + 1 + 2048 + 10 + 0 + + UNIX + REDHAT5/64 + + + 52ed8b72-ebea-11df-bdc1-001517c46384 + 52f4c05b-341e-4ac3-b688-bdd78e43ca9e + 10.162.151.11 + + DEPLOY_SERVER + 
2011-03-20T22:32:23.000Z + copia + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ + + Deploy Server + SUCCESS + Server "Deploy" issued + REASON_0 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_base_image.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_base_image.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_base_image.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_base_image.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,339 @@ + + + + 52ed8b72-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed8b72-ebea-11df-bdc1-001517c46384 + RedHat 5.5 64-bit 1 CPU + RedHat 5.5 Enterprise (Tikanga), 64-bit + + UNIX + REDHAT5/64 + + NA1 + 1 + 2048 + 10 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed8dca-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed8dca-ebea-11df-bdc1-001517c46384 + RedHat 5.5 64-bit 2 CPU + RedHat 5.5 Enterprise (Tikanga), 64-bit + + UNIX + REDHAT5/64 + + NA1 + 2 + 4096 + 10 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed8ed8-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed8ed8-ebea-11df-bdc1-001517c46384 + RedHat 5.5 64-bit 4 CPU + RedHat 5.5 Enterprise (Tikanga), 64-bit + + UNIX + REDHAT5/64 + + NA1 + 4 + 6144 + 10 + 0 + 1970-01-01T00:00:02.010Z + + + 6fc040ae-3605-11e0-bfb5-001517c46384 + /oec/base/image/6fc040ae-3605-11e0-bfb5-001517c46384 + RedHat 5.5 32-bit 1 CPU + RedHat 5.5 Enterprise (Tikanga), 32-bit + + UNIX + 
REDHAT5/32 + + NA1 + 1 + 2048 + 10 + 0 + 2011-02-11T17:36:19.000Z + + + 52ed92d4-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed92d4-ebea-11df-bdc1-001517c46384 + Ubuntu 8.04.4 2 CPU + Ubuntu 8.04.4 LTS, 64-bit + + UNIX + UBUNTU8/64 + + NA1 + 2 + 4096 + 10 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed876c-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed876c-ebea-11df-bdc1-001517c46384 + Win2008 Ent 64-bit R2 2 CPU + Windows 2008 Enterprise R2 64-bit + + WINDOWS + WIN2008R2E/64 + + NA1 + 2 + 4096 + 50 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed8a5a-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed8a5a-ebea-11df-bdc1-001517c46384 + Win2008 Ent 64-bit R2 4 CPU + Windows 2008 Enterprise R2 64-bit + + WINDOWS + WIN2008R2E/64 + + NA1 + 4 + 8192 + 50 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed865e-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed865e-ebea-11df-bdc1-001517c46384 + Win2008 Std 64-bit R2 2 CPU + Windows 2008 Standard R2 64-bit + + WINDOWS + WIN2008R2S/64 + + NA1 + 2 + 4096 + 50 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed7b96-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed7b96-ebea-11df-bdc1-001517c46384 + Win2008 Std 32-bit 1 CPU + Windows 2008 Standard SP2 32-bit + + WINDOWS + WIN2008S/32 + + NA1 + 1 + 2048 + 50 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed7cb8-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed7cb8-ebea-11df-bdc1-001517c46384 + Win2008 Std 32-bit 2 CPU + Windows 2008 Standard SP2 32-bit + + WINDOWS + WIN2008S/32 + + NA1 + 2 + 4096 + 50 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed7da8-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed7da8-ebea-11df-bdc1-001517c46384 + Win2008 Std 32-bit 4 CPU + Windows 2008 Standard SP2 32-bit + + WINDOWS + WIN2008S/32 + + NA1 + 4 + 4096 + 50 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed7ea2-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed7ea2-ebea-11df-bdc1-001517c46384 + Win2008 Ent 32-bit 2 CPU + Windows 2008 Enterprise SP2 32-bit + + WINDOWS + WIN2008E/32 + + NA1 + 2 + 4096 + 50 + 0 + 1970-01-01T00:00:02.010Z + + 
+ 52ed8fd2-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed8fd2-ebea-11df-bdc1-001517c46384 + Red Hat 4.8 32-bit 1 CPU + Red Hat ES 4.8 (Nahant), 32-bit + + UNIX + REDHAT4/32 + + NA1 + 1 + 2048 + 10 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed90cc-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed90cc-ebea-11df-bdc1-001517c46384 + CentOS 5.5 32-bit 1 CPU + CentOS release 5.5, 32-bit + + UNIX + CENTOS5/32 + + NA1 + 1 + 2048 + 10 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed91da-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed91da-ebea-11df-bdc1-001517c46384 + CentOS 5.5 64-bit 1 CPU + CentOS release 5.5, 64-bit + + UNIX + CENTOS5/64 + + NA1 + 1 + 2048 + 10 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed766e-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed766e-ebea-11df-bdc1-001517c46384 + Win2003 Ent 32-bit 1 CPU + Windows 2003 Enterprise SP2 32-bit + + WINDOWS + WIN2003E/32 + + NA1 + 1 + 2048 + 16 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed7876-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed7876-ebea-11df-bdc1-001517c46384 + Win2003 Ent 32-bit 2 CPU + Windows 2003 Enterprise SP2 32-bit + + WINDOWS + WIN2003E/32 + + NA1 + 2 + 4096 + 16 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed7984-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed7984-ebea-11df-bdc1-001517c46384 + Win2003 Ent 32-bit 4 CPU + Windows 2003 Enterprise SP2 32-bit + + WINDOWS + WIN2003E/32 + + NA1 + 4 + 4096 + 16 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed7a88-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed7a88-ebea-11df-bdc1-001517c46384 + Win2003 Std 64-bit 2 CPU + Windows 2003 Standard x64 SP2, 64-bit + + WINDOWS + WIN2003S/64 + + NA1 + 2 + 4096 + 16 + 0 + 1970-01-01T00:00:02.010Z + + + 0c231ef0-2a42-11e0-bfb5-001517c46384 + /oec/base/image/0c231ef0-2a42-11e0-bfb5-001517c46384 + RedHat 64-bit 2 CPU with MySQL + RedHat 5.5 Enterprise with MySQL 5.5 installed + + UNIX + REDHAT5/64 + + NA1 + 2 + 8192 + 10 + 0 + 2011-01-27T18:19:58.000Z + + + 2fb5261a-2a42-11e0-bfb5-001517c46384 + 
/oec/base/image/2fb5261a-2a42-11e0-bfb5-001517c46384 + RedHat 64-bit 2 CPU with PostgreSQL + RedHat 5.5 Enterprise with PostgreSQL 9.0 installed + + UNIX + REDHAT5/64 + + NA1 + 2 + 8192 + 10 + 0 + 2011-01-27T18:20:57.000Z + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_myaccount.xml libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_myaccount.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/opsource/oec_0_9_myaccount.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/opsource/oec_0_9_myaccount.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,26 @@ + + + testuser + Test User + Test + User + test@example.com + 8a8f6abc-2745-4d8a-9cbc-8dabe5a7d0e4 + + + create image + + + reports + + + server + + + primary administrator + + + network + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/rimuhosting/r_distributions.json libcloud-0.15.1/libcloud/test/compute/fixtures/rimuhosting/r_distributions.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/rimuhosting/r_distributions.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/rimuhosting/r_distributions.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,22 @@ +{ "get_distros_response" : + { "status_message" : null + , "status_code" : 200 + , "error_info" : null + , "response_type" : "OK" + , "human_readable_message" : "Here are the distros we are offering on new orders." 
+ , "response_display_duration_type" : "REGULAR" + , "distro_infos" : + [{ "distro_code" : "lenny" + , "distro_description" : "Debian 5.0 (aka Lenny, RimuHosting recommended distro)"} + , { "distro_code" : "centos5" + , "distro_description" : "Centos5"} + , { "distro_code" : "ubuntu904" + , "distro_description" : "Ubuntu 9.04 (Jaunty Jackalope, from 2009-04)"} + , { "distro_code" : "ubuntu804" + , "distro_description" : "Ubuntu 8.04 (Hardy Heron, 5 yr long term support (LTS))"} + , { "distro_code" : "ubuntu810" + , "distro_description" : "Ubuntu 8.10 (Intrepid Ibex, from 2008-10)"} + , { "distro_code" : "fedora10" + , "distro_description" : "Fedora 10"}] + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/rimuhosting/r_orders.json libcloud-0.15.1/libcloud/test/compute/fixtures/rimuhosting/r_orders.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/rimuhosting/r_orders.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/rimuhosting/r_orders.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,27 @@ +{ "get_orders_response" : + { "status_message" : null + , "status_code" : 200 + , "error_info" : null + , "response_type" : "OK" + , "human_readable_message" : "Found 15 orders" + , "response_display_duration_type" : "REGULAR", + "about_orders" : + [{ "order_oid" : 88833465 + , "domain_name" : "api.ivan.net.nz" + , "slug" : "order-88833465-api-ivan-net-nz" + , "billing_oid" : 96122465 + , "is_on_customers_own_physical_server" : false + , "vps_parameters" : { "memory_mb" : 160 + , "disk_space_mb" : 4096 + , "disk_space_2_mb" : 0} + , "host_server_oid" : "764" + , "server_type" : "VPS" + , "data_transfer_allowance" : { "data_transfer_gb" : 30 + , "data_transfer" : "30"} + , "billing_info" : { "monthly_recurring_fee": 19.99 } + , "allocated_ips" : { "primary_ip" : "1.2.3.4" + , "secondary_ips" : ["1.2.3.5","1.2.3.6"]} + , "running_state" : "RUNNING" + }] + } +} diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/rimuhosting/r_orders_new_vps.json libcloud-0.15.1/libcloud/test/compute/fixtures/rimuhosting/r_orders_new_vps.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/rimuhosting/r_orders_new_vps.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/rimuhosting/r_orders_new_vps.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,62 @@ +{ "post_new_vps_response" : + { "status_message" : null + , "status_code" : 200 + , "error_info" : null + , "response_type" : "OK" + , "human_readable_message" : null + , "response_display_duration_type" : "REGULAR" + , "setup_messages" : + ["Using user-specified billing data: Wire Transfer" , "Selected user as the owner of the billing details: Ivan Meredith" + , "No VPS paramters provided, using default values."] + , "about_order" : + { "order_oid" : 52255865 + , "domain_name" : "api.ivan.net.nz" + , "slug" : "order-52255865-api-ivan-net-nz" + , "billing_oid" : 96122465 + , "is_on_customers_own_physical_server" : false + , "vps_parameters" : + { "memory_mb" : 160 + , "disk_space_mb" : 4096 + , "disk_space_2_mb" : 0} + , "host_server_oid" : "764" + , "server_type" : "VPS" + , "data_transfer_allowance" : + { "data_transfer_gb" : 30 , "data_transfer" : "30"} + , "billing_info" : { "monthly_recurring_fee" : 19.99 } + , "allocated_ips" : + { "primary_ip" : "74.50.57.80", "secondary_ips" : []} + , "running_state" : "RUNNING"} + , "new_order_request" : + { "billing_oid" : 96122465 + , "user_oid" : 0 + , "host_server_oid" : null + , "vps_order_oid_to_clone" : 0 + , "ip_request" : + { "num_ips" : 1, "extra_ip_reason" : ""} + , "vps_parameters" : + { "memory_mb" : 160 + , "disk_space_mb" : 4096 + , "disk_space_2_mb" : 0} + , "pricing_plan_code" : "MIRO1B" + , "instantiation_options" : + { "control_panel" : "webmin" + , "domain_name" : "api.ivan.net.nz" + , "password" : "aruxauce27" + , "distro" : "lenny"}} + , "running_vps_info" : + { "pings_ok" : true + , 
"current_kernel" : "default" + , "current_kernel_canonical" : "2.6.30.5-xenU.i386" + , "last_backup_message" : "" + , "is_console_login_enabled" : false + , "console_public_authorized_keys" : null + , "is_backup_running" : false + , "is_backups_enabled" : true + , "next_backup_time" : + { "ms_since_epoch": 1256446800000, "iso_format" : "2009-10-25T05:00:00Z", "users_tz_offset_ms" : 46800000} + , "vps_uptime_s" : 31 + , "vps_cpu_time_s" : 6 + , "running_state" : "RUNNING" + , "is_suspended" : false} + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps.json libcloud-0.15.1/libcloud/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,13 @@ +{ "delete_server_response" : + { "status_message" : null + , "status_code" : 200 + , "error_info" : null + , "response_type" : "OK" + , "human_readable_message" : "Server removed" + , "response_display_duration_type" : "REGULAR" + , "cancel_messages" : + ["api.ivan.net.nz is being shut down." + , "A $7.98 credit has been added to your account." 
+ , "If you need to un-cancel the server please contact our support team."] + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps_running_state.json libcloud-0.15.1/libcloud/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps_running_state.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps_running_state.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps_running_state.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,40 @@ +{ "put_running_state_response" : + { "status_message" : null + , "status_code" : 200 + , "error_info" : null + , "response_type" : "OK" + , "human_readable_message" : "api.ivan.net.nz restarted. After the reboot api.ivan.net.nz is pinging OK." + , "response_display_duration_type" : "REGULAR" + , "is_restarted" : true + , "is_pinging" : true + , "running_vps_info" : + { "pings_ok" : true + , "current_kernel" : "default" + , "current_kernel_canonical" : "2.6.30.5-xenU.i386" + , "last_backup_message" : "" + , "is_console_login_enabled" : false + , "console_public_authorized_keys" : null + , "is_backup_running" : false + , "is_backups_enabled" : true + , "next_backup_time" : + { "ms_since_epoch": 1256446800000, "iso_format" : "2009-10-25T05:00:00Z", "users_tz_offset_ms" : 46800000} + , "vps_uptime_s" : 19 + , "vps_cpu_time_s" : 5 + , "running_state" : "RUNNING" + , "is_suspended" : false} + , "host_server_info" : { "is_host64_bit_capable" : true + , "default_kernel_i386" : "2.6.30.5-xenU.i386" + , "default_kernel_x86_64" : "2.6.30.5-xenU.x86_64" + , "cpu_model_name" : "Intel(R) Xeon(R) CPU E5506 @ 2.13GHz" + , "host_num_cores" : 1 + , "host_xen_version" : "3.4.1" + , "hostload" : [1.45 + , 0.56 + , 0.28] + , "host_uptime_s" : 3378276 + , "host_mem_mb_free" : 51825 + , "host_mem_mb_total" : 73719 + , 
"running_vpss" : 34} + , "running_state_messages" : null + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/rimuhosting/r_pricing_plans.json libcloud-0.15.1/libcloud/test/compute/fixtures/rimuhosting/r_pricing_plans.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/rimuhosting/r_pricing_plans.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/rimuhosting/r_pricing_plans.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,26 @@ +{"get_pricing_plans_response" : + { "status_message" : null + , "status_code" : 200 + , "error_info" : null + , "response_type" : "OK" + , "human_readable_message" : "Here some pricing plans we are offering on new orders.  Note we offer most disk and memory sizes.  So if you setup a new server feel free to vary these (e.g. different memory, disk, etc) and we will just adjust the pricing to suit.  Pricing is in USD.  If you are an NZ-based customer then we would need to add GST." + , "response_display_duration_type" : "REGULAR" + , "pricing_plan_infos" : + [{ "pricing_plan_code" : "MiroVPSLowContention" + , "pricing_plan_description" : "MiroVPS Semi-Dedicated Server (Dallas)" + , "monthly_recurring_fee" : 32.54 + , "monthly_recurring_amt" : { + "amt" : 35.0 + , "currency" : "CUR_AUD" + , "amt_usd" : 32.54} + , "minimum_memory_mb" : 950 + , "minimum_disk_gb" : 20 + , "minimum_data_transfer_allowance_gb" : 75 + , "see_also_url" : "http://rimuhosting.com/order/serverdetails.jsp?plan=MiroVPSLowContention" + , "server_type" : "VPS" + , "offered_at_data_center" : + { "data_center_location_code" : "DCDALLAS" + , "data_center_location_name" : "Dallas" } + }] + } +} diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/softlayer/empty.xml libcloud-0.15.1/libcloud/test/compute/fixtures/softlayer/empty.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/softlayer/empty.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/softlayer/empty.xml 2013-08-30 
12:21:18.000000000 +0000 @@ -0,0 +1,2 @@ + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/softlayer/fail.xml libcloud-0.15.1/libcloud/test/compute/fixtures/softlayer/fail.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/softlayer/fail.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/softlayer/fail.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + + faultCode + fail + + + faultString + Failed Call + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/softlayer/SoftLayer_Account.xml libcloud-0.15.1/libcloud/test/compute/fixtures/softlayer/SoftLayer_Account.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/softlayer/SoftLayer_Account.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/softlayer/SoftLayer_Account.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + + faultCode + SoftLayer_Account + + + faultString + Failed Call + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests.xml libcloud-0.15.1/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests.xml 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,1538 @@ + + + + + + + + + + accountId + + 282402 + + + + createDate + + 2013-11-06T14:38:36+01:00 + + + + dedicatedAccountHostOnlyFlag + + 0 + + + + domain + + example.com + + + + fullyQualifiedDomainName + + libcloud-testing1.example.com + + + + hostname + + libcloud-testing1 + + + + id + + 2875152 + + + + lastPowerStateId + + + + + + lastVerifiedDate + + + + + + maxCpu + + 1 + + + + maxCpuUnits + + CORE + + + + maxMemory + + 2048 + + + + metricPollDate + + + + + + modifyDate 
+ + 2013-11-06T14:41:25+01:00 + + + + startCpus + + 1 + + + + statusId + + 1001 + + + + uuid + + ab98fc82-9d74-bc34-6701-f88957ebbba8 + + + + billingItem + + + + allowCancellationFlag + + 1 + + + + cancellationDate + + + + + + categoryCode + + guest_core + + + + createDate + + 2013-11-06T14:38:38+01:00 + + + + currentHourlyCharge + + .112 + + + + cycleStartDate + + 2013-11-06T14:43:20+01:00 + + + + description + + 1 x 2.0 GHz Core + + + + domainName + + example.com + + + + hostName + + libcloud-testing1 + + + + hourlyRecurringFee + + .056 + + + + hoursUsed + + 2 + + + + id + + 16447700 + + + + laborFee + + 0 + + + + laborFeeTaxRate + + .21 + + + + lastBillDate + + 2013-11-06T14:43:20+01:00 + + + + modifyDate + + 2013-11-06T14:43:20+01:00 + + + + nextBillDate + + 2013-11-28T07:00:00+01:00 + + + + oneTimeFee + + 0 + + + + oneTimeFeeTaxRate + + .21 + + + + orderItemId + + 22662589 + + + + parentId + + + + + + recurringFee + + .112 + + + + recurringFeeTaxRate + + .21 + + + + recurringMonths + + 1 + + + + serviceProviderId + + 1 + + + + setupFee + + 0 + + + + setupFeeTaxRate + + .21 + + + + resourceTableId + + 2875152 + + + + + + + datacenter + + + + id + + 168642 + + + + longName + + San Jose 1 + + + + name + + sjc01 + + + + + + + globalIdentifier + + 9ee84f87-e558-4548-9b89-fe7d997706b8 + + + + operatingSystem + + + + hardwareId + + + + + + id + + 2200995 + + + + manufacturerLicenseInstance + + + + + + passwords + + + + + + + createDate + + 2013-11-06T14:38:56+01:00 + + + + id + + 1856975 + + + + modifyDate + + 2013-11-06T14:38:56+01:00 + + + + password + + L3TJVubf + + + + port + + + + + + softwareId + + 2200995 + + + + username + + root + + + + + + + + + + softwareLicense + + + + id + + 1523 + + + + softwareDescriptionId + + 1163 + + + + softwareDescription + + + + controlPanel + + 0 + + + + id + + 1163 + + + + longDescription + + Debian / Debian / 7.0.0-64 Minimal for CCI + + + + manufacturer + + Debian + + + + name + + Debian + + + + operatingSystem + + 1 + + + + 
referenceCode + + DEBIAN_7_64 + + + + upgradeSoftwareDescriptionId + + + + + + upgradeSwDescId + + + + + + version + + 7.0.0-64 Minimal for CCI + + + + virtualLicense + + 0 + + + + virtualizationPlatform + + 0 + + + + requiredUser + + root + + + + + + + + + + + + + powerState + + + + keyName + + RUNNING + + + + name + + Running + + + + + + + primaryBackendIpAddress + + 10.55.43.130 + + + + primaryIpAddress + + 50.23.95.202 + + + + + + + + accountId + + 282402 + + + + createDate + + 2013-11-06T15:14:52+01:00 + + + + dedicatedAccountHostOnlyFlag + + 0 + + + + domain + + example.com + + + + fullyQualifiedDomainName + + libcloud-testing2.example.com + + + + hostname + + libcloud-testing2 + + + + id + + 2875213 + + + + lastPowerStateId + + + + + + lastVerifiedDate + + + + + + maxCpu + + 1 + + + + maxCpuUnits + + CORE + + + + maxMemory + + 2048 + + + + metricPollDate + + + + + + modifyDate + + 2013-11-06T15:17:56+01:00 + + + + startCpus + + 1 + + + + statusId + + 1001 + + + + uuid + + 8f10bea1-3e26-70d7-d581-0d9e820eae0c + + + + billingItem + + + + allowCancellationFlag + + 1 + + + + cancellationDate + + + + + + categoryCode + + guest_core + + + + createDate + + 2013-11-06T15:14:55+01:00 + + + + currentHourlyCharge + + .056 + + + + cycleStartDate + + 2013-11-06T15:19:50+01:00 + + + + description + + 1 x 2.0 GHz Core + + + + domainName + + example.com + + + + hostName + + libcloud-testing2 + + + + hourlyRecurringFee + + .056 + + + + hoursUsed + + 1 + + + + id + + 16447908 + + + + laborFee + + 0 + + + + laborFeeTaxRate + + .21 + + + + lastBillDate + + 2013-11-06T15:19:50+01:00 + + + + modifyDate + + 2013-11-06T15:19:50+01:00 + + + + nextBillDate + + 2013-11-28T07:00:00+01:00 + + + + oneTimeFee + + 0 + + + + oneTimeFeeTaxRate + + .21 + + + + orderItemId + + 22663091 + + + + parentId + + + + + + recurringFee + + .056 + + + + recurringFeeTaxRate + + .21 + + + + recurringMonths + + 1 + + + + serviceProviderId + + 1 + + + + setupFee + + 0 + + + + setupFeeTaxRate + + .21 + + + + 
resourceTableId + + 2875213 + + + + + + + datacenter + + + + id + + 168642 + + + + longName + + San Jose 1 + + + + name + + sjc01 + + + + + + + globalIdentifier + + 5c704e34-6ee7-4efe-9722-af9d406fa930 + + + + operatingSystem + + + + hardwareId + + + + + + id + + 2201027 + + + + manufacturerLicenseInstance + + + + + + softwareLicense + + + + id + + 1523 + + + + softwareDescriptionId + + 1163 + + + + softwareDescription + + + + controlPanel + + 0 + + + + id + + 1163 + + + + longDescription + + Debian / Debian / 7.0.0-64 Minimal for CCI + + + + manufacturer + + Debian + + + + name + + Debian + + + + operatingSystem + + 1 + + + + referenceCode + + DEBIAN_7_64 + + + + upgradeSoftwareDescriptionId + + + + + + upgradeSwDescId + + + + + + version + + 7.0.0-64 Minimal for CCI + + + + virtualLicense + + 0 + + + + virtualizationPlatform + + 0 + + + + requiredUser + + root + + + + + + + + + + + + + powerState + + + + keyName + + INITIATING + + + + name + + Running + + + + + + + primaryBackendIpAddress + + 10.55.43.131 + + + + primaryIpAddress + + 50.23.95.203 + + + + + + + + accountId + + 282402 + + + + createDate + + 2013-11-06T15:36:53+01:00 + + + + dedicatedAccountHostOnlyFlag + + 0 + + + + domain + + example.com + + + + fullyQualifiedDomainName + + libcloud-testing.example.com + + + + hostname + + libcloud-testing + + + + id + + 2875273 + + + + lastPowerStateId + + + + + + lastVerifiedDate + + + + + + maxCpu + + 1 + + + + maxCpuUnits + + CORE + + + + maxMemory + + 2048 + + + + metricPollDate + + + + + + modifyDate + + 2013-11-06T15:39:35+01:00 + + + + startCpus + + 1 + + + + statusId + + 1001 + + + + uuid + + f86371c5-103b-34d3-ae27-e4dafa1c4718 + + + + billingItem + + + + allowCancellationFlag + + 1 + + + + cancellationDate + + + + + + categoryCode + + guest_core + + + + createDate + + 2013-11-06T15:36:55+01:00 + + + + currentHourlyCharge + + .056 + + + + cycleStartDate + + 2013-11-06T15:41:31+01:00 + + + + description + + 1 x 2.0 GHz Core + + + + domainName + + 
example.com + + + + hostName + + libcloud-testing + + + + hourlyRecurringFee + + .056 + + + + hoursUsed + + 1 + + + + id + + 16448162 + + + + laborFee + + 0 + + + + laborFeeTaxRate + + .21 + + + + lastBillDate + + 2013-11-06T15:41:31+01:00 + + + + modifyDate + + 2013-11-06T15:41:31+01:00 + + + + nextBillDate + + 2013-11-28T07:00:00+01:00 + + + + oneTimeFee + + 0 + + + + oneTimeFeeTaxRate + + .21 + + + + orderItemId + + 22663578 + + + + parentId + + + + + + recurringFee + + .056 + + + + recurringFeeTaxRate + + .21 + + + + recurringMonths + + 1 + + + + serviceProviderId + + 1 + + + + setupFee + + 0 + + + + setupFeeTaxRate + + .21 + + + + resourceTableId + + 2875273 + + + + + + + datacenter + + + + id + + 168642 + + + + longName + + San Jose 1 + + + + name + + sjc01 + + + + + + + globalIdentifier + + e8ab9d1c-edd8-4a1a-a13c-ff74838b5ab6 + + + + operatingSystem + + + + hardwareId + + + + + + id + + 2201049 + + + + manufacturerLicenseInstance + + + + + + passwords + + + + + + + createDate + + 2013-11-06T15:37:10+01:00 + + + + id + + 1857066 + + + + modifyDate + + 2013-11-06T15:37:10+01:00 + + + + password + + HmyHw89J + + + + port + + + + + + softwareId + + 2201049 + + + + username + + root + + + + + + + + + + softwareLicense + + + + id + + 1523 + + + + softwareDescriptionId + + 1163 + + + + softwareDescription + + + + controlPanel + + 0 + + + + id + + 1163 + + + + longDescription + + Debian / Debian / 7.0.0-64 Minimal for CCI + + + + manufacturer + + Debian + + + + name + + Debian + + + + operatingSystem + + 1 + + + + referenceCode + + DEBIAN_7_64 + + + + upgradeSoftwareDescriptionId + + + + + + upgradeSwDescId + + + + + + version + + 7.0.0-64 Minimal for CCI + + + + virtualLicense + + 0 + + + + virtualizationPlatform + + 0 + + + + requiredUser + + root + + + + + + + + + + + + + powerState + + + + keyName + + RUNNING + + + + name + + Running + + + + + + + primaryBackendIpAddress + + 10.55.43.132 + + + + primaryIpAddress + + 50.23.95.204 + + + + + + + + + + diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getDatacenters.xml libcloud-0.15.1/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getDatacenters.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getDatacenters.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getDatacenters.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,121 @@ + + + + + + + + + + id + + 2 + + + + name + + dal05 + + + + longName + + Dallas 5 + + + + + + + + id + + 3 + + + + name + + dal01 + + + + longName + + Dallas + + + + + + + + id + + 18171 + + + + name + + sea01 + + + + longName + + Seattle + + + + + + + + id + + 37473 + + + + name + + wdc01 + + + + longName + + Washington, DC + + + + + + + + id + + 12345 + + + + name + + newcity01 + + + + longName + + New City + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_createObject.xml libcloud-0.15.1/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_createObject.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_createObject.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_createObject.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,142 @@ + + + + + + + accountId + + 12345 + + + + createDate + + 2013-01-01T19:31:22-06:00 + + + + dedicatedAccountHostOnlyFlag + + 0 + + + + domain + + domain.com + + + + fullyQualifiedDomainName + + hostname.domain.com + + + + hostname + + hostname + + + + id + + 123456 + + + + lastPowerStateId + + + + + + lastVerifiedDate + + + + + + maxCpu + + 2 + + + + maxCpuUnits + + CORE + + + + maxMemory + + 2048 + + + + metricPollDate + + + + + + modifyDate + + + + + + privateNetworkOnlyFlag + + 0 + + + + startCpus + + 2 + + + + 
statusId + + 1001 + + + + globalIdentifier + + f47ac10b-58cc-4372-a567-0e02b2c3d479 + + + + managedResourceFlag + + 0 + + + + powerState + + + + keyName + + HALTED + + + + name + + Halted + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getCreateObjectOptions.xml libcloud-0.15.1/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getCreateObjectOptions.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getCreateObjectOptions.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getCreateObjectOptions.xml 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,12527 @@ + + + + + + + blockDevices + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + 25 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 0 + + + + diskImage + + + + capacity + + 25 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .025 + + + + recurringFee + + 18 + + + + item + + + + description + + 100 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 0 + + + + diskImage + + + + capacity + + 100 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .012 + + + + recurringFee + + 8 + + + + item + + + + description + + 10 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 10 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .015 + + + + recurringFee + + 10 + + + + item + + + + description + + 20 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 20 + + + + + + + + + + + + + 
localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .016 + + + + recurringFee + + 11 + + + + item + + + + description + + 25 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 25 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .017 + + + + recurringFee + + 12 + + + + item + + + + description + + 30 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 30 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .019 + + + + recurringFee + + 14 + + + + item + + + + description + + 40 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 40 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .021 + + + + recurringFee + + 16 + + + + item + + + + description + + 50 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 50 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .023 + + + + recurringFee + + 17 + + + + item + + + + description + + 75 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 75 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .025 + + + + recurringFee + + 18 + + + + item + + + + description + + 100 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 100 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .028 + + + + recurringFee + + 19 + + + + item + + + + 
description + + 125 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 125 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .03 + + + + recurringFee + + 20 + + + + item + + + + description + + 150 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 150 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .035 + + + + recurringFee + + 21 + + + + item + + + + description + + 175 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 175 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .04 + + + + recurringFee + + 22 + + + + item + + + + description + + 200 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 200 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .05 + + + + recurringFee + + 25 + + + + item + + + + description + + 250 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 250 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .06 + + + + recurringFee + + 32.5 + + + + item + + + + description + + 300 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 300 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .07 + + + + recurringFee + + 40 + + + + item + + + + description + + 350 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + 
+ capacity + + 350 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .08 + + + + recurringFee + + 45 + + + + item + + + + description + + 400 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 400 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .1 + + + + recurringFee + + 50 + + + + item + + + + description + + 500 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 500 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .15 + + + + recurringFee + + 75 + + + + item + + + + description + + 750 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 750 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .2 + + + + recurringFee + + 100 + + + + item + + + + description + + 1,000 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 1000 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .25 + + + + recurringFee + + 150 + + + + item + + + + description + + 1,500 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 1500 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .3 + + + + recurringFee + + 200 + + + + item + + + + description + + 2,000 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 2000 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee 
+ + .012 + + + + recurringFee + + 8 + + + + item + + + + description + + 10 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 10 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .015 + + + + recurringFee + + 10 + + + + item + + + + description + + 20 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 20 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .016 + + + + recurringFee + + 11 + + + + item + + + + description + + 25 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 25 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .017 + + + + recurringFee + + 12 + + + + item + + + + description + + 30 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 30 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .019 + + + + recurringFee + + 14 + + + + item + + + + description + + 40 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 40 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .021 + + + + recurringFee + + 16 + + + + item + + + + description + + 50 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 50 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .023 + + + + recurringFee + + 17 + + + + item + + + + description + + 75 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + 
+ + + + + device + + 3 + + + + diskImage + + + + capacity + + 75 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .025 + + + + recurringFee + + 18 + + + + item + + + + description + + 100 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 100 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .028 + + + + recurringFee + + 19 + + + + item + + + + description + + 125 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 125 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .03 + + + + recurringFee + + 20 + + + + item + + + + description + + 150 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 150 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .035 + + + + recurringFee + + 21 + + + + item + + + + description + + 175 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 175 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .04 + + + + recurringFee + + 22 + + + + item + + + + description + + 200 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 200 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .05 + + + + recurringFee + + 25 + + + + item + + + + description + + 250 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 250 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + 
+ itemPrice + + + + hourlyRecurringFee + + .06 + + + + recurringFee + + 32.5 + + + + item + + + + description + + 300 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 300 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .07 + + + + recurringFee + + 40 + + + + item + + + + description + + 350 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 350 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .08 + + + + recurringFee + + 45 + + + + item + + + + description + + 400 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 400 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .1 + + + + recurringFee + + 50 + + + + item + + + + description + + 500 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 500 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .15 + + + + recurringFee + + 75 + + + + item + + + + description + + 750 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 750 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .2 + + + + recurringFee + + 100 + + + + item + + + + description + + 1,000 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 1000 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .25 + + + + recurringFee + + 150 + + + + item + + + + description + + 1,500 GB (SAN) + 
+ + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 1500 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .3 + + + + recurringFee + + 200 + + + + item + + + + description + + 2,000 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 3 + + + + diskImage + + + + capacity + + 2000 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .012 + + + + recurringFee + + 8 + + + + item + + + + description + + 10 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 10 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .015 + + + + recurringFee + + 10 + + + + item + + + + description + + 20 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 20 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .016 + + + + recurringFee + + 11 + + + + item + + + + description + + 25 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 25 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .017 + + + + recurringFee + + 12 + + + + item + + + + description + + 30 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 30 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .019 + + + + recurringFee + + 14 + + + + item + + + + description + + 40 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 40 + + + + + + + + + 
+ + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .021 + + + + recurringFee + + 16 + + + + item + + + + description + + 50 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 50 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .023 + + + + recurringFee + + 17 + + + + item + + + + description + + 75 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 75 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .025 + + + + recurringFee + + 18 + + + + item + + + + description + + 100 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 100 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .028 + + + + recurringFee + + 19 + + + + item + + + + description + + 125 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 125 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .03 + + + + recurringFee + + 20 + + + + item + + + + description + + 150 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 150 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .035 + + + + recurringFee + + 21 + + + + item + + + + description + + 175 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 175 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .04 + + + + recurringFee + + 22 + + + + 
item + + + + description + + 200 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 200 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .05 + + + + recurringFee + + 25 + + + + item + + + + description + + 250 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 250 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .06 + + + + recurringFee + + 32.5 + + + + item + + + + description + + 300 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 300 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .07 + + + + recurringFee + + 40 + + + + item + + + + description + + 350 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 350 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .08 + + + + recurringFee + + 45 + + + + item + + + + description + + 400 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 400 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .1 + + + + recurringFee + + 50 + + + + item + + + + description + + 500 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 500 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .15 + + + + recurringFee + + 75 + + + + item + + + + description + + 750 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + 
diskImage + + + + capacity + + 750 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .2 + + + + recurringFee + + 100 + + + + item + + + + description + + 1,000 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 1000 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .25 + + + + recurringFee + + 150 + + + + item + + + + description + + 1,500 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 1500 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .3 + + + + recurringFee + + 200 + + + + item + + + + description + + 2,000 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 4 + + + + diskImage + + + + capacity + + 2000 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .012 + + + + recurringFee + + 8 + + + + item + + + + description + + 10 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 10 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .015 + + + + recurringFee + + 10 + + + + item + + + + description + + 20 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 20 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .016 + + + + recurringFee + + 11 + + + + item + + + + description + + 25 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 25 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + 
hourlyRecurringFee + + .017 + + + + recurringFee + + 12 + + + + item + + + + description + + 30 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 30 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .019 + + + + recurringFee + + 14 + + + + item + + + + description + + 40 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 40 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .021 + + + + recurringFee + + 16 + + + + item + + + + description + + 50 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 50 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .023 + + + + recurringFee + + 17 + + + + item + + + + description + + 75 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 75 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .025 + + + + recurringFee + + 18 + + + + item + + + + description + + 100 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 100 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .028 + + + + recurringFee + + 19 + + + + item + + + + description + + 125 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 125 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .03 + + + + recurringFee + + 20 + + + + item + + + + description + + 150 GB (SAN) + + + + + + + + + + template + 
+ + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 150 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .035 + + + + recurringFee + + 21 + + + + item + + + + description + + 175 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 175 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .04 + + + + recurringFee + + 22 + + + + item + + + + description + + 200 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 200 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .05 + + + + recurringFee + + 25 + + + + item + + + + description + + 250 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 250 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .06 + + + + recurringFee + + 32.5 + + + + item + + + + description + + 300 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 300 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .07 + + + + recurringFee + + 40 + + + + item + + + + description + + 350 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 350 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .08 + + + + recurringFee + + 45 + + + + item + + + + description + + 400 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 400 + + + + + + + + + + + + + localDiskFlag + 
+ 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .1 + + + + recurringFee + + 50 + + + + item + + + + description + + 500 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 500 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .15 + + + + recurringFee + + 75 + + + + item + + + + description + + 750 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 750 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .2 + + + + recurringFee + + 100 + + + + item + + + + description + + 1,000 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 1000 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .25 + + + + recurringFee + + 150 + + + + item + + + + description + + 1,500 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 1500 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .3 + + + + recurringFee + + 200 + + + + item + + + + description + + 2,000 GB (SAN) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 5 + + + + diskImage + + + + capacity + + 2000 + + + + + + + + + + + + + localDiskFlag + + 0 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + 25 GB (LOCAL) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 0 + + + + diskImage + + + + capacity + + 25 + + + + + + + + + + + + + localDiskFlag + + 1 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .01 + + + + recurringFee + + 7 + + + + item + + + + 
description + + 100 GB (LOCAL) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 0 + + + + diskImage + + + + capacity + + 100 + + + + + + + + + + + + + localDiskFlag + + 1 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .005 + + + + recurringFee + + 3.25 + + + + item + + + + description + + 25 GB (LOCAL) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 25 + + + + + + + + + + + + + localDiskFlag + + 1 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .01 + + + + recurringFee + + 7 + + + + item + + + + description + + 100 GB (LOCAL) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 100 + + + + + + + + + + + + + localDiskFlag + + 1 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .015 + + + + recurringFee + + 9 + + + + item + + + + description + + 150 GB (LOCAL) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 150 + + + + + + + + + + + + + localDiskFlag + + 1 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .02 + + + + recurringFee + + 14 + + + + item + + + + description + + 200 GB (LOCAL) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 200 + + + + + + + + + + + + + localDiskFlag + + 1 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .03 + + + + recurringFee + + 21 + + + + item + + + + description + + 300 GB (LOCAL) + + + + + + + + + + template + + + + blockDevices + + + + + + + device + + 2 + + + + diskImage + + + + capacity + + 300 + + + + + + + + + + + + + localDiskFlag + + 1 + + + + + + + + + + + + + datacenters + + + + + + + template + + + + datacenter + + + + name + + ams01 + + + + + + + + + + + + + + template + + + + datacenter + + + + name + + dal05 + + + + + + + + + + + + + + template + + + + datacenter 
+ + + + name + + dal06 + + + + + + + + + + + + + + template + + + + datacenter + + + + name + + sea01 + + + + + + + + + + + + + + template + + + + datacenter + + + + name + + sjc01 + + + + + + + + + + + + + + template + + + + datacenter + + + + name + + sng01 + + + + + + + + + + + + + + template + + + + datacenter + + + + name + + wdc01 + + + + + + + + + + + + + + + + memory + + + + + + + itemPrice + + + + hourlyRecurringFee + + .03 + + + + recurringFee + + 21 + + + + item + + + + description + + 1 GB + + + + + + + + + + template + + + + maxMemory + + 1024 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .06 + + + + recurringFee + + 42 + + + + item + + + + description + + 2 GB + + + + + + + + + + template + + + + maxMemory + + 2048 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .085 + + + + recurringFee + + 59.5 + + + + item + + + + description + + 3 GB + + + + + + + + + + template + + + + maxMemory + + 3072 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .11 + + + + recurringFee + + 77 + + + + item + + + + description + + 4 GB + + + + + + + + + + template + + + + maxMemory + + 4096 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .14 + + + + recurringFee + + 98 + + + + item + + + + description + + 5 GB + + + + + + + + + + template + + + + maxMemory + + 5120 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .165 + + + + recurringFee + + 115.5 + + + + item + + + + description + + 6 GB + + + + + + + + + + template + + + + maxMemory + + 6144 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .195 + + + + recurringFee + + 136.5 + + + + item + + + + description + + 7 GB + + + + + + + + + + template + + + + maxMemory + + 7168 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .215 + + + + recurringFee + + 150.5 + + + + item + + + + description + + 8 GB + + + + + + + + + + template + + + + maxMemory + + 8192 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .245 + 
+ + + recurringFee + + 171.5 + + + + item + + + + description + + 9 GB + + + + + + + + + + template + + + + maxMemory + + 9216 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .265 + + + + recurringFee + + 185.5 + + + + item + + + + description + + 10 GB + + + + + + + + + + template + + + + maxMemory + + 10240 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .29 + + + + recurringFee + + 203 + + + + item + + + + description + + 11 GB + + + + + + + + + + template + + + + maxMemory + + 11264 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .31 + + + + recurringFee + + 217 + + + + item + + + + description + + 12 GB + + + + + + + + + + template + + + + maxMemory + + 12288 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .33 + + + + recurringFee + + 231 + + + + item + + + + description + + 13 GB + + + + + + + + + + template + + + + maxMemory + + 13312 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .345 + + + + recurringFee + + 241.5 + + + + item + + + + description + + 14 GB + + + + + + + + + + template + + + + maxMemory + + 14336 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .36 + + + + recurringFee + + 252 + + + + item + + + + description + + 15 GB + + + + + + + + + + template + + + + maxMemory + + 15360 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .375 + + + + recurringFee + + 262.5 + + + + item + + + + description + + 16 GB + + + + + + + + + + template + + + + maxMemory + + 16384 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .76 + + + + recurringFee + + 525 + + + + item + + + + description + + 32 GB + + + + + + + + + + template + + + + maxMemory + + 32768 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 3.5 + + + + recurringFee + + 2500 + + + + item + + + + description + + 48 GB + + + + + + + + + + template + + + + maxMemory + + 49152 + + + + + + + + + + + + + networkComponents + + + + + + + itemPrice + + + + hourlyRecurringFee + + 
0 + + + + recurringFee + + 0 + + + + item + + + + description + + 10 Mbps Public & Private Networks + + + + + + + + + + template + + + + networkComponents + + + + + + + maxSpeed + + 10 + + + + + + + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + 100 Mbps Private Network + + + + + + + + + + template + + + + networkComponents + + + + + + + maxSpeed + + 100 + + + + + + + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .04 + + + + recurringFee + + 20 + + + + item + + + + description + + 1 Gbps Public & Private Networks + + + + + + + + + + template + + + + networkComponents + + + + + + + maxSpeed + + 1000 + + + + + + + + + + + + + + + + + + + operatingSystems + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + CentOS 6.0 - Minimal Install (64 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + CENTOS_6_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + CentOS 6.0 - Minimal Install (32 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + CENTOS_6_32 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + CentOS 5 - Minimal Install (64 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + CENTOS_5_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + CentOS 5 - Minimal Install (32 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + CENTOS_5_32 + + + + + + + + + + + itemPrice + + + + recurringFee + + 12 + + + + item + + + + description + + CloudLinux 6 (64 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + CLOUDLINUX_6_64 + + + + + + + + + + + itemPrice + + + + recurringFee + 
+ 12 + + + + item + + + + description + + CloudLinux 6 (32 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + CLOUDLINUX_6_32 + + + + + + + + + + + itemPrice + + + + recurringFee + + 12 + + + + item + + + + description + + CloudLinux 5 (64 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + CLOUDLINUX_5_64 + + + + + + + + + + + itemPrice + + + + recurringFee + + 12 + + + + item + + + + description + + CloudLinux 5 (32 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + CLOUDLINUX_5_32 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + Debian GNU/Linux 7.0 Wheezy/Stable - Minimal Install (64 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + DEBIAN_7_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + Debian GNU/Linux 7.0 Wheezy/Stable - Minimal Install (32 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + DEBIAN_7_32 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + Debian GNU/Linux 6.0 Squeeze/Stable - Minimal Install (64 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + DEBIAN_6_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + Debian GNU/Linux 6.0 Squeeze/Stable - Minimal Install (32 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + DEBIAN_6_32 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + Debian GNU/Linux 5.0 Lenny/Stable - Minimal Install (64 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + DEBIAN_5_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + 
recurringFee + + 0 + + + + item + + + + description + + Debian GNU/Linux 5.0 Lenny/Stable - Minimal Install (32 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + DEBIAN_5_32 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .08 + + + + recurringFee + + 45 + + + + item + + + + description + + Red Hat Enterprise Linux 6 - Minimal Install (64 bit) (1 - 4 Core) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + REDHAT_6_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .12 + + + + recurringFee + + 100 + + + + item + + + + description + + Red Hat Enterprise Linux 6 - Minimal Install (64 bit) (5 - 100 Core) + + + + id + + -3839 + + + + softwareDescriptionId + + + + + + upgradeItemId + + + + + + softwareDescription + + + + controlPanel + + 0 + + + + id + + 795 + + + + longDescription + + Redhat / EL / 6.0-64 Minimal for CCI + + + + manufacturer + + Redhat + + + + name + + EL + + + + operatingSystem + + 1 + + + + referenceCode + + REDHAT_6_64 + + + + upgradeSoftwareDescriptionId + + + + + + upgradeSwDescId + + + + + + version + + 6.0-64 Minimal for CCI + + + + virtualLicense + + 0 + + + + virtualizationPlatform + + 0 + + + + requiredUser + + root + + + + + + + + + + + + + template + + + + operatingSystemReferenceCode + + REDHAT_6_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .08 + + + + recurringFee + + 45 + + + + item + + + + description + + Red Hat Enterprise Linux 6 - Minimal Install (32 bit) (1 - 4 Core) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + REDHAT_6_32 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .12 + + + + recurringFee + + 100 + + + + item + + + + description + + Red Hat Enterprise Linux 6 - Minimal Install (32 bit) (5 - 100 Core) + + + + id + + -3837 + + + + softwareDescriptionId + + + + + + upgradeItemId + + + + + + softwareDescription + + + + controlPanel + + 0 + + + + id + + 796 + + + + longDescription + + Redhat / EL / 
6.0-32 Minimal for CCI + + + + manufacturer + + Redhat + + + + name + + EL + + + + operatingSystem + + 1 + + + + referenceCode + + REDHAT_6_32 + + + + upgradeSoftwareDescriptionId + + + + + + upgradeSwDescId + + + + + + version + + 6.0-32 Minimal for CCI + + + + virtualLicense + + 0 + + + + virtualizationPlatform + + 0 + + + + requiredUser + + root + + + + + + + + + + + + + template + + + + operatingSystemReferenceCode + + REDHAT_6_32 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .08 + + + + recurringFee + + 45 + + + + item + + + + description + + Red Hat Enterprise Linux 5 - Minimal Install (64 bit) (1 - 4 Core) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + REDHAT_5_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .12 + + + + recurringFee + + 100 + + + + item + + + + description + + Red Hat Enterprise Linux 5 - Minimal Install (64 bit) (5 - 100 Core) + + + + id + + -880 + + + + softwareDescriptionId + + + + + + upgradeItemId + + + + + + softwareDescription + + + + controlPanel + + 0 + + + + id + + 664 + + + + longDescription + + Redhat / EL / 5.5-64 Minimal for CCI + + + + manufacturer + + Redhat + + + + name + + EL + + + + operatingSystem + + 1 + + + + referenceCode + + REDHAT_5_64 + + + + upgradeSoftwareDescriptionId + + + + + + upgradeSwDescId + + + + + + version + + 5.5-64 Minimal for CCI + + + + virtualLicense + + 0 + + + + virtualizationPlatform + + 0 + + + + requiredUser + + root + + + + + + + + + + + + + template + + + + operatingSystemReferenceCode + + REDHAT_5_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .08 + + + + recurringFee + + 45 + + + + item + + + + description + + Red Hat Enterprise Linux 5 - Minimal Install (32 bit) (1 - 4 Core) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + REDHAT_5_32 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .12 + + + + recurringFee + + 100 + + + + item + + + + description + + Red Hat Enterprise Linux 5 - 
Minimal Install (32 bit) (5 - 100 Core) + + + + id + + -879 + + + + softwareDescriptionId + + + + + + upgradeItemId + + + + + + softwareDescription + + + + controlPanel + + 0 + + + + id + + 662 + + + + longDescription + + Redhat / EL / 5.5-32 Minimal for CCI + + + + manufacturer + + Redhat + + + + name + + EL + + + + operatingSystem + + 1 + + + + referenceCode + + REDHAT_5_32 + + + + upgradeSoftwareDescriptionId + + 927 + + + + upgradeSwDescId + + 927 + + + + version + + 5.5-32 Minimal for CCI + + + + virtualLicense + + 0 + + + + virtualizationPlatform + + 0 + + + + requiredUser + + root + + + + + + + + + + + + + template + + + + operatingSystemReferenceCode + + REDHAT_5_32 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + Ubuntu Linux 12.04 LTS Precise Pangolin - Minimal Install (64 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + UBUNTU_12_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + Ubuntu Linux 12.04 LTS Precise Pangolin - Minimal Install (32 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + UBUNTU_12_32 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + Ubuntu Linux 10.04 LTS Lucid Lynx - Minimal Install (64 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + UBUNTU_10_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + Ubuntu Linux 10.04 LTS Lucid Lynx - Minimal Install (32 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + UBUNTU_10_32 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + Ubuntu Linux 8 LTS Hardy Heron - Minimal Install (64 bit) + + + + + + + + + + 
template + + + + operatingSystemReferenceCode + + UBUNTU_8_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + Ubuntu Linux 8 LTS Hardy Heron - Minimal Install (32 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + UBUNTU_8_32 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + Vyatta 6.6 Community Edition (64 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + VYATTACE_6.6_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + 0 + + + + recurringFee + + 0 + + + + item + + + + description + + Vyatta 6.5 Community Edition (64 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + VYATTACE_6.5_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .05 + + + + recurringFee + + 20 + + + + item + + + + description + + Windows Server 2012 Standard Edition (64 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + WIN_2012-STD_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .05 + + + + recurringFee + + 20 + + + + item + + + + description + + Windows Server 2012 Datacenter Edition (64bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + WIN_2012-DC_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .05 + + + + recurringFee + + 20 + + + + item + + + + description + + Windows Server 2008 Standard Edition SP2 (64bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + WIN_2008-STD-SP2_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .05 + + + + recurringFee + + 20 + + + + item + + + + description + + Windows Server 2008 Standard Edition SP2 (32bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + WIN_2008-STD-SP2_32 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .05 + + + + recurringFee + + 
20 + + + + item + + + + description + + Windows Server 2008 Standard SP1 with R2 (64 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + WIN_2008-STD-R2-SP1_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .05 + + + + recurringFee + + 20 + + + + item + + + + description + + Windows Server 2008 R2 Standard Edition (64bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + WIN_2008-STD-R2_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .1 + + + + recurringFee + + 50 + + + + item + + + + description + + Windows Server 2008 Enterprise Edition SP2 (64bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + WIN_2008-ENT-SP2_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .1 + + + + recurringFee + + 50 + + + + item + + + + description + + Windows Server 2008 Enterprise Edition SP2 (32bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + WIN_2008-ENT-SP2_32 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .1 + + + + recurringFee + + 50 + + + + item + + + + description + + Windows Server 2008 R2 Enterprise Edition (64bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + WIN_2008-ENT-R2_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .05 + + + + recurringFee + + 20 + + + + item + + + + description + + Windows Server 2008 Datacenter Edition SP2 (64bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + WIN_2008-DC-SP2_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .05 + + + + recurringFee + + 20 + + + + item + + + + description + + Windows Server 2008 R2 Datacenter Edition (64bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + WIN_2008-DC-R2_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .05 + + + + recurringFee + + 20 + + + + item + + + + description + + Windows Server 2003 Standard SP2 with R2 (64 bit) + + + + + 
+ + + + + template + + + + operatingSystemReferenceCode + + WIN_2003-STD-SP2-5_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .05 + + + + recurringFee + + 20 + + + + item + + + + description + + Windows Server 2003 Standard SP2 with R2 (32 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + WIN_2003-STD-SP2-5_32 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .05 + + + + recurringFee + + 50 + + + + item + + + + description + + Windows Server 2003 Enterprise SP2 with R2 (64 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + WIN_2003-ENT-SP2-5_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .05 + + + + recurringFee + + 50 + + + + item + + + + description + + Windows Server 2003 Enterprise SP2 with R2 (32 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + WIN_2003-ENT-SP2-5_32 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .05 + + + + recurringFee + + 20 + + + + item + + + + description + + Windows Server 2003 Datacenter SP2 with R2 (64 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + WIN_2003-DC-SP2-1_64 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .05 + + + + recurringFee + + 20 + + + + item + + + + description + + Windows Server 2003 Datacenter SP2 with R2 (32 bit) + + + + + + + + + + template + + + + operatingSystemReferenceCode + + WIN_2003-DC-SP2-1_32 + + + + + + + + + + + + + processors + + + + + + + itemPrice + + + + hourlyRecurringFee + + .07 + + + + recurringFee + + 29 + + + + item + + + + description + + 1 x 2.0 GHz Core + + + + + + + + + + template + + + + startCpus + + 1 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .14 + + + + recurringFee + + 78 + + + + item + + + + description + + 2 x 2.0 GHz Cores + + + + + + + + + + template + + + + startCpus + + 2 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .205 + + + + recurringFee + + 123.5 + + + + 
item + + + + description + + 3 x 2.0 GHz Cores + + + + + + + + + + template + + + + startCpus + + 3 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .265 + + + + recurringFee + + 165.5 + + + + item + + + + description + + 4 x 2.0 GHz Cores + + + + + + + + + + template + + + + startCpus + + 4 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .325 + + + + recurringFee + + 207.5 + + + + item + + + + description + + 5 x 2.0 GHz Cores + + + + + + + + + + template + + + + startCpus + + 5 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .39 + + + + recurringFee + + 253 + + + + item + + + + description + + 6 x 2.0 GHz Cores + + + + + + + + + + template + + + + startCpus + + 6 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .445 + + + + recurringFee + + 291.5 + + + + item + + + + description + + 7 x 2.0 GHz Cores + + + + + + + + + + template + + + + startCpus + + 7 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .495 + + + + recurringFee + + 326.5 + + + + item + + + + description + + 8 x 2.0 GHz Cores + + + + + + + + + + template + + + + startCpus + + 8 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .54 + + + + recurringFee + + 358 + + + + item + + + + description + + 9 x 2.0 GHz Cores + + + + + + + + + + template + + + + startCpus + + 9 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .59 + + + + recurringFee + + 393 + + + + item + + + + description + + 10 x 2.0 GHz Cores + + + + + + + + + + template + + + + startCpus + + 10 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .635 + + + + recurringFee + + 424.5 + + + + item + + + + description + + 11 x 2.0 GHz Cores + + + + + + + + + + template + + + + startCpus + + 11 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .66 + + + + recurringFee + + 442 + + + + item + + + + description + + 12 x 2.0 GHz Cores + + + + + + + + + + template + + + + startCpus + + 12 + + + + + + + + + + + itemPrice + + + + 
hourlyRecurringFee + + .695 + + + + recurringFee + + 466.5 + + + + item + + + + description + + 13 x 2.0 GHz Cores + + + + + + + + + + template + + + + startCpus + + 13 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .72 + + + + recurringFee + + 484 + + + + item + + + + description + + 14 x 2.0 GHz Cores + + + + + + + + + + template + + + + startCpus + + 14 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .74 + + + + recurringFee + + 498 + + + + item + + + + description + + 15 x 2.0 GHz Cores + + + + + + + + + + template + + + + startCpus + + 15 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .75 + + + + recurringFee + + 505 + + + + item + + + + description + + 16 x 2.0 GHz Cores + + + + + + + + + + template + + + + startCpus + + 16 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .22 + + + + recurringFee + + 159 + + + + item + + + + description + + Private 1 x 2.0 GHz Core + + + + + + + + + + template + + + + dedicatedAccountHostOnlyFlag + + 1 + + + + startCpus + + 1 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .3 + + + + recurringFee + + 199 + + + + item + + + + description + + Private 2 x 2.0 GHz Cores + + + + + + + + + + template + + + + dedicatedAccountHostOnlyFlag + + 1 + + + + startCpus + + 2 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .44 + + + + recurringFee + + 299 + + + + item + + + + description + + Private 4 x 2.0 GHz Cores + + + + + + + + + + template + + + + dedicatedAccountHostOnlyFlag + + 1 + + + + startCpus + + 4 + + + + + + + + + + + itemPrice + + + + hourlyRecurringFee + + .58 + + + + recurringFee + + 399 + + + + item + + + + description + + Private 8 x 2.0 GHz Cores + + + + + + + + + + template + + + + dedicatedAccountHostOnlyFlag + + 1 + + + + startCpus + + 8 + + + + + + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getObject.xml 
libcloud-0.15.1/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getObject.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getObject.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Virtual_Guest_getObject.xml 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,519 @@ + + + + + + + accountId + + 282402 + + + + createDate + + 2013-11-12T19:33:57+01:00 + + + + dedicatedAccountHostOnlyFlag + + 0 + + + + domain + + example.com + + + + fullyQualifiedDomainName + + libcloud-testing.example.com + + + + hostname + + libcloud-testing + + + + id + + 2905761 + + + + lastPowerStateId + + + + + + lastVerifiedDate + + + + + + maxCpu + + 1 + + + + maxCpuUnits + + CORE + + + + maxMemory + + 2048 + + + + metricPollDate + + + + + + modifyDate + + 2013-11-12T19:36:55+01:00 + + + + startCpus + + 1 + + + + statusId + + 1001 + + + + uuid + + cbc33604-afd0-4820-57c3-6c68ae7c5fe0 + + + + billingItem + + + + allowCancellationFlag + + 1 + + + + cancellationDate + + + + + + categoryCode + + guest_core + + + + createDate + + 2013-11-12T19:33:59+01:00 + + + + currentHourlyCharge + + .056 + + + + cycleStartDate + + 2013-11-12T19:39:03+01:00 + + + + description + + 1 x 2.0 GHz Core + + + + domainName + + example.com + + + + hostName + + libcloud-testing + + + + hourlyRecurringFee + + .056 + + + + hoursUsed + + 1 + + + + id + + 16538495 + + + + laborFee + + 0 + + + + laborFeeTaxRate + + .21 + + + + lastBillDate + + 2013-11-12T19:39:03+01:00 + + + + modifyDate + + 2013-11-12T19:39:03+01:00 + + + + nextBillDate + + 2013-11-28T07:00:00+01:00 + + + + oneTimeFee + + 0 + + + + oneTimeFeeTaxRate + + .21 + + + + orderItemId + + 22774406 + + + + parentId + + + + + + recurringFee + + .056 + + + + recurringFeeTaxRate + + .21 + + + + recurringMonths + + 1 + + + + serviceProviderId + + 1 + + + + setupFee + + 0 + + + + setupFeeTaxRate + + .21 + + + + resourceTableId + + 2905761 + + + + + 
+ + globalIdentifier + + 633fd9e3-4cf7-4c78-b746-c2b76e2c8b88 + + + + managedResourceFlag + + 0 + + + + operatingSystem + + + + hardwareId + + + + + + id + + 2211183 + + + + manufacturerLicenseInstance + + + + + + passwords + + + + + + + createDate + + 2013-11-12T19:34:16+01:00 + + + + id + + 1867597 + + + + modifyDate + + 2013-11-12T19:34:16+01:00 + + + + password + + LTSp4cpJ + + + + port + + + + + + softwareId + + 2211183 + + + + username + + root + + + + + + + + + + softwareLicense + + + + id + + 1523 + + + + softwareDescriptionId + + 1163 + + + + softwareDescription + + + + controlPanel + + 0 + + + + id + + 1163 + + + + longDescription + + Debian / Debian / 7.0.0-64 Minimal for CCI + + + + manufacturer + + Debian + + + + name + + Debian + + + + operatingSystem + + 1 + + + + referenceCode + + DEBIAN_7_64 + + + + upgradeSoftwareDescriptionId + + + + + + upgradeSwDescId + + + + + + version + + 7.0.0-64 Minimal for CCI + + + + virtualLicense + + 0 + + + + virtualizationPlatform + + 0 + + + + requiredUser + + root + + + + + + + + + + + + + powerState + + + + keyName + + RUNNING + + + + name + + Running + + + + + + + primaryBackendIpAddress + + 10.55.62.124 + + + + primaryIpAddress + + 50.97.215.202 + + + + provisionDate + + 2013-11-12T19:39:03+01:00 + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_catalogItem_5.xml libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_catalogItem_5.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_catalogItem_5.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_catalogItem_5.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ + + + + + 0 + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_login.xml libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_login.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_login.xml 1970-01-01 
00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_login.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,3 @@ + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_org_240.xml libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_org_240.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_org_240.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_org_240.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_task_10496.xml libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_task_10496.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_task_10496.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_task_10496.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,4 @@ + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_task_11001.xml libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_task_11001.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_task_11001.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_task_11001.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,4 @@ + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_action_deploy.xml libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_action_deploy.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_action_deploy.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_action_deploy.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,4 @@ + + + + diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_get.xml libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_get.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_get.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_get.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,132 @@ + + + + + + + 10.112.78.69 + + + + The kind of installed guest operating system + Red Hat Enterprise Linux 5 (32-bit) + + + Virtual Hardware + + + + + + + + + + + + + Virtual Hardware Family + 0 + + + + + + testerpart2 + vmx-07 + + +
+ + hertz * 10^6 + + + + + Number of Virtual CPUs + 2 virtual CPU(s) + 1 + + + + + + + + 3 + 2 + count + + + +
+ + byte * 2^20 + + + + + Memory Size + 512MB of memory + 2 + + + + + + + + 4 + 512 + byte * 2^20 + + + +
0
+ + + + + + + SCSI Controller + SCSI Controller 0 + 3 + + + + + + + lsilogic + 6 + + + +
+ +
+ 0 + + + + + + + Hard Disk 1 + 10485760 + 9 + + + + 3 + + + + 17 + 10485760 + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_poweroff.xml libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_poweroff.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_poweroff.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_poweroff.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,4 @@ + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_powerOn.xml libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_powerOn.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_powerOn.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_powerOn.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,4 @@ + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_reset.xml libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_reset.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_reset.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_reset.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,4 @@ + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031.xml libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031.xml 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vapp_14031.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,132 @@ + + + + + + + 10.112.78.69 + + + + The kind of installed guest operating system + Red Hat Enterprise Linux 5 (32-bit) + + + Virtual Hardware + + + + + + + + + + + + + Virtual Hardware Family + 0 + + + + + + testerpart2 + vmx-07 + + +
+ + hertz * 10^6 + + + + + Number of Virtual CPUs + 2 virtual CPU(s) + 1 + + + + + + + + 3 + 2 + count + + + +
+ + byte * 2^20 + + + + + Memory Size + 512MB of memory + 2 + + + + + + + + 4 + 512 + byte * 2^20 + + + +
0
+ + + + + + + SCSI Controller + SCSI Controller 0 + 3 + + + + + + + lsilogic + 6 + + + +
+ +
+ 0 + + + + + + + Hard Disk 1 + 10485760 + 9 + + + + 3 + + + + 17 + 10485760 + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224_action_instantiateVAppTemplate.xml libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224_action_instantiateVAppTemplate.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224_action_instantiateVAppTemplate.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224_action_instantiateVAppTemplate.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,3 @@ + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224_catalog.xml libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224_catalog.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224_catalog.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224_catalog.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224.xml libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/terremark/api_v0_8_vdc_224.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcl/v3_SoftLayer_Account_getVirtualGuests.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcl/v3_SoftLayer_Account_getVirtualGuests.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcl/v3_SoftLayer_Account_getVirtualGuests.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcl/v3_SoftLayer_Account_getVirtualGuests.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 
+1,1066 @@ + + + + + + + + + + privateNetworkOnlyFlag + + 0 + + + + id + + 1832 + + + + accountId + + 11111 + + + + statusId + + 1001 + + + + uuid + + eaa9aaa2-8e2e-d6e0-ce11-6f01e765779c + + + + hostname + + test1 + + + + domain + + libcloud.org + + + + maxCpu + + 2 + + + + maxCpuUnits + + CORE + + + + maxMemory + + 2048 + + + + startCpus + + 2 + + + + createDate + + 2009-09-04T14:49:45-05:00 + + + + modifyDate + + 2010-04-22T13:08:47-05:00 + + + + metricPollDate + + 2010-04-22T13:08:00-05:00 + + + + dedicatedAccountHostOnlyFlag + + 0 + + + + powerState + + + + name + + Running + + + + keyName + + RUNNING + + + + + + + softwareComponents + + + + + + + id + + 191115 + + + + hardwareId + + + + + + manufacturerLicenseInstance + + + + + + passwords + + + + + + + id + + 166980 + + + + softwareId + + 191115 + + + + username + + root + + + + password + + TEST + + + + createDate + + 2009-09-04T14:49:51-05:00 + + + + modifyDate + + 2009-09-04T14:49:51-05:00 + + + + port + + + + + + + + + + + + + + + + + + primaryIpAddress + + 67.254.254.254 + + + + primaryBackendIpAddress + + 10.254.254.254 + + + + + + + + privateNetworkOnlyFlag + + 0 + + + + id + + 13402 + + + + accountId + + 11111 + + + + statusId + + 1001 + + + + uuid + + 9e9e9e99-4ed9-4645-19f3-55ee4e404d56 + + + + hostname + + test2 + + + + domain + + libcloud.org + + + + maxCpu + + 1 + + + + maxCpuUnits + + CORE + + + + maxMemory + + 1024 + + + + startCpus + + 1 + + + + createDate + + 2010-03-23T17:06:13-05:00 + + + + modifyDate + + 2010-04-22T13:08:43-05:00 + + + + metricPollDate + + 2010-04-22T13:08:00-05:00 + + + + dedicatedAccountHostOnlyFlag + + 0 + + + + powerState + + + + name + + Running + + + + keyName + + RUNNING + + + + + + + softwareComponents + + + + + + + id + + 257314 + + + + hardwareId + + + + + + manufacturerLicenseInstance + + + + + + passwords + + + + + + + id + + 235268 + + + + softwareId + + 257314 + + + + username + + root + + + + password + + TEST + + + + createDate + + 
2010-03-23T17:06:17-05:00 + + + + modifyDate + + 2010-03-23T17:06:17-05:00 + + + + port + + + + + + + + + + + + + + + + + + primaryIpAddress + + 174.254.254.254 + + + + primaryBackendIpAddress + + 10.254.254.254 + + + + + + + + privateNetworkOnlyFlag + + 0 + + + + id + + 19293 + + + + accountId + + 11111 + + + + statusId + + 1001 + + + + uuid + + 9f99e19b-2c61-9cd5-2081-67b57fd7977b + + + + hostname + + test3 + + + + domain + + libcloud.org + + + + maxCpu + + 2 + + + + maxCpuUnits + + CORE + + + + maxMemory + + 1024 + + + + startCpus + + 2 + + + + createDate + + 2010-04-22T12:38:53-05:00 + + + + modifyDate + + 2010-04-22T13:08:01-05:00 + + + + metricPollDate + + 2010-04-22T13:08:00-05:00 + + + + dedicatedAccountHostOnlyFlag + + 1 + + + + powerState + + + + name + + Running + + + + keyName + + RUNNING + + + + + + + softwareComponents + + + + + + + id + + 277185 + + + + hardwareId + + + + + + manufacturerLicenseInstance + + + + + + passwords + + + + + + + id + + 250826 + + + + softwareId + + 277185 + + + + username + + root + + + + password + + TEST + + + + createDate + + 2010-04-22T12:38:57-05:00 + + + + modifyDate + + 2010-04-22T12:38:57-05:00 + + + + port + + + + + + + + + + + + + + + + + + primaryIpAddress + + 174.254.254.254 + + + + primaryBackendIpAddress + + 10.254.254.254 + + + + + + + + privateNetworkOnlyFlag + + 0 + + + + id + + 19288 + + + + accountId + + 11111 + + + + statusId + + 1001 + + + + uuid + + 999f77d9-679b-c47d-136d-04cd302384ec + + + + hostname + + test4 + + + + domain + + libcloud.org + + + + maxCpu + + 2 + + + + maxCpuUnits + + CORE + + + + maxMemory + + 1024 + + + + startCpus + + 2 + + + + createDate + + 2010-04-22T12:15:24-05:00 + + + + modifyDate + + 2010-04-22T13:08:31-05:00 + + + + metricPollDate + + 2010-04-22T13:08:00-05:00 + + + + dedicatedAccountHostOnlyFlag + + 1 + + + + powerState + + + + name + + Running + + + + keyName + + RUNNING + + + + + + + softwareComponents + + + + + + + id + + 277171 + + + + hardwareId + + + + + + 
manufacturerLicenseInstance + + + + + + passwords + + + + + + + id + + 250815 + + + + softwareId + + 277171 + + + + username + + root + + + + password + + TEST + + + + createDate + + 2010-04-22T12:15:26-05:00 + + + + modifyDate + + 2010-04-22T12:15:26-05:00 + + + + port + + + + + + + + + + + + + + + + + + primaryIpAddress + + 174.254.254.254 + + + + primaryBackendIpAddress + + 10.254.254.254 + + + + + + + + privateNetworkOnlyFlag + + 0 + + + + id + + 19284 + + + + accountId + + 11111 + + + + statusId + + 1001 + + + + uuid + + f3c73738-7731-1372-f3c3-e6808082f824 + + + + hostname + + test5 + + + + domain + + libcloud.org + + + + maxCpu + + 2 + + + + maxCpuUnits + + CORE + + + + maxMemory + + 1024 + + + + startCpus + + 2 + + + + createDate + + 2010-04-22T12:11:23-05:00 + + + + modifyDate + + 2010-04-22T13:08:31-05:00 + + + + metricPollDate + + 2010-04-22T13:08:00-05:00 + + + + dedicatedAccountHostOnlyFlag + + 1 + + + + powerState + + + + name + + Running + + + + keyName + + RUNNING + + + + + + + softwareComponents + + + + + + + id + + 277167 + + + + hardwareId + + + + + + manufacturerLicenseInstance + + + + + + passwords + + + + + + + id + + 250811 + + + + softwareId + + 277167 + + + + username + + root + + + + password + + TEST + + + + createDate + + 2010-04-22T12:11:27-05:00 + + + + modifyDate + + 2010-04-22T12:11:27-05:00 + + + + port + + + + + + + + + + + + + + + + + + primaryIpAddress + + 174.254.254.254 + + + + primaryBackendIpAddress + + 10.254.254.254 + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcl/v3_SoftLayer_Location_Datacenter_getDatacenters.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcl/v3_SoftLayer_Location_Datacenter_getDatacenters.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcl/v3_SoftLayer_Location_Datacenter_getDatacenters.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcl/v3_SoftLayer_Location_Datacenter_getDatacenters.xml 2013-08-30 12:21:18.000000000 +0000 
@@ -0,0 +1,99 @@ + + + + + + + + + + id + + 2 + + + + name + + dal00 + + + + longName + + Corporate HQ + + + + + + + + id + + 3 + + + + name + + dal01 + + + + longName + + Dallas + + + + + + + + id + + 18171 + + + + name + + sea01 + + + + longName + + Seattle + + + + + + + + id + + 37473 + + + + name + + wdc01 + + + + longName + + Washington, DC + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcl/XMLRPCaddRequest.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcl/XMLRPCaddRequest.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcl/XMLRPCaddRequest.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcl/XMLRPCaddRequest.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,23 @@ + + + + + + + + requestid + + 51 + + + + status + + success + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcl/XMLRPCendRequest.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcl/XMLRPCendRequest.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcl/XMLRPCendRequest.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcl/XMLRPCendRequest.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + + + status + + success + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcl/XMLRPCextendRequest.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcl/XMLRPCextendRequest.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcl/XMLRPCextendRequest.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcl/XMLRPCextendRequest.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + + + status + + success + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcl/XMLRPCgetImages.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcl/XMLRPCgetImages.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcl/XMLRPCgetImages.xml 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/vcl/XMLRPCgetImages.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,61 @@ + + + + + + + + + + + id + + 8 + + + + name + + CentOS 5 (EC2) + + + + + + + + id + + 5 + + + + name + + CentOS 5.4 Base (32 bit VM) + + + + + + + + id + + 6 + + + + name + + WinXP Base (32 bit VM) + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcl/XMLRPCgetRequestConnectData.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcl/XMLRPCgetRequestConnectData.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcl/XMLRPCgetRequestConnectData.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcl/XMLRPCgetRequestConnectData.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,35 @@ + + + + + + + + status + + ready + + + + serverIP + + 152.46.20.67 + + + + user + + admin + + + + password + + ehkNGW + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcl/XMLRPCgetRequestIds.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcl/XMLRPCgetRequestIds.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcl/XMLRPCgetRequestIds.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcl/XMLRPCgetRequestIds.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,60 @@ + + + + + + + + status + + success + + + + requests + + + + + + + requestid + + 51 + + + + imageid + + 5 + + + + imagename + + CentOS 5.4 Base (32 bit VM) + + + + start + + 1334150100 + + + + end + + 1334168100 + + + + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcl/XMLRPCgetRequestStatus.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcl/XMLRPCgetRequestStatus.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcl/XMLRPCgetRequestStatus.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcl/XMLRPCgetRequestStatus.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + + + status + + 
ready + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_admin_group_b8202c48_7151_4e61_9a6c_155474c7d413.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_admin_group_b8202c48_7151_4e61_9a6c_155474c7d413.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_admin_group_b8202c48_7151_4e61_9a6c_155474c7d413.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_admin_group_b8202c48_7151_4e61_9a6c_155474c7d413.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + \CF\CB\AD\5D\1D\34\09\4D\A4\77\8D\A3\CA\99\75\FB + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_catalog_cddb3cb2_3394_4b14_b831_11fbc4028da4.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_catalog_cddb3cb2_3394_4b14_b831_11fbc4028da4.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_catalog_cddb3cb2_3394_4b14_b831_11fbc4028da4.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_catalog_cddb3cb2_3394_4b14_b831_11fbc4028da4.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,9 @@ + + + + + + + + false + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_catalogItem_3132e037_759b_4627_9056_ca66466fa607.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_catalogItem_3132e037_759b_4627_9056_ca66466fa607.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_catalogItem_3132e037_759b_4627_9056_ca66466fa607.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_catalogItem_3132e037_759b_4627_9056_ca66466fa607.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + + + + + \ No newline at end of file diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_network_dca8b667_6c8f_4c3e_be57_7a9425dba4f4.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_network_dca8b667_6c8f_4c3e_be57_7a9425dba4f4.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_network_dca8b667_6c8f_4c3e_be57_7a9425dba4f4.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_network_dca8b667_6c8f_4c3e_be57_7a9425dba4f4.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,49 @@ + + + + + + + true + 65.41.64.1 + 255.255.252.0 + 65.41.42.113 + 65.41.42.114 + vm.netsuite.com + + + 65.41.65.1 + 65.41.65.64 + + + 65.41.65.70 + 65.41.65.88 + + + 65.41.65.90 + 65.41.66.6 + + + 65.41.66.8 + 65.41.66.67 + + + 65.41.66.69 + 65.41.66.108 + + + 65.41.66.110 + 65.41.66.227 + + + 65.41.66.229 + 65.41.67.254 + + + + bridged + false + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_org_96726c78_4ae3_402f_b08b_7a78c6903d2a.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_org_96726c78_4ae3_402f_b08b_7a78c6903d2a.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_org_96726c78_4ae3_402f_b08b_7a78c6903d2a.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_org_96726c78_4ae3_402f_b08b_7a78c6903d2a.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + + + + + + MyOrg Product Development + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_org.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_org.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_org.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_org.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_query_group.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_query_group.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_query_group.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_query_group.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_query_user.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_query_user.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_query_user.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_query_user.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_sessions.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_sessions.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_sessions.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_sessions.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,9 @@ + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,9 @@ + + + + + 100 + \ No newline at end of file diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_task_undeploy_error.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_task_undeploy_error.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_task_undeploy_error.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_task_undeploy_error.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_task_undeploy.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_task_undeploy.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_task_undeploy.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_task_undeploy.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_get_metadata.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_get_metadata.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_get_metadata.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_get_metadata.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,12 @@ + + + + + + + + + owners + msamia@netsuite.com + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_post_metadata.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_post_metadata.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_post_metadata.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vapp_post_metadata.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_undeployTest.xml 
libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_undeployTest.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_undeployTest.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_undeployTest.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,271 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Lease settings section + + 0 + 0 + + + VApp startup section + + + + + The list of logical networks + + + + + + The configuration parameters for logical networks + + + + + + + true + 65.41.64.1 + 255.255.252.0 + 65.41.42.113 + 65.41.42.114 + vm.myorg.com + + + 65.41.67.1 + 65.41.67.254 + + + + + bridged + false + + + false + 3600 + 7200 + + 65.41.64.2 + 65.41.67.0 + + + + true + drop + false + + true + Allow all outgoing traffic + allow + + true + + -1 + Any + -1 + Any + out + false + + + + true + ipTranslation + allowTraffic + + + automatic + ScrumVM_Master + 0 + + + + + false + + + + + false + + + + + + false + + + + + + + + + + + + + + + Virtual hardware requirements + + Virtual Hardware Family + 0 + mgalet-test2 + vmx-07 + + + 00:50:56:01:00:99 + 0 + true + vCloud - Default + PCNet32 ethernet adapter + Network adapter 0 + 1 + PCNet32 + 10 + + + 0 + SCSI Controller + SCSI Controller 0 + 2 + lsilogic + 6 + + + 0 + Hard disk + Hard disk 1 + + 2000 + 2 + 17 + + + 0 + IDE Controller + IDE Controller 0 + 3 + 5 + + + 0 + false + CD/DVD Drive + CD/DVD Drive 1 + + 3002 + 3 + 15 + + + 0 + false + Floppy Drive + Floppy Drive 1 + + 8000 + 14 + + + hertz * 10^6 + Number of Virtual CPUs + 2 virtual CPU(s) + 4 + 0 + 3 + 2 + 0 + + + + byte * 2^20 + Memory Size + 4096 MB of memory + 5 + 0 + 4 + 4096 + 0 + + + + + + + + + + + + + + + + + Specifies the operating system installed + Red Hat Enterprise Linux 5 (64-bit) + + + + Specifies the available VM network connections + 0 + + 0 + 65.41.67.2 + true + 00:50:56:01:00:99 + POOL + + + + + Specifies Guest OS Customization Settings + 
true + false + dd75d1d3-5b7b-48f0-aff3-69622ab7e045 + false + false + true + true + sN#9QH9# + false + mgalet-test2 + + + + Specifies Runtime info + + + ScrumVM_Master + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_controlAccess.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_controlAccess.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_controlAccess.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_controlAccess.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,10 @@ + + true + ReadOnly + + + + FullControl + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_all.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_all.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_all.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_all.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_powerOn.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_powerOn.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_powerOn.xml 1970-01-01 00:00:00.000000000 
+0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_powerOn.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_reset.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_reset.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_reset.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_reset.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,295 @@ + + + + + + + + + + + + + + + + + + Lease settings section + + 0 + 0 + + + VApp startup section + + + + + The list of logical networks + + + + + + The configuration parameters for logical networks + + + + + + + true + 65.41.64.1 + 255.255.252.0 + 65.41.42.113 + 65.41.42.114 + vm.myorg.com + + + 65.41.67.1 + 65.41.67.254 + + + + + bridged + false + + + false + 3600 + 7200 + + 65.41.64.2 + 65.41.67.0 + + + + true + drop + false + + true + Allow all outgoing traffic + allow + + true + + -1 + Any + -1 + Any + out + false + + + + 
true + ipTranslation + allowTraffic + + + automatic + ScrumVM_Master + 0 + + + + + false + + + + + true + + + + + + false + + + + + + + + + + + + + + + + + + + Virtual hardware requirements + + Virtual Hardware Family + 0 + mgalet-test2 + vmx-07 + + + 00:50:56:01:00:99 + 0 + true + vCloud - Default + PCNet32 ethernet adapter + Network adapter 0 + 1 + PCNet32 + 10 + + + 0 + SCSI Controller + SCSI Controller 0 + 2 + lsilogic + 6 + + + 0 + Hard disk + Hard disk 1 + + 2000 + 2 + 17 + + + 0 + IDE Controller + IDE Controller 0 + 3 + 5 + + + 0 + false + CD/DVD Drive + CD/DVD Drive 1 + + 3002 + 3 + 15 + + + 0 + false + Floppy Drive + Floppy Drive 1 + + 8000 + 14 + + + hertz * 10^6 + Number of Virtual CPUs + 2 virtual CPU(s) + 4 + 0 + 3 + 2 + 0 + + + + byte * 2^20 + Memory Size + 4096 MB of memory + 5 + 0 + 4 + 4096 + 0 + + + + + + + + + + + + + + + + + Specifies the operating system installed + Red Hat Enterprise Linux 5 (64-bit) + + + + Specifies the available VM network connections + 0 + + 0 + 65.41.67.2 + true + 00:50:56:01:00:99 + POOL + + + + + Specifies Guest OS Customization Settings + true + false + dd75d1d3-5b7b-48f0-aff3-69622ab7e045 + false + false + true + true + sN#9QH9# + false + mgalet-test2 + + + + Specifies Runtime info + + + ScrumVM_Master + + + VMware ESXi + 5.0.0 + VMware, Inc. 
+ en + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,295 @@ + + + + + + + + + + + + + + + + + + Lease settings section + + 0 + 0 + + + VApp startup section + + + + + The list of logical networks + + + + + + The configuration parameters for logical networks + + + + + + + true + 65.41.64.1 + 255.255.252.0 + 65.41.42.113 + 65.41.42.114 + vm.myorg.com + + + 65.41.67.1 + 65.41.67.254 + + + + + bridged + false + + + false + 3600 + 7200 + + 65.41.64.2 + 65.41.67.0 + + + + true + drop + false + + true + Allow all outgoing traffic + allow + + true + + -1 + Any + -1 + Any + out + false + + + + true + ipTranslation + allowTraffic + + + automatic + ScrumVM_Master + 0 + + + + + false + + + + + true + + + + + + false + + + + + + + + + + + + + + + + + + + Virtual hardware requirements + + Virtual Hardware Family + 0 + mgalet-test2 + vmx-07 + + + 00:50:56:01:00:99 + 0 + true + vCloud - Default + PCNet32 ethernet adapter + Network adapter 0 + 1 + PCNet32 + 10 + + + 0 + SCSI Controller + SCSI Controller 0 + 2 + lsilogic + 6 + + + 0 + Hard disk + Hard disk 1 + + 2000 + 2 + 17 + + + 0 + IDE Controller + IDE Controller 0 + 3 + 5 + + + 0 + false + CD/DVD Drive + CD/DVD Drive 1 + + 3002 + 3 + 15 + + + 0 + false + Floppy Drive + Floppy Drive 1 + + 8000 + 14 + + + hertz * 10^6 + Number of Virtual CPUs + 2 virtual CPU(s) + 4 + 0 + 3 + 2 + 0 + + + + byte * 2^20 + Memory Size + 4096 MB of memory + 5 + 0 + 4 + 4096 + 0 
+ + + + + + + + + + + + + + + + + Specifies the operating system installed + Red Hat Enterprise Linux 5 (64-bit) + + + + Specifies the available VM network connections + 0 + + 0 + 192.168.0.100 + 192.168.0.103 + true + 00:50:56:01:00:d9 + POOL + + + + Specifies Guest OS Customization Settings + true + false + dd75d1d3-5b7b-48f0-aff3-69622ab7e045 + false + false + true + true + sN#9QH9# + false + mgalet-test2 + + + + Specifies Runtime info + + + ScrumVM_Master + + + VMware ESXi + 5.0.0 + VMware, Inc. + en + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,290 @@ + + + + + + + + + + + + + + + + + + Lease settings section + + 0 + 0 + + + VApp startup section + + + + + The list of logical networks + + + + + + The configuration parameters for logical networks + + + + + + + true + 65.41.64.1 + 255.255.252.0 + 65.41.42.113 + 65.41.42.114 + vm.myorg.com + + + 65.41.67.1 + 65.41.67.254 + + + + + bridged + false + + + false + 3600 + 7200 + + 65.41.64.2 + 65.41.67.0 + + + + true + drop + false + + true + Allow all outgoing traffic + allow + + true + + -1 + Any + -1 + Any + out + false + + + + true + ipTranslation + allowTraffic + + + automatic + ScrumVM_Master + 0 + + + + + false + + + + + true + + + + + + false + + + + + + + + + + + + + + + + + + + Virtual hardware requirements + + Virtual Hardware Family + 0 + mgalet-test2 + vmx-07 + + + 00:50:56:01:00:99 + 0 + true + vCloud - Default + PCNet32 ethernet 
adapter + Network adapter 0 + 1 + PCNet32 + 10 + + + 0 + SCSI Controller + SCSI Controller 0 + 2 + lsilogic + 6 + + + 0 + Hard disk + Hard disk 1 + + 2000 + 2 + 17 + + + 0 + IDE Controller + IDE Controller 0 + 3 + 5 + + + 0 + false + CD/DVD Drive + CD/DVD Drive 1 + + 3002 + 3 + 15 + + + 0 + false + Floppy Drive + Floppy Drive 1 + + 8000 + 14 + + + hertz * 10^6 + Number of Virtual CPUs + 2 virtual CPU(s) + 4 + 0 + 3 + 2 + 0 + + + + byte * 2^20 + Memory Size + 4096 MB of memory + 5 + 0 + 4 + 4096 + 0 + + + + + + + + + + + + + + + + + Specifies the available VM network connections + 0 + + 0 + 192.168.0.100 + 192.168.0.103 + true + 00:50:56:01:00:d9 + POOL + + + + Specifies Guest OS Customization Settings + true + false + dd75d1d3-5b7b-48f0-aff3-69622ab7e045 + false + false + true + true + sN#9QH9# + false + mgalet-test2 + + + + Specifies Runtime info + + + ScrumVM_Master + + + VMware ESXi + 5.0.0 + VMware, Inc. + en + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_access_to_resource_forbidden.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_access_to_resource_forbidden.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_access_to_resource_forbidden.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vapp_access_to_resource_forbidden.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,4 @@ + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vm_test.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vm_test.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vm_test.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vApp_vm_test.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,218 @@ + + + + + + + + + + + + + + + + + + + + + 
+ + + + + Virtual hardware requirements + + Virtual Hardware Family + 0 + ScrumVM_Master + vmx-07 + + + 00:50:56:01:00:d9 + 0 + true + vCloud - Default + PCNet32 ethernet adapter + Network adapter 0 + 1 + PCNet32 + 10 + + + 0 + SCSI Controller + SCSI Controller 0 + 2 + lsilogic + 6 + + + 0 + Hard disk + Hard disk 1 + + 2000 + 2 + 17 + + + 1 + Hard disk + Hard disk 2 + + 2001 + 2 + 17 + + + 2 + Hard disk + Hard disk 3 + + 2002 + 2 + 17 + + + 3 + Hard disk + Hard disk 4 + + 2003 + 2 + 17 + + + 0 + IDE Controller + IDE Controller 0 + 3 + 5 + + + 0 + false + CD/DVD Drive + CD/DVD Drive 1 + + 3002 + 3 + 15 + + + 0 + false + Floppy Drive + Floppy Drive 1 + + 8000 + 14 + + + hertz * 10^6 + Number of Virtual CPUs + 2 virtual CPU(s) + 4 + 0 + 3 + 2 + 0 + + + + byte * 2^20 + Memory Size + 4096 MB of memory + 5 + 0 + 4 + 4096 + 0 + + + + + + + + + + + + + + + + + Specifies the operating system installed + Red Hat Enterprise Linux 5 (64-bit) + + + + Specifies the available VM network connections + 0 + + 0 + 65.33.65.9 + true + 00:50:56:01:00:d9 + POOL + + + + + Specifies Guest OS Customization Settings + true + false + cbfe57d5-7362-482b-b313-e5b5bcff3309 + false + false + true + true + jW!4$$2i + false + #/bin/sh + /usr/local/sbin/ns-guest-customization.sh "$@" + ScrumVMMast-001 + + + + Specifies Runtime info + + + ScrumVM_Master + + + VMware ESXi + 5.0.0 + VMware, Inc. 
+ en + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_cloneVApp.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_cloneVApp.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_cloneVApp.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_cloneVApp.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,20 @@ + + + + testing instance + + + + + + + 1 + + + + + + false + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_instantiateVAppTemplate.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_instantiateVAppTemplate.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_instantiateVAppTemplate.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_instantiateVAppTemplate.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,23 @@ + + + + + + + testing instance + + + + + + + 1 + + + + + + false + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0.xml 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,57 @@ + + + + + + + + + + + + AllocationPool + + MB + 5120000 + 5120000 + 1984512 + 0 + + + + MHz + 130000 + 160000 + 0 + 0 + + + MB + 527360 + 527360 + 130752 + 0 + + + + + + + + + + + + + + + vmx-04 + vmx-07 + vmx-08 + + + 0 + 1024 + 150 + true + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_brokenVdc.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_brokenVdc.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_brokenVdc.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/api_vdc_brokenVdc.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,56 @@ + + + + + + + + + + + + AllocationPool + + MB + 5120000 + 5120000 + 1984512 + 0 + + + + MHz + 130000 + 160000 + 0 + 0 + + + MB + 527360 + 527360 + 130752 + 0 + + + + + + + + + + + + + + vmx-04 + vmx-07 + vmx-08 + + + 0 + 1024 + 150 + true + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,17 @@ + + Specifies Guest OS Customization Settings + true + false + 9e8837e6-5c4c-4112-bf01-5498616d865f + false + false + true + true + aabbccddee + false + VMMast-001 + + diff -Nru 
libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_test_virtualHardwareSection_cpu.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_test_virtualHardwareSection_cpu.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_test_virtualHardwareSection_cpu.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_test_virtualHardwareSection_cpu.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,11 @@ + + hertz * 10^6 + Number of Virtual CPUs + 1 virtual CPU(s) + 4 + 0 + 3 + 1 + 0 + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_test_virtualHardwareSection_disks.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_test_virtualHardwareSection_disks.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_test_virtualHardwareSection_disks.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_test_virtualHardwareSection_disks.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,54 @@ + + + + 0 + SCSI Controller + SCSI Controller 0 + 2 + lsilogic + 6 + + + 0 + Hard disk + Hard disk 1 + + 2000 + 2 + 17 + + + 1 + Hard disk + Hard disk 2 + + 2001 + 2 + 17 + + + 2 + Hard disk + Hard disk 3 + + 2002 + 2 + 17 + + + 3 + Hard disk + Hard disk 4 + + 2003 + 2 + 17 + + + 0 + IDE Controller + IDE Controller 0 + 3 + 5 + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_test_virtualHardwareSection_memory.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_test_virtualHardwareSection_memory.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_test_virtualHardwareSection_memory.xml 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/get_api_vApp_vm_test_virtualHardwareSection_memory.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,11 @@ + + byte * 2^20 + Memory Size + 4096 MB of memory + 5 + 0 + 4 + 4096 + 0 + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_test_virtualHardwareSection_cpu.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_test_virtualHardwareSection_cpu.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_test_virtualHardwareSection_cpu.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_test_virtualHardwareSection_cpu.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_test_virtualHardwareSection_disks.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_test_virtualHardwareSection_disks.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_test_virtualHardwareSection_disks.xml 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_test_virtualHardwareSection_disks.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_test_virtualHardwareSection_memory.xml libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_test_virtualHardwareSection_memory.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_test_virtualHardwareSection_memory.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vcloud_1_5/put_api_vApp_vm_test_virtualHardwareSection_memory.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/voxel/create_node.xml libcloud-0.15.1/libcloud/test/compute/fixtures/voxel/create_node.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/voxel/create_node.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/voxel/create_node.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + 1234 + 1235386846 + QUEUED + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/voxel/failure.xml libcloud-0.15.1/libcloud/test/compute/fixtures/voxel/failure.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/voxel/failure.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/voxel/failure.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/voxel/images.xml libcloud-0.15.1/libcloud/test/compute/fixtures/voxel/images.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/voxel/images.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/voxel/images.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,19 @@ + + + + + 32 + Linux + 2.6.18 + CentOS + 5.1 + root + + + 
ext3 + 880 + + Base install of CentOS 5.1 i386. + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/voxel/locations.xml libcloud-0.15.1/libcloud/test/compute/fixtures/voxel/locations.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/voxel/locations.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/voxel/locations.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,10 @@ + + + + Amsterdam + + + New York + + + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/voxel/nodes.xml libcloud-0.15.1/libcloud/test/compute/fixtures/voxel/nodes.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/voxel/nodes.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/voxel/nodes.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,38 @@ + + + + Z100.12 + Virtual Server + Voxrox Intel Platform + + LGA7 - XO / 12th Floor + Private cage + primary + Row Z + Rack 100 + 12 + + + + 172.x.x.x + + + + + user + + + root + + +
zz.zz.us.voxel.net
+ 55555 + user +
+
+ Voxel TruManaged Server Configuration 1 +
+ ... +
+
diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/voxel/success.xml libcloud-0.15.1/libcloud/test/compute/fixtures/voxel/success.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/voxel/success.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/voxel/success.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/voxel/unauthorized.xml libcloud-0.15.1/libcloud/test/compute/fixtures/voxel/unauthorized.xml --- libcloud-0.5.0/libcloud/test/compute/fixtures/voxel/unauthorized.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/voxel/unauthorized.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,11 @@ + + + + voxel.devices.list + + 2010-02-10T23:39:25.808107+0000 + authshouldfail + ae069bb835e998622caaddaeff8c98e0 + + YOUR_SECRETtimestamp2010-02-10T23:39:25.808107+0000methodvoxel.devices.listkeyauthshouldfail + diff -Nru libcloud-0.5.0/libcloud/test/compute/fixtures/vpsnet/_available_clouds_api10json_templates.json libcloud-0.15.1/libcloud/test/compute/fixtures/vpsnet/_available_clouds_api10json_templates.json --- libcloud-0.5.0/libcloud/test/compute/fixtures/vpsnet/_available_clouds_api10json_templates.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/fixtures/vpsnet/_available_clouds_api10json_templates.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1 @@ +[{"cloud":{"system_templates":[{"id":9,"label":"Ubuntu 8.04 x64"},{"id":10,"label":"CentOS 5.2 x64"},{"id":11,"label":"Gentoo 2008.0 x64"},{"id":18,"label":"Ubuntu 8.04 x64 LAMP"},{"id":19,"label":"Ubuntu 8.04 x64 MySQL"},{"id":20,"label":"Ubuntu 8.04 x64 Postfix"},{"id":21,"label":"Ubuntu 8.04 x64 Apache"},{"id":22,"label":"CentOS 5.2 x64 MySQL"},{"id":23,"label":"CentOS 5.2 x64 LAMP"},{"id":24,"label":"CentOS 5.2 x64 HAProxy"},{"id":25,"label":"CentOS 5.2 x64 Postfix"},{"id":26,"label":"CentOS 5.2 x64 Varnish"},{"id":27,"label":"CentOS 
5.2 x64 Shoutcast"},{"id":28,"label":"CentOS 5.2 x64 Apache"},{"id":40,"label":"cPanel"},{"id":42,"label":"Debian 5.0 (Lenny) x64"},{"id":58,"label":"Django on Ubuntu 8.04 (x86)"},{"id":59,"label":"Drupal 5 on Ubuntu 8.04 (x86)"},{"id":60,"label":"Drupal 6 on Ubuntu 8.04 (x86)"},{"id":61,"label":"Google App Engine on Ubuntu 8.04 (x86)"},{"id":62,"label":"LAMP on Ubuntu 8.04 (x86)"},{"id":63,"label":"LAPP on Ubuntu 8.04 (x86)"},{"id":64,"label":"MediaWiki on Ubuntu 8.04 (x86)"},{"id":65,"label":"MySQL on Ubuntu 8.04 (x86)"},{"id":66,"label":"phpBB on Ubuntu 8.04 (x86)"},{"id":67,"label":"PostgreSQL on Ubuntu 8.04 (x86)"},{"id":68,"label":"Rails on Ubuntu 8.04 (x86)"},{"id":69,"label":"Tomcat on Ubuntu 8.04 (x86)"},{"id":70,"label":"Wordpress on Ubuntu 8.04 (x86)"},{"id":71,"label":"Joomla on Ubuntu 8.04 (x86)"},{"id":72,"label":"Ubuntu 8.04 Default Install (turnkey)"},{"id":128,"label":"CentOS Optimised"},{"id":129,"label":"Optimised CentOS + Apache + MySQL + PHP"},{"id":130,"label":"Optimised CentOS + Apache + MySQL + Ruby"},{"id":131,"label":"Optimised CentOS + Apache + MySQL + Ruby + PHP"},{"id":132,"label":"Debian Optimised"},{"id":133,"label":"Optimised Debian + Apache + MySQL + PHP"},{"id":134,"label":"Optimised Debian + NGINX + MySQL + PHP"},{"id":135,"label":"Optimised Debian + Lighttpd + MySQL + PHP"},{"id":136,"label":"Optimised Debian + Apache + MySQL + Ruby + PHP"},{"id":137,"label":"Optimised Debian + Apache + MySQL + Ruby"},{"id":138,"label":"Optimised Debian + NGINX + MySQL + Ruby + PHP"},{"id":139,"label":"Optimised Debian + NGINX + MySQL + Ruby"},{"id":140,"label":"Optimised Debian + Apache + MySQL + PHP + Magento"},{"id":141,"label":"Optimised Debian + NGINX + MySQL + PHP + Magento"},{"id":142,"label":"Optimised Debian + Lighttpd + MySQL + PHP + Wordpress"}],"id":2,"label":"USA VPS Cloud"}},{"cloud":{"system_templates":[{"id":15,"label":"Ubuntu 8.04 x64"},{"id":16,"label":"CentOS 5.2 x64"},{"id":17,"label":"Gentoo 2008.0 
x64"},{"id":29,"label":"Ubuntu 8.04 x64 LAMP"},{"id":30,"label":"Ubuntu 8.04 x64 MySQL"},{"id":31,"label":"Ubuntu 8.04 x64 Postfix"},{"id":32,"label":"Ubuntu 8.04 x64 Apache"},{"id":33,"label":"CentOS 5.2 x64 MySQL"},{"id":34,"label":"CentOS 5.2 x64 LAMP"},{"id":35,"label":"CentOS 5.2 x64 HAProxy"},{"id":36,"label":"CentOS 5.2 x64 Postfix"},{"id":37,"label":"CentOS 5.2 x64 Varnish"},{"id":38,"label":"CentOS 5.2 x64 Shoutcast"},{"id":39,"label":"CentOS 5.2 x64 Apache"},{"id":41,"label":"cPanel"},{"id":43,"label":"Debian 5.0 (Lenny) x64"},{"id":44,"label":"Django on Ubuntu 8.04 (x86)"},{"id":45,"label":"Drupal 5 on Ubuntu 8.04 (x86)"},{"id":46,"label":"Drupal 6 on Ubuntu 8.04 (x86)"},{"id":47,"label":"Google App Engine on Ubuntu 8.04 (x86)"},{"id":48,"label":"LAMP on Ubuntu 8.04 (x86)"},{"id":49,"label":"LAPP on Ubuntu 8.04 (x86)"},{"id":50,"label":"MediaWiki on Ubuntu 8.04 (x86)"},{"id":51,"label":"MySQL on Ubuntu 8.04 (x86)"},{"id":52,"label":"phpBB on Ubuntu 8.04 (x86)"},{"id":53,"label":"PostgreSQL on Ubuntu 8.04 (x86)"},{"id":54,"label":"Rails on Ubuntu 8.04 (x86)"},{"id":55,"label":"Tomcat on Ubuntu 8.04 (x86)"},{"id":56,"label":"Wordpress on Ubuntu 8.04 (x86)"},{"id":57,"label":"Joomla on Ubuntu 8.04 (x86)"},{"id":73,"label":"Ubuntu 8.04 Default Install (turnkey)"},{"id":148,"label":"CentOS Optimised"},{"id":149,"label":"Optimised CentOS + Apache + MySQL + PHP"},{"id":150,"label":"Optimised CentOS + Apache + MySQL + Ruby"},{"id":151,"label":"Optimised CentOS + Apache + MySQL + Ruby + PHP"},{"id":152,"label":"Debian Optimised"},{"id":153,"label":"Optimised Debian + Apache + MySQL + PHP"},{"id":154,"label":"Optimised Debian + NGINX + MySQL + PHP"},{"id":155,"label":"Optimised Debian + Lighttpd + MySQL + PHP"},{"id":156,"label":"Optimised Debian + Apache + MySQL + Ruby + PHP"},{"id":157,"label":"Optimised Debian + Apache + MySQL + Ruby"},{"id":158,"label":"Optimised Debian + NGINX + MySQL + Ruby + PHP"},{"id":159,"label":"Optimised Debian + NGINX + MySQL + 
Ruby"},{"id":160,"label":"Optimised Debian + Lighttpd + MySQL + PHP + Wordpress"}],"id":3,"label":"UK VPS Cloud"}}] diff -Nru libcloud-0.5.0/libcloud/test/compute/__init__.py libcloud-0.15.1/libcloud/test/compute/__init__.py --- libcloud-0.5.0/libcloud/test/compute/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/__init__.py 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,105 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from libcloud.compute.base import Node, NodeImage, NodeLocation, StorageVolume +from libcloud.pricing import get_pricing + + +class TestCaseMixin(object): + should_list_locations = True + should_have_pricing = False + should_list_volumes = False + + def test_list_nodes_response(self): + nodes = self.driver.list_nodes() + self.assertTrue(isinstance(nodes, list)) + for node in nodes: + self.assertTrue(isinstance(node, Node)) + + def test_list_sizes_response(self): + sizes = self.driver.list_sizes() + size = sizes[0] + self.assertTrue(isinstance(sizes, list)) + # Check that size values are ints or None + self.assertTrue(size.ram is None or isinstance(size.ram, int)) + self.assertTrue(size.disk is None or isinstance(size.disk, int)) + self.assertTrue(size.bandwidth is None or + isinstance(size.bandwidth, int)) + # Check that price values are ints, floats, or None. + self.assertTrue(size.price is None or isinstance(size.price, float) + or isinstance(size.price, int)) + + def test_list_images_response(self): + images = self.driver.list_images() + self.assertTrue(isinstance(images, list)) + for image in images: + self.assertTrue(isinstance(image, NodeImage)) + + def test_list_volumes_response(self): + if not self.should_list_volumes: + return None + + volumes = self.driver.list_volumes() + self.assertTrue(isinstance(volumes, list)) + for volume in volumes: + self.assertTrue(isinstance(volume, StorageVolume)) + + def test_list_locations_response(self): + if not self.should_list_locations: + return None + + locations = self.driver.list_locations() + self.assertTrue(isinstance(locations, list)) + for dc in locations: + self.assertTrue(isinstance(dc, NodeLocation)) + + def test_create_node_response(self): + # should return a node object + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + node = self.driver.create_node(name='node-name', + image=image, + size=size) + self.assertTrue(isinstance(node, Node)) + + def test_destroy_node_response(self): + 
# should return a node object + node = self.driver.list_nodes()[0] + ret = self.driver.destroy_node(node) + self.assertTrue(isinstance(ret, bool)) + + def test_reboot_node_response(self): + # should return a node object + node = self.driver.list_nodes()[0] + ret = self.driver.reboot_node(node) + self.assertTrue(isinstance(ret, bool)) + + def test_get_pricing_success(self): + if not self.should_have_pricing: + return None + + driver_type = 'compute' + try: + get_pricing(driver_type=driver_type, + driver_name=self.driver.api_name) + except KeyError: + self.fail("No {driver_type!r} pricing info for {driver}.".format( + driver=self.driver.__class__.__name__, + driver_type=driver_type, + )) + +if __name__ == "__main__": + import doctest + doctest.testmod() diff -Nru libcloud-0.5.0/libcloud/test/compute/test_abiquo.py libcloud-0.15.1/libcloud/test/compute/test_abiquo.py --- libcloud-0.5.0/libcloud/test/compute/test_abiquo.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_abiquo.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,481 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Abiquo Test Suite +""" +import unittest +import sys + +try: + from lxml import etree as ET +except ImportError: + from xml.etree import ElementTree as ET + +from libcloud.utils.py3 import httplib + +from libcloud.compute.drivers.abiquo import AbiquoNodeDriver +from libcloud.common.abiquo import ForbiddenError, get_href +from libcloud.common.types import InvalidCredsError, LibcloudError +from libcloud.compute.base import NodeLocation, NodeImage +from libcloud.test.compute import TestCaseMixin +from libcloud.test import MockHttpTestCase +from libcloud.test.file_fixtures import ComputeFileFixtures + + +class AbiquoNodeDriverTest(unittest.TestCase, TestCaseMixin): + + """ + Abiquo Node Driver test suite + """ + + def setUp(self): + """ + Set up the driver with the main user + """ + AbiquoNodeDriver.connectionCls.conn_classes = (AbiquoMockHttp, None) + self.driver = AbiquoNodeDriver('son', 'goku', + 'http://dummy.host.com/api') + + def test_unauthorized_controlled(self): + """ + Test the Unauthorized Exception is Controlled. + + Test, through the 'login' method, that a '401 Unauthorized' + raises a 'InvalidCredsError' instead of the 'MalformedUrlException' + """ + self.assertRaises(InvalidCredsError, AbiquoNodeDriver, 'son', + 'goten', 'http://dummy.host.com/api') + + def test_forbidden_controlled(self): + """ + Test the Forbidden Exception is Controlled. + + Test, through the 'list_images' method, that a '403 Forbidden' + raises an 'ForbidenError' instead of the 'MalformedUrlException' + """ + AbiquoNodeDriver.connectionCls.conn_classes = (AbiquoMockHttp, None) + conn = AbiquoNodeDriver('son', 'gohan', 'http://dummy.host.com/api') + self.assertRaises(ForbiddenError, conn.list_images) + + def test_handle_other_errors_such_as_not_found(self): + """ + Test common 'logical' exceptions are controlled. + + Test that common exception (normally 404-Not Found and 409-Conflict), + that return an XMLResponse with the explanation of the errors are + controlled. 
+ """ + self.driver = AbiquoNodeDriver('go', 'trunks', + 'http://dummy.host.com/api') + self.assertRaises(LibcloudError, self.driver.list_images) + + def test_ex_create_and_delete_empty_group(self): + """ + Test the creation and deletion of an empty group. + """ + group = self.driver.ex_create_group('libcloud_test_group') + group.destroy() + + def test_create_node_no_image_raise_exception(self): + """ + Test 'create_node' without image. + + Test the 'create_node' function without 'image' parameter raises + an Exception + """ + self.assertRaises(LibcloudError, self.driver.create_node) + + def test_create_node_specify_location(self): + """ + Test you can create a node specifying the location. + """ + image = self.driver.list_images()[0] + location = self.driver.list_locations()[0] + self.driver.create_node(image=image, location=location) + + def test_create_node_specify_wrong_location(self): + """ + Test you can not create a node with wrong location. + """ + image = self.driver.list_images()[0] + location = NodeLocation(435, 'fake-location', 'Spain', self.driver) + self.assertRaises(LibcloudError, self.driver.create_node, image=image, + location=location) + + def test_create_node_specify_wrong_image(self): + """ + Test image compatibility. + + Some locations only can handle a group of images, not all of them. + Test you can not create a node with incompatible image-location. + """ + # Create fake NodeImage + image = NodeImage(3234, 'dummy-image', self.driver) + location = self.driver.list_locations()[0] + # With this image, it should raise an Exception + self.assertRaises(LibcloudError, self.driver.create_node, image=image, + location=location) + + def test_create_node_specify_group_name(self): + """ + Test 'create_node' into a concrete group. + """ + image = self.driver.list_images()[0] + self.driver.create_node(image=image, group_name='new_group_name') + + def test_create_group_location_does_not_exist(self): + """ + Test 'create_node' with an unexistent location. 
+ + Defines a 'fake' location and tries to create a node into it. + """ + location = NodeLocation(435, 'fake-location', 'Spain', self.driver) + # With this location, it should raise an Exception + self.assertRaises(LibcloudError, self.driver.ex_create_group, + name='new_group_name', + location=location) + + def test_destroy_node_response(self): + """ + 'destroy_node' basic test. + + Override the destroy to return a different node available + to be undeployed. (by default it returns an already undeployed node, + for test creation). + """ + self.driver = AbiquoNodeDriver('go', 'trunks', + 'http://dummy.host.com/api') + node = self.driver.list_nodes()[0] + ret = self.driver.destroy_node(node) + self.assertTrue(ret) + + def test_destroy_node_response_failed(self): + """ + 'destroy_node' asynchronous error. + + Test that the driver handles correctly when, for some reason, + the 'destroy' job fails. + """ + self.driver = AbiquoNodeDriver('muten', 'roshi', + 'http://dummy.host.com/api') + node = self.driver.list_nodes()[0] + ret = self.driver.destroy_node(node) + self.assertFalse(ret) + + def test_destroy_node_allocation_state(self): + """ + Test the 'destroy_node' invalid state. + + Try to destroy a node when the node is not running. + """ + self.driver = AbiquoNodeDriver('ve', 'geta', + 'http://dummy.host.com/api') + # Override the destroy to return a different node available to be + # undeployed + node = self.driver.list_nodes()[0] + # The mock class with the user:password 've:geta' returns a node that + # is in 'ALLOCATION' state and hence, the 'destroy_node' method should + # raise a LibcloudError + self.assertRaises(LibcloudError, self.driver.destroy_node, node) + + def test_destroy_not_deployed_group(self): + """ + Test 'ex_destroy_group' when group is not deployed. 
+ """ + location = self.driver.list_locations()[0] + group = self.driver.ex_list_groups(location)[1] + self.assertTrue(group.destroy()) + + def test_destroy_deployed_group(self): + """ + Test 'ex_destroy_group' when there are machines running. + """ + location = self.driver.list_locations()[0] + group = self.driver.ex_list_groups(location)[0] + self.assertTrue(group.destroy()) + + def test_destroy_deployed_group_failed(self): + """ + Test 'ex_destroy_group' fails. + + Test driver handles correctly when, for some reason, the + asynchronous job fails. + """ + self.driver = AbiquoNodeDriver('muten', 'roshi', + 'http://dummy.host.com/api') + location = self.driver.list_locations()[0] + group = self.driver.ex_list_groups(location)[0] + self.assertFalse(group.destroy()) + + def test_destroy_group_invalid_state(self): + """ + Test 'ex_destroy_group' invalid state. + + Test the Driver raises an exception when the group is in + invalid temporal state. + """ + self.driver = AbiquoNodeDriver('ve', 'geta', + 'http://dummy.host.com/api') + location = self.driver.list_locations()[0] + group = self.driver.ex_list_groups(location)[1] + self.assertRaises(LibcloudError, group.destroy) + + def test_run_node(self): + """ + Test 'ex_run_node' feature. + """ + node = self.driver.list_nodes()[0] + # Node is by default in NodeState.TERMINATED and AbiquoState == + # 'NOT_ALLOCATED' + # so it is available to be runned + self.driver.ex_run_node(node) + + def test_run_node_invalid_state(self): + """ + Test 'ex_run_node' invalid state. + + Test the Driver raises an exception when try to run a + node that is in invalid state to run. + """ + self.driver = AbiquoNodeDriver('go', 'trunks', + 'http://dummy.host.com/api') + node = self.driver.list_nodes()[0] + # Node is by default in AbiquoState = 'ON' for user 'go:trunks' + # so is not available to be runned + self.assertRaises(LibcloudError, self.driver.ex_run_node, node) + + def test_run_node_failed(self): + """ + Test 'ex_run_node' fails. 
+ + Test driver handles correctly when, for some reason, the + asynchronous job fails. + """ + self.driver = AbiquoNodeDriver('ten', 'shin', + 'http://dummy.host.com/api') + node = self.driver.list_nodes()[0] + # Node is in the correct state, but it fails because of the + # async task and it raises the error. + self.assertRaises(LibcloudError, self.driver.ex_run_node, node) + + def test_get_href(self): + xml = ''' + + + + + +''' + + elem = ET.XML(xml) + + href = get_href(element=elem, rel='edit1') + self.assertEqual(href, '/admin/datacenters/2') + href = get_href(element=elem, rel='edit2') + self.assertEqual(href, '/admin/datacenters/3') + href = get_href(element=elem, rel='edit3') + self.assertEqual(href, '/admin/enterprises/1234') + + +class AbiquoMockHttp(MockHttpTestCase): + + """ + Mock the functionallity of the remote Abiquo API. + """ + fixtures = ComputeFileFixtures('abiquo') + fixture_tag = 'default' + + def _api_login(self, method, url, body, headers): + if headers['Authorization'] == 'Basic c29uOmdvdGVu': + expected_response = self.fixtures.load('unauthorized_user.html') + expected_status = httplib.UNAUTHORIZED + else: + expected_response = self.fixtures.load('login.xml') + expected_status = httplib.OK + return (expected_status, expected_response, {}, '') + + def _api_cloud_virtualdatacenters(self, method, url, body, headers): + return (httplib.OK, self.fixtures.load('vdcs.xml'), {}, '') + + def _api_cloud_virtualdatacenters_4(self, method, url, body, headers): + return (httplib.OK, self.fixtures.load('vdc_4.xml'), {}, '') + + def _api_cloud_virtualdatacenters_4_virtualappliances(self, method, url, body, headers): + if method == 'POST': + vapp_name = ET.XML(body).findtext('name') + if vapp_name == 'libcloud_test_group': + # we come from 'test_ex_create_and_delete_empty_group(self):' + # method and so, we return the 'ok' return + response = self.fixtures.load('vdc_4_vapp_creation_ok.xml') + return (httplib.OK, response, {}, '') + elif vapp_name == 
'new_group_name': + # we come from 'test_ex_create_and_delete_empty_group(self):' + # method and so, we return the 'ok' return + response = self.fixtures.load('vdc_4_vapp_creation_ok.xml') + return (httplib.OK, response, {}, '') + else: + # It will be a 'GET'; + return (httplib.OK, self.fixtures.load('vdc_4_vapps.xml'), {}, '') + + def _api_cloud_virtualdatacenters_4_virtualappliances_5(self, method, url, body, headers): + if method == 'GET': + if headers['Authorization'] == 'Basic dmU6Z2V0YQ==': + # Try to destroy a group with 'needs_sync' state + response = self.fixtures.load('vdc_4_vapp_5_needs_sync.xml') + else: + # Try to destroy a group with 'undeployed' state + response = self.fixtures.load('vdc_4_vapp_5.xml') + return (httplib.OK, response, {}, '') + else: + # it will be a 'DELETE' + return (httplib.NO_CONTENT, '', {}, '') + + def _api_cloud_virtualdatacenters_4_virtualappliances_6(self, method, url, body, headers): + if method == 'GET': + # deployed vapp + response = self.fixtures.load('vdc_4_vapp_6.xml') + return (httplib.OK, response, {}, '') + else: + # it will be a 'DELETE' + return (httplib.NO_CONTENT, '', {}, '') + + def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_1da8c8b6_86f6_49ef_9d29_57dcc73b875a(self, method, url, body, headers): + if headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk=': + # User 'muten:roshi' failed task + response = self.fixtures.load( + 'vdc_4_vapp_6_undeploy_task_failed.xml') + else: + response = self.fixtures.load('vdc_4_vapp_6_undeploy_task.xml') + return (httplib.OK, response, {}, '') + + def _api_cloud_virtualdatacenters_4_virtualappliances_5_virtualmachines( + self, method, url, body, headers): + # This virtual app never have virtual machines + if method == 'GET': + response = self.fixtures.load('vdc_4_vapp_5_vms.xml') + return (httplib.OK, response, {}, '') + elif method == 'POST': + # it must be a POST + response = self.fixtures.load('vdc_4_vapp_6_vm_creation_ok.xml') + return 
(httplib.CREATED, response, {}, '') + + def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines( + self, method, url, body, headers): + # Default-created virtual app virtual machines' + if method == 'GET': + if headers['Authorization'] == 'Basic dmU6Z2V0YQ==': + response = self.fixtures.load('vdc_4_vapp_6_vms_allocated.xml') + else: + response = self.fixtures.load('vdc_4_vapp_6_vms.xml') + return (httplib.OK, response, {}, '') + else: + # it must be a POST + response = self.fixtures.load('vdc_4_vapp_6_vm_creation_ok.xml') + return (httplib.CREATED, response, {}, '') + + def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3(self, method, url, body, headers): + if (headers['Authorization'] == 'Basic Z286dHJ1bmtz' or + headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk='): + # Undeploy node + response = self.fixtures.load("vdc_4_vapp_6_vm_3_deployed.xml") + elif headers['Authorization'] == 'Basic dmU6Z2V0YQ==': + # Try to undeploy a node with 'allocation' state + response = self.fixtures.load('vdc_4_vapp_6_vm_3_allocated.xml') + else: + # Get node + response = self.fixtures.load('vdc_4_vapp_6_vm_3.xml') + return (httplib.OK, response, {}, '') + + def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_deploy(self, method, url, + body, headers): + response = self.fixtures.load('vdc_4_vapp_6_vm_3_deploy.xml') + return (httplib.CREATED, response, {}, '') + + def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_b44fe278_6b0f_4dfb_be81_7c03006a93cb(self, method, url, body, headers): + + if headers['Authorization'] == 'Basic dGVuOnNoaW4=': + # User 'ten:shin' failed task + response = self.fixtures.load( + 'vdc_4_vapp_6_vm_3_deploy_task_failed.xml') + else: + response = self.fixtures.load('vdc_4_vapp_6_vm_3_deploy_task.xml') + return (httplib.OK, response, {}, '') + + def _api_cloud_virtualdatacenters_4_virtualappliances_6_action_undeploy( + self, method, url, body, headers): + response = 
self.fixtures.load('vdc_4_vapp_6_undeploy.xml') + return (httplib.OK, response, {}, '') + + def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_reset(self, method, + url, body, headers): + response = self.fixtures.load('vdc_4_vapp_6_vm_3_reset.xml') + return (httplib.CREATED, response, {}, '') + + def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_a8c9818e_f389_45b7_be2c_3db3a9689940(self, method, url, body, headers): + if headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk=': + # User 'muten:roshi' failed task + response = self.fixtures.load( + 'vdc_4_vapp_6_undeploy_task_failed.xml') + else: + response = self.fixtures.load('vdc_4_vapp_6_vm_3_reset_task.xml') + return (httplib.OK, response, {}, '') + + def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_undeploy(self, method, url, + body, headers): + response = self.fixtures.load('vdc_4_vapp_6_vm_3_undeploy.xml') + return (httplib.CREATED, response, {}, '') + + def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_network_nics(self, method, url, + body, headers): + response = self.fixtures.load('vdc_4_vapp_6_vm_3_nics.xml') + return (httplib.OK, response, {}, '') + + def _api_admin_datacenters(self, method, url, body, headers): + return (httplib.OK, self.fixtures.load('dcs.xml'), {}, '') + + def _api_admin_enterprises_1(self, method, url, body, headers): + return (httplib.OK, self.fixtures.load('ent_1.xml'), {}, '') + + def _api_admin_enterprises_1_datacenterrepositories(self, method, url, body, headers): + # When the user is the common one for all the tests ('son, 'goku') + # it creates this basic auth and we return the datacenters value + if headers['Authorization'] == 'Basic Z286dHJ1bmtz': + expected_response = self.fixtures.load("not_found_error.xml") + return (httplib.NOT_FOUND, expected_response, {}, '') + elif headers['Authorization'] != 'Basic c29uOmdvaGFu': + return (httplib.OK, 
self.fixtures.load('ent_1_dcreps.xml'), {}, '') + else: + # son:gohan user: forbidden error + expected_response = self.fixtures.load("privilege_errors.html") + return (httplib.FORBIDDEN, expected_response, {}, '') + + def _api_admin_enterprises_1_datacenterrepositories_2(self, method, url, body, headers): + return (httplib.OK, self.fixtures.load('ent_1_dcrep_2.xml'), {}, '') + + def _api_admin_enterprises_1_datacenterrepositories_2_virtualmachinetemplates(self, method, url, body, headers): + return (httplib.OK, self.fixtures.load('ent_1_dcrep_2_templates.xml'), + {}, '') + + def _api_admin_enterprises_1_datacenterrepositories_2_virtualmachinetemplates_11(self, method, url, body, headers): + return ( + httplib.OK, self.fixtures.load('ent_1_dcrep_2_template_11.xml'), + {}, '') + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_base.py libcloud-0.15.1/libcloud/test/compute/test_base.py --- libcloud-0.5.0/libcloud/test/compute/test_base.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_base.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,114 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys +import unittest + +from libcloud.common.base import Response +from libcloud.common.base import Connection, ConnectionKey, ConnectionUserAndKey +from libcloud.common.types import LibcloudError +from libcloud.compute.base import Node, NodeSize, NodeImage, NodeDriver +from libcloud.compute.base import NodeAuthSSHKey, NodeAuthPassword + +from libcloud.test import MockResponse # pylint: disable-msg=E0611 + + +class FakeDriver(object): + type = 0 + + +class BaseTests(unittest.TestCase): + + def test_base_node(self): + Node(id=0, name=0, state=0, public_ips=0, private_ips=0, + driver=FakeDriver()) + + def test_base_node_size(self): + NodeSize(id=0, name=0, ram=0, disk=0, bandwidth=0, price=0, + driver=FakeDriver()) + + def test_base_node_image(self): + NodeImage(id=0, name=0, driver=FakeDriver()) + + def test_base_response(self): + Response(MockResponse(status=200, body='foo'), ConnectionKey('foo')) + + def test_base_node_driver(self): + NodeDriver('foo') + + def test_base_connection_key(self): + ConnectionKey('foo') + + def test_base_connection_userkey(self): + ConnectionUserAndKey('foo', 'bar') + + def test_base_connection_timeout(self): + Connection(timeout=10) + + +class TestValidateAuth(unittest.TestCase): + + def test_get_auth_ssh(self): + n = NodeDriver('foo') + n.features = {'create_node': ['ssh_key']} + auth = NodeAuthSSHKey('pubkey...') + self.assertEqual(auth, n._get_and_check_auth(auth)) + + def test_get_auth_ssh_but_given_password(self): + n = NodeDriver('foo') + n.features = {'create_node': ['ssh_key']} + auth = NodeAuthPassword('password') + self.assertRaises(LibcloudError, n._get_and_check_auth, auth) + + def test_get_auth_password(self): + n = NodeDriver('foo') + n.features = {'create_node': ['password']} + auth = NodeAuthPassword('password') + self.assertEqual(auth, n._get_and_check_auth(auth)) + + def test_get_auth_password_but_given_ssh_key(self): + n = NodeDriver('foo') + n.features = {'create_node': ['password']} + auth = 
NodeAuthSSHKey('publickey') + self.assertRaises(LibcloudError, n._get_and_check_auth, auth) + + def test_get_auth_default_ssh_key(self): + n = NodeDriver('foo') + n.features = {'create_node': ['ssh_key']} + self.assertEqual(None, n._get_and_check_auth(None)) + + def test_get_auth_default_password(self): + n = NodeDriver('foo') + n.features = {'create_node': ['password']} + auth = n._get_and_check_auth(None) + self.assertTrue(isinstance(auth, NodeAuthPassword)) + + def test_get_auth_default_no_feature(self): + n = NodeDriver('foo') + self.assertEqual(None, n._get_and_check_auth(None)) + + def test_get_auth_generates_password_but_given_nonsense(self): + n = NodeDriver('foo') + n.features = {'create_node': ['generates_password']} + auth = "nonsense" + self.assertRaises(LibcloudError, n._get_and_check_auth, auth) + + def test_get_auth_no_features_but_given_nonsense(self): + n = NodeDriver('foo') + auth = "nonsense" + self.assertRaises(LibcloudError, n._get_and_check_auth, auth) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_bluebox.py libcloud-0.15.1/libcloud/test/compute/test_bluebox.py --- libcloud-0.5.0/libcloud/test/compute/test_bluebox.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_bluebox.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,117 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import unittest +from libcloud.utils.py3 import httplib + +from libcloud.compute.drivers.bluebox import BlueboxNodeDriver as Bluebox +from libcloud.compute.base import Node, NodeAuthPassword +from libcloud.compute.types import NodeState + + +from libcloud.test import MockHttp +from libcloud.test.file_fixtures import ComputeFileFixtures +from libcloud.test.secrets import BLUEBOX_PARAMS + + +class BlueboxTest(unittest.TestCase): + + def setUp(self): + Bluebox.connectionCls.conn_classes = (None, BlueboxMockHttp) + self.driver = Bluebox(*BLUEBOX_PARAMS) + + def test_create_node(self): + node = self.driver.create_node( + name='foo', + size=self.driver.list_sizes()[0], + image=self.driver.list_images()[0], + auth=NodeAuthPassword("test123") + ) + self.assertTrue(isinstance(node, Node)) + self.assertEqual(node.state, NodeState.PENDING) + self.assertEqual(node.name, 'foo.apitest.blueboxgrid.com') + + def test_list_nodes(self): + node = self.driver.list_nodes()[0] + self.assertEqual(node.name, 'foo.apitest.blueboxgrid.com') + self.assertEqual(node.state, NodeState.RUNNING) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 4) + + ids = [s.id for s in sizes] + + for size in sizes: + self.assertTrue(size.price > 0) + + self.assertTrue('94fd37a7-2606-47f7-84d5-9000deda52ae' in ids) + self.assertTrue('b412f354-5056-4bf0-a42f-6ddd998aa092' in ids) + self.assertTrue('0cd183d3-0287-4b1a-8288-b3ea8302ed58' in ids) + self.assertTrue('b9b87a5b-2885-4a2e-b434-44a163ca6251' in ids) + + def test_list_images(self): + images 
= self.driver.list_images() + image = images[0] + self.assertEqual(len(images), 10) + self.assertEqual(image.name, 'CentOS 5 (Latest Release)') + self.assertEqual(image.id, 'c66b8145-f768-45ef-9878-395bf8b1b7ff') + + def test_reboot_node(self): + node = self.driver.list_nodes()[0] + ret = self.driver.reboot_node(node) + self.assertTrue(ret) + + def test_destroy_node(self): + node = self.driver.list_nodes()[0] + ret = self.driver.destroy_node(node) + self.assertTrue(ret) + + +class BlueboxMockHttp(MockHttp): + + fixtures = ComputeFileFixtures('bluebox') + + def _api_blocks_json(self, method, url, body, headers): + if method == "POST": + body = self.fixtures.load('api_blocks_json_post.json') + else: + body = self.fixtures.load('api_blocks_json.json') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _api_block_products_json(self, method, url, body, headers): + body = self.fixtures.load('api_block_products_json.json') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _api_block_templates_json(self, method, url, body, headers): + body = self.fixtures.load('api_block_templates_json.json') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json(self, method, url, body, headers): + if method == 'DELETE': + body = self.fixtures.load( + 'api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json') + else: + body = self.fixtures.load( + 'api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json(self, method, url, body, headers): + body = self.fixtures.load( + 'api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru 
libcloud-0.5.0/libcloud/test/compute/test_brightbox.py libcloud-0.15.1/libcloud/test/compute/test_brightbox.py --- libcloud-0.5.0/libcloud/test/compute/test_brightbox.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_brightbox.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,350 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys +import unittest +import base64 + +try: + import simplejson as json +except ImportError: + import json + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import b + +from libcloud.common.types import InvalidCredsError +from libcloud.compute.drivers.brightbox import BrightboxNodeDriver +from libcloud.compute.types import NodeState + +from libcloud.test import MockHttp +from libcloud.test.compute import TestCaseMixin +from libcloud.test.file_fixtures import ComputeFileFixtures +from libcloud.test.secrets import BRIGHTBOX_PARAMS + +USER_DATA = '#!/bin/sh\ntest_script.sh\n' + + +class BrightboxTest(unittest.TestCase, TestCaseMixin): + + def setUp(self): + BrightboxNodeDriver.connectionCls.conn_classes = ( + None, BrightboxMockHttp) + BrightboxMockHttp.type = None + self.driver = BrightboxNodeDriver(*BRIGHTBOX_PARAMS) + + def test_authentication(self): + BrightboxMockHttp.type = 'INVALID_CLIENT' + self.assertRaises(InvalidCredsError, self.driver.list_nodes) + + BrightboxMockHttp.type = 'UNAUTHORIZED_CLIENT' + self.assertRaises(InvalidCredsError, self.driver.list_nodes) + + def test_invalid_api_version(self): + kwargs = {'api_version': '2.0'} + self.driver = BrightboxNodeDriver(*BRIGHTBOX_PARAMS, **kwargs) + self.assertRaises(Exception, self.driver.list_locations) + + def test_other_host(self): + kwargs = {'host': 'api.gbt.brightbox.com'} + self.driver = BrightboxNodeDriver(*BRIGHTBOX_PARAMS, **kwargs) + locations = self.driver.list_locations() + self.assertEqual(len(locations), 0) + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + self.assertEqual(len(nodes), 2) + self.assertEqual(len(nodes[0].public_ips), 1) + self.assertEqual(len(nodes[1].public_ips), 1) + self.assertEqual(len(nodes[0].private_ips), 1) + self.assertEqual(len(nodes[1].private_ips), 1) + self.assertTrue('109.107.35.16' in nodes[0].public_ips) + self.assertTrue('10.74.210.210' in nodes[0].private_ips) + self.assertTrue('10.240.228.234' in nodes[1].private_ips) 
+ self.assertTrue( + '2a02:1348:14c:393a:24:19ff:fef0:e4ea' in nodes[1].public_ips) + self.assertEqual(nodes[0].state, NodeState.RUNNING) + self.assertEqual(nodes[1].state, NodeState.RUNNING) + + def test_list_node_extras(self): + nodes = self.driver.list_nodes() + self.assertFalse(nodes[0].size is None) + self.assertFalse(nodes[1].size is None) + self.assertFalse(nodes[0].image is None) + self.assertFalse(nodes[1].image is None) + self.assertEqual(nodes[0].image.id, 'img-arm8f') + self.assertEqual(nodes[0].size.id, 'typ-urtky') + self.assertEqual(nodes[1].image.id, 'img-j93gd') + self.assertEqual(nodes[1].size.id, 'typ-qdiwq') + self.assertEqual(nodes[0].extra['fqdn'], 'srv-xvpn7.gb1.brightbox.com') + self.assertEqual(nodes[1].extra['fqdn'], 'srv-742vn.gb1.brightbox.com') + self.assertEqual(nodes[0].extra['hostname'], 'srv-xvpn7') + self.assertEqual(nodes[1].extra['hostname'], 'srv-742vn') + self.assertEqual(nodes[0].extra['status'], 'active') + self.assertEqual(nodes[1].extra['status'], 'active') + self.assertTrue('interfaces' in nodes[0].extra) + self.assertTrue('zone' in nodes[0].extra) + self.assertTrue('snapshots' in nodes[0].extra) + self.assertTrue('server_groups' in nodes[0].extra) + self.assertTrue('started_at' in nodes[0].extra) + self.assertTrue('created_at' in nodes[0].extra) + self.assertFalse('deleted_at' in nodes[0].extra) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 7) + self.assertEqual(sizes[0].id, 'typ-4nssg') + self.assertEqual(sizes[0].name, 'Brightbox Nano Instance') + self.assertEqual(sizes[0].ram, 512) + self.assertEqual(sizes[0].disk, 20480) + self.assertEqual(sizes[0].bandwidth, 0) + self.assertEqual(sizes[0].price, 0) + + def test_list_images(self): + images = self.driver.list_images() + self.assertEqual(len(images), 3) + self.assertEqual(images[0].id, 'img-99q79') + self.assertEqual(images[0].name, 'CentOS 5.5 server') + self.assertTrue('ancestor' in images[0].extra) + 
self.assertFalse('licence_name' in images[0].extra) + + def test_list_images_extras(self): + images = self.driver.list_images() + extra = images[-1].extra + self.assertEqual(extra['arch'], 'i686') + self.assertFalse(extra['compatibility_mode']) + self.assertEqual(extra['created_at'], '2012-01-22T05:36:24Z') + self.assertTrue('description' in extra) + self.assertEqual(extra['disk_size'], 671) + self.assertFalse('min_ram' in extra) + self.assertFalse(extra['official']) + self.assertEqual(extra['owner'], 'acc-tqs4c') + self.assertTrue(extra['public']) + self.assertEqual(extra['source'], 'oneiric-i386-20178.gz') + self.assertEqual(extra['source_type'], 'upload') + self.assertEqual(extra['status'], 'deprecated') + self.assertEqual(extra['username'], 'ubuntu') + self.assertEqual(extra['virtual_size'], 1025) + self.assertFalse('ancestor' in extra) + self.assertFalse('licence_name' in extra) + + def test_list_locations(self): + locations = self.driver.list_locations() + self.assertEqual(locations[0].id, 'zon-6mxqw') + self.assertEqual(locations[0].name, 'gb1-a') + self.assertEqual(locations[1].id, 'zon-remk1') + self.assertEqual(locations[1].name, 'gb1-b') + + def test_reboot_node_response(self): + node = self.driver.list_nodes()[0] + self.assertRaises(NotImplementedError, self.driver.reboot_node, [node]) + + def test_destroy_node(self): + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver.destroy_node(node)) + + def test_create_node(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + node = self.driver.create_node( + name='Test Node', image=image, size=size) + self.assertEqual('srv-p61uj', node.id) + self.assertEqual('Test Node', node.name) + self.assertEqual('gb1-a', node.extra['zone'].name) + + def test_create_node_in_location(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + location = self.driver.list_locations()[1] + node = self.driver.create_node( + name='Test Node', image=image, 
size=size, location=location) + self.assertEqual('srv-nnumd', node.id) + self.assertEqual('Test Node', node.name) + self.assertEqual('gb1-b', node.extra['zone'].name) + + def test_create_node_with_user_data(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + node = self.driver.create_node( + name='Test Node', image=image, size=size, ex_userdata=USER_DATA) + decoded = base64.b64decode(b(node.extra['user_data'])).decode('ascii') + self.assertEqual('gb1-a', node.extra['zone'].name) + self.assertEqual(USER_DATA, decoded) + + def test_create_node_with_a_server_group(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + node = self.driver.create_node( + name='Test Node', image=image, size=size, ex_servergroup='grp-12345') + self.assertEqual('gb1-a', node.extra['zone'].name) + self.assertEqual(len(node.extra['server_groups']), 1) + self.assertEqual(node.extra['server_groups'][0]['id'], 'grp-12345') + + def test_create_node_with_a_list_of_server_groups(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + node = self.driver.create_node( + name='Test Node', image=image, size=size, ex_servergroup=['grp-12345', 'grp-67890']) + self.assertEqual('gb1-a', node.extra['zone'].name) + self.assertEqual(len(node.extra['server_groups']), 2) + self.assertEqual(node.extra['server_groups'][0]['id'], 'grp-12345') + self.assertEqual(node.extra['server_groups'][1]['id'], 'grp-67890') + + def test_list_cloud_ips(self): + cip_list = self.driver.ex_list_cloud_ips() + self.assertEqual(len(cip_list), 4) + self.assertEqual(cip_list[2]['status'], 'mapped') + cip_check = cip_list[0] + self.assertEqual(cip_check['id'], 'cip-tlrp3') + self.assertEqual(cip_check['public_ip'], '109.107.35.16') + self.assertEqual( + cip_check['reverse_dns'], 'cip-109-107-35-16.gb1.brightbox.com') + self.assertEqual(cip_check['status'], 'unmapped') + self.assertTrue(cip_check['server'] is None) + 
self.assertTrue(cip_check['server_group'] is None) + self.assertTrue(cip_check['interface'] is None) + self.assertTrue(cip_check['load_balancer'] is None) + + def test_create_cloud_ip(self): + cip = self.driver.ex_create_cloud_ip() + self.assertEqual(cip['id'], 'cip-jsjc5') + self.assertEqual( + cip['reverse_dns'], 'cip-109-107-37-234.gb1.brightbox.com') + + def test_create_cloud_ip_with_dns(self): + cip = self.driver.ex_create_cloud_ip('fred.co.uk') + self.assertEqual(cip['id'], 'cip-jsjc5') + self.assertEqual(cip['reverse_dns'], 'fred.co.uk') + + def test_map_cloud_ip(self): + self.assertTrue(self.driver.ex_map_cloud_ip('cip-jsjc5', 'int-ztqbx')) + + def test_unmap_cloud_ip(self): + self.assertTrue(self.driver.ex_unmap_cloud_ip('cip-jsjc5')) + + def test_update_cloud_ip(self): + self.assertTrue( + self.driver.ex_update_cloud_ip('cip-jsjc5', 'fred.co.uk')) + + def test_destroy_cloud_ip(self): + self.assertTrue(self.driver.ex_destroy_cloud_ip('cip-jsjc5')) + + +class BrightboxMockHttp(MockHttp): + fixtures = ComputeFileFixtures('brightbox') + + def _token(self, method, url, body, headers): + if method == 'POST': + return self.response(httplib.OK, self.fixtures.load('token.json')) + + def _token_INVALID_CLIENT(self, method, url, body, headers): + if method == 'POST': + return self.response(httplib.BAD_REQUEST, '{"error":"invalid_client"}') + + def _token_UNAUTHORIZED_CLIENT(self, method, url, body, headers): + if method == 'POST': + return self.response(httplib.UNAUTHORIZED, '{"error":"unauthorized_client"}') + + def _1_0_images(self, method, url, body, headers): + if method == 'GET': + return self.response(httplib.OK, self.fixtures.load('list_images.json')) + + def _1_0_servers(self, method, url, body, headers): + if method == 'GET': + return self.response(httplib.OK, self.fixtures.load('list_servers.json')) + elif method == 'POST': + body = json.loads(body) + encoded = base64.b64encode(b(USER_DATA)).decode('ascii') + + if 'user_data' in body and body['user_data'] 
!= encoded: + data = '{"error_name":"dodgy user data", "errors": ["User data not encoded properly"]}' + return self.response(httplib.BAD_REQUEST, data) + if body.get('zone', '') == 'zon-remk1': + node = json.loads( + self.fixtures.load('create_server_gb1_b.json')) + else: + node = json.loads( + self.fixtures.load('create_server_gb1_a.json')) + node['name'] = body['name'] + if 'server_groups' in body: + node['server_groups'] = [{'id': x} + for x in body['server_groups']] + if 'user_data' in body: + node['user_data'] = body['user_data'] + return self.response(httplib.ACCEPTED, json.dumps(node)) + + def _1_0_servers_srv_xvpn7(self, method, url, body, headers): + if method == 'DELETE': + return self.response(httplib.ACCEPTED, '') + + def _1_0_server_types(self, method, url, body, headers): + if method == 'GET': + return self.response(httplib.OK, self.fixtures.load('list_server_types.json')) + + def _1_0_zones(self, method, url, body, headers): + if method == 'GET': + if headers['Host'] == 'api.gbt.brightbox.com': + return self.response(httplib.OK, "{}") + else: + return self.response(httplib.OK, self.fixtures.load('list_zones.json')) + + def _2_0_zones(self, method, url, body, headers): + data = '{"error_name":"unrecognised_endpoint", "errors": ["The request was for an unrecognised API endpoint"]}' + return self.response(httplib.BAD_REQUEST, data) + + def _1_0_cloud_ips(self, method, url, body, headers): + if method == 'GET': + return self.response(httplib.OK, self.fixtures.load('list_cloud_ips.json')) + elif method == 'POST': + if body: + body = json.loads(body) + + node = json.loads(self.fixtures.load('create_cloud_ip.json')) + + if 'reverse_dns' in body: + node['reverse_dns'] = body['reverse_dns'] + return self.response(httplib.ACCEPTED, json.dumps(node)) + + def _1_0_cloud_ips_cip_jsjc5(self, method, url, body, headers): + if method == 'DELETE': + return self.response(httplib.OK, '') + elif method == 'PUT': + body = json.loads(body) + if body.get('reverse_dns', 
None) == 'fred.co.uk': + return self.response(httplib.OK, '') + else: + return self.response(httplib.BAD_REQUEST, '{"error_name":"bad dns", "errors": ["Bad dns"]}') + + def _1_0_cloud_ips_cip_jsjc5_map(self, method, url, body, headers): + if method == 'POST': + body = json.loads(body) + if 'destination' in body: + return self.response(httplib.ACCEPTED, '') + else: + data = '{"error_name":"bad destination", "errors": ["Bad destination"]}' + return self.response(httplib.BAD_REQUEST, data) + + def _1_0_cloud_ips_cip_jsjc5_unmap(self, method, url, body, headers): + if method == 'POST': + return self.response(httplib.ACCEPTED, '') + + def response(self, status, body): + return (status, body, {'content-type': 'application/json'}, httplib.responses[status]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) + +# vim: autoindent tabstop=4 shiftwidth=4 expandtab softtabstop=4 +# filetype=python diff -Nru libcloud-0.5.0/libcloud/test/compute/test_cloudframes.py libcloud-0.15.1/libcloud/test/compute/test_cloudframes.py --- libcloud-0.5.0/libcloud/test/compute/test_cloudframes.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_cloudframes.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,292 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import uuid +import base64 +import unittest +import datetime + +from libcloud.compute.base import Node, NodeImage, NodeLocation +from libcloud.compute.types import NodeState +from libcloud.compute.drivers.cloudframes import CloudFramesNodeDriver +from libcloud.compute.drivers.cloudframes import CloudFramesSnapshot + +from libcloud.utils.py3 import httplib, xmlrpclib, b +from libcloud.test import MockHttpTestCase +from libcloud.test.compute import TestCaseMixin +from libcloud.test.secrets import CLOUDFRAMES_PARAMS +from libcloud.test.file_fixtures import ComputeFileFixtures + + +# how many seconds to give the vm to boot and have VMWare tools start up +START_TIMEOUT = 300 + + +class CloudFramesMockHttp(MockHttpTestCase): + fixtures = ComputeFileFixtures('cloudframes') + + content_headers = { + 'Server': 'TwistedWeb/9.0.0', + 'Date': datetime.datetime.now().ctime(), + 'Content-Type': 'text/xml', + } + + def __getattr__(self, key): + if key == '_appserver_xmlrpc_http:__host:8888_appserver_xmlrpc': + return self._xmlrpc + raise AttributeError(key) + + def _xmlrpc(self, method, url, body, headers): + params, methodname = xmlrpclib.loads(body) + meth_name = methodname.replace('.', '_').replace('cloud_api_', '') + return getattr(self, meth_name)(method, url, params, headers) + + def _authenticate(self, headers): + self.assertTrue('Authorization' in headers.keys()) + self.assertTrue(headers['Authorization'].startswith('Basic ')) + auth = base64.b64decode( + b(headers['Authorization'].split(' ', 1)[1])).decode('ascii') + username, password = auth.split(':', 1) + self.assertEqual(username, CLOUDFRAMES_PARAMS[0]) + self.assertEqual(password, CLOUDFRAMES_PARAMS[1]) + + def cloudspace_find(self, method, url, params, headers): + self._authenticate(headers) + body = self.fixtures.load('_cloudspace_find.xml') + return (httplib.OK, body, self.content_headers, + 
httplib.responses[httplib.OK]) + + def machine_find(self, method, url, params, headers): + self._authenticate(headers) + if params[18]: + body = self.fixtures.load('_machine_find_templates.xml') + elif params[17] == 'PHYSICAL': + body = self.fixtures.load('_machine_find_physical.xml') + elif params[17] == 'VIRTUALSERVER': + body = self.fixtures.load('_machine_find_virtualserver.xml') + elif params[17] == 'VIRTUALDESKTOP': + body = self.fixtures.load('_machine_find_virtualdesktop.xml') + else: + raise Exception( + 'unknown machine.find query with params: %s' % params) + return (httplib.OK, body, self.content_headers, + httplib.responses[httplib.OK]) + + def machine_list(self, method, url, params, headers): + self._authenticate(headers) + body = None + if params[3]: + body = self.fixtures.load( + '_machine_list_machineguid_%s.xml' % params[3]) + if body: + return (httplib.OK, body, self.content_headers, + httplib.responses[httplib.OK]) + else: + return (httplib.INTERNAL_SERVER_ERROR, '', + self.content_headers, 'Could not parse request') + + def machine_delete(self, method, url, params, headers): + self._authenticate(headers) + body = self.fixtures.load('_machine_delete.xml') + return (httplib.OK, body, self.content_headers, + httplib.responses[httplib.OK]) + + def machine_stop(self, method, url, params, headers): + self._authenticate(headers) + body = self.fixtures.load('_machine_stop.xml') + return (httplib.OK, body, self.content_headers, + httplib.responses[httplib.OK]) + + def machine_reboot(self, method, url, params, headers): + self._authenticate(headers) + body = self.fixtures.load('_machine_reboot.xml') + return (httplib.OK, body, self.content_headers, + httplib.responses[httplib.OK]) + + def machine_createFromTemplate(self, method, url, params, headers): + self._authenticate(headers) + body = self.fixtures.load('_machine_createFromTemplate.xml') + return (httplib.OK, body, self.content_headers, + httplib.responses[httplib.OK]) + + def machine_start(self, 
method, url, params, headers): + self._authenticate(headers) + body = self.fixtures.load('_machine_start.xml') + return (httplib.OK, body, self.content_headers, + httplib.responses[httplib.OK]) + + def machine_snapshot(self, method, url, params, headers): + self._authenticate(headers) + body = self.fixtures.load('_machine_snapshot.xml') + return (httplib.OK, body, self.content_headers, + httplib.responses[httplib.OK]) + + def machine_listSnapshots(self, method, url, params, headers): + self._authenticate(headers) + body = self.fixtures.load('_machine_listSnapshots.xml') + return (httplib.OK, body, self.content_headers, + httplib.responses[httplib.OK]) + + def machine_rollback(self, method, url, params, headers): + self._authenticate(headers) + body = self.fixtures.load('_machine_rollback.xml') + return (httplib.OK, body, self.content_headers, + httplib.responses[httplib.OK]) + + def lan_find(self, method, url, params, headers): + self._authenticate(headers) + body = self.fixtures.load('_lan_find.xml') + return (httplib.OK, body, self.content_headers, + httplib.responses[httplib.OK]) + + +class CloudFramesTests(unittest.TestCase, TestCaseMixin): + + should_list_locations = True + should_have_pricing = False + should_list_volumes = False + + def __init__(self, name, url=None): + self.url = url + super(CloudFramesTests, self).__init__(name) + + def setUp(self): + if self.url: + args = () + kwargs = {'url': self.url} + else: + CloudFramesNodeDriver.connectionCls.conn_classes = ( + CloudFramesMockHttp, CloudFramesMockHttp) + args = CLOUDFRAMES_PARAMS + kwargs = {} + self.driver = CloudFramesNodeDriver(*args, **kwargs) + + def _retry_until_up(self, cmd, *args, **kwargs): + """ + When testing against a live system, this will cause the given command + to be retried until it succeeds. + (Calls like snapshot/reboot will fail until the vm has started fully.) 
+ """ + now = datetime.datetime.now() + while not (datetime.datetime.now() - now).seconds > START_TIMEOUT: + try: + return cmd(*args, **kwargs) + except: + pass + else: + raise Exception('VMWare tools did not become available in time') + + def test_connection(self): + key, secret, secure, host, port = CLOUDFRAMES_PARAMS + CloudFramesNodeDriver(key, secret, secure, host) + CloudFramesNodeDriver(key, secret, secure, host, 80) + self.assertRaises(NotImplementedError, CloudFramesNodeDriver, + key, secret, True, host, 80) + CloudFramesNodeDriver(key, secret, secure, host, '80') + self.assertRaises(NotImplementedError, CloudFramesNodeDriver, + key, secure=secure, host=host) + self.assertRaises(NotImplementedError, CloudFramesNodeDriver, + secret=secret, secure=secure, host=host) + CloudFramesNodeDriver( + url='http://%s:%s@%s:80/appserver/xmlrpc' % (key, secret, host)) + CloudFramesNodeDriver( + url='http://%s:%s@%s/appserver/xmlrpc' % (key, secret, host)) + self.assertRaises(NotImplementedError, CloudFramesNodeDriver, + key=key, secret=secret, + url='https://%s/appserver/xmlrpc' % host) + self.assertRaises(NotImplementedError, CloudFramesNodeDriver, + key=key, secret=secret, secure=False, + url='https://%s/appserver/xmlrpc' % host) + self.assertRaises(NotImplementedError, CloudFramesNodeDriver, + url='http://%s@%s/appserver/xmlrpc' % (key, host)) + CloudFramesNodeDriver( + secret=secret, url='http://%s@%s/appserver/xmlrpc' % (key, host)) + self.assertRaises(NotImplementedError, CloudFramesNodeDriver, + url='http://%s/appserver/xmlrpc' % host) + self.assertRaises( + NotImplementedError, CloudFramesNodeDriver, + secret=secret, url='http://%s/appserver/xmlrpc' % host) + self.assertRaises(NotImplementedError, CloudFramesNodeDriver, + key=key, url='http://%s/appserver/xmlrpc' % host) + CloudFramesNodeDriver( + key=key, secret=secret, url='http://%s/appserver/xmlrpc' % host) + + def test_snapshot(self): + nodes = [node for node in self.driver.list_nodes() + if node.state == 
NodeState.RUNNING] + if not nodes: + raise Exception('No running vm to test snapshotting') + self._test_snapshot(nodes[0]) + + def _test_snapshot(self, node): + if self.url: + self.assertEqual(len(self.driver.ex_list_snapshots(node)), 0) + snapshot1 = self._retry_until_up( + self.driver.ex_snapshot_node, node) + self.assertTrue(isinstance(snapshot1, CloudFramesSnapshot)) + if self.url: + self.assertEqual(len(self.driver.ex_list_snapshots(node)), 1) + snapshot2 = self.driver.ex_snapshot_node(node) + self.assertTrue(isinstance(snapshot2, CloudFramesSnapshot)) + if self.url: + self.assertEqual(len(self.driver.ex_list_snapshots(node)), 2) + self.driver.ex_destroy_snapshot(node, snapshot2) + if self.url: + self.assertEqual(len(self.driver.ex_list_snapshots(node)), 1) + self.driver.ex_rollback_node(node, snapshot1) + if self.url: + self.assertEqual(len(self.driver.ex_list_snapshots(node)), 1) + self.driver.ex_destroy_snapshot(node, snapshot1) + + def test_comprehensive(self): + """ + Creates a node with the first location, image and size it finds. + + Then boots the node, reboots, creates two snapshots. + Deletes one snapshot, rolls back to the other, then destroys the node. + + In between these operations it verifies the node status and lists. 
+ """ + if not self.url: + return + location = self.driver.list_locations()[0] + self.assertTrue(isinstance(location, NodeLocation)) + image = self.driver.list_images()[0] + self.assertTrue(isinstance(image, NodeImage)) + size = self.driver.list_sizes()[0] + name = 'AUTOTEST_%s' % uuid.uuid4() + node = self.driver.create_node( + image=image, name=name, size=size, location=location) + # give the node time to boot up and load the vmware tools + self.assertTrue(isinstance(node, Node)) + self.assertTrue(node.id in [x.id for x in self.driver.list_nodes()]) + self.assertTrue(node.state == NodeState.RUNNING) + self._test_snapshot(node) + self._retry_until_up(self.driver.reboot_node, node) + self.driver.destroy_node(node) + self.assertFalse(node.id in [x.id for x in self.driver.list_nodes()]) + + +if __name__ == '__main__': + # add a full url as first arg to this script to test against a live system + # fi: http://key:secret@host:port/appserver/xmlrpc + if len(sys.argv) > 1: + suite = unittest.TestSuite() + suite.addTest(CloudFramesTests('test_comprehensive', sys.argv[1])) + if not unittest.TextTestRunner().run(suite).wasSuccessful(): + sys.exit(1) + del sys.argv[1] + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_cloudsigma_v1_0.py libcloud-0.15.1/libcloud/test/compute/test_cloudsigma_v1_0.py --- libcloud-0.5.0/libcloud/test/compute/test_cloudsigma_v1_0.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_cloudsigma_v1_0.py 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,223 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest +from libcloud.utils.py3 import httplib + +from libcloud.compute.base import Node +from libcloud.compute.drivers.cloudsigma import CloudSigmaNodeDriver +from libcloud.compute.drivers.cloudsigma import CloudSigmaZrhNodeDriver +from libcloud.utils.misc import str2dicts, str2list, dict2str + +from libcloud.test import MockHttp +from libcloud.test.file_fixtures import ComputeFileFixtures + + +class CloudSigmaAPI10BaseTestCase(object): + should_list_locations = False + + driver_klass = CloudSigmaZrhNodeDriver + driver_kwargs = {} + + def setUp(self): + self.driver = self.driver_klass(*self.driver_args, + **self.driver_kwargs) + + self.driver.connectionCls.conn_classes = (None, + CloudSigmaHttp) + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + self.assertTrue(isinstance(nodes, list)) + self.assertEqual(len(nodes), 1) + + node = nodes[0] + self.assertEqual(node.public_ips[0], "1.2.3.4") + self.assertEqual(node.extra['smp'], 1) + self.assertEqual(node.extra['cpu'], 1100) + self.assertEqual(node.extra['mem'], 640) + + def test_list_sizes(self): + images = self.driver.list_sizes() + self.assertEqual(len(images), 9) + + def test_list_images(self): + sizes = self.driver.list_images() + self.assertEqual(len(sizes), 10) + + def test_start_node(self): + nodes = self.driver.list_nodes() + node = nodes[0] + self.assertTrue(self.driver.ex_start_node(node)) + + def test_shutdown_node(self): + nodes = self.driver.list_nodes() + node = nodes[0] + self.assertTrue(self.driver.ex_stop_node(node)) + 
self.assertTrue(self.driver.ex_shutdown_node(node)) + + def test_reboot_node(self): + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver.reboot_node(node)) + + def test_destroy_node(self): + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver.destroy_node(node)) + self.driver.list_nodes() + + def test_create_node(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + node = self.driver.create_node( + name="cloudsigma node", image=image, size=size) + self.assertTrue(isinstance(node, Node)) + + def test_ex_static_ip_list(self): + ips = self.driver.ex_static_ip_list() + self.assertEqual(len(ips), 3) + + def test_ex_static_ip_create(self): + result = self.driver.ex_static_ip_create() + self.assertEqual(len(result), 2) + self.assertEqual(len(list(result[0].keys())), 6) + self.assertEqual(len(list(result[1].keys())), 6) + + def test_ex_static_ip_destroy(self): + result = self.driver.ex_static_ip_destroy('1.2.3.4') + self.assertTrue(result) + + def test_ex_drives_list(self): + result = self.driver.ex_drives_list() + self.assertEqual(len(result), 2) + + def test_ex_drive_destroy(self): + result = self.driver.ex_drive_destroy( + # @@TR: this should be soft-coded: + 'd18119ce_7afa_474a_9242_e0384b160220') + self.assertTrue(result) + + def test_ex_set_node_configuration(self): + node = self.driver.list_nodes()[0] + result = self.driver.ex_set_node_configuration(node, **{'smp': 2}) + self.assertTrue(result) + + def test_str2dicts(self): + string = 'mem 1024\ncpu 2200\n\nmem2048\cpu 1100' + result = str2dicts(string) + self.assertEqual(len(result), 2) + + def test_str2list(self): + string = 'ip 1.2.3.4\nip 1.2.3.5\nip 1.2.3.6' + result = str2list(string) + self.assertEqual(len(result), 3) + self.assertEqual(result[0], '1.2.3.4') + self.assertEqual(result[1], '1.2.3.5') + self.assertEqual(result[2], '1.2.3.6') + + def test_dict2str(self): + d = {'smp': 5, 'cpu': 2200, 'mem': 1024} + result = dict2str(d) + 
self.assertTrue(len(result) > 0) + self.assertTrue(result.find('smp 5') >= 0) + self.assertTrue(result.find('cpu 2200') >= 0) + self.assertTrue(result.find('mem 1024') >= 0) + + +class CloudSigmaAPI10DirectTestCase(CloudSigmaAPI10BaseTestCase, + unittest.TestCase): + driver_klass = CloudSigmaZrhNodeDriver + driver_args = ('foo', 'bar') + driver_kwargs = {} + + +class CloudSigmaAPI10IndiretTestCase(CloudSigmaAPI10BaseTestCase, + unittest.TestCase): + driver_klass = CloudSigmaNodeDriver + driver_args = ('foo', 'bar') + driver_kwargs = {'api_version': '1.0'} + + +class CloudSigmaHttp(MockHttp): + fixtures = ComputeFileFixtures('cloudsigma') + + def _drives_standard_info(self, method, url, body, headers): + body = self.fixtures.load('drives_standard_info.txt') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _servers_62fe7cde_4fb9_4c63_bd8c_e757930066a0_start( + self, method, url, body, headers): + + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _servers_62fe7cde_4fb9_4c63_bd8c_e757930066a0_stop( + self, method, url, body, headers): + + return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.OK]) + + def _servers_62fe7cde_4fb9_4c63_bd8c_e757930066a0_destroy( + self, method, url, body, headers): + + return (httplib.NO_CONTENT, + body, {}, httplib.responses[httplib.NO_CONTENT]) + + def _drives_d18119ce_7afa_474a_9242_e0384b160220_clone( + self, method, url, body, headers): + + body = self.fixtures.load('drives_clone.txt') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _drives_a814def5_1789_49a0_bf88_7abe7bb1682a_info( + self, method, url, body, headers): + + body = self.fixtures.load('drives_single_info.txt') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _drives_info(self, method, url, body, headers): + body = self.fixtures.load('drives_info.txt') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _servers_create(self, method, url, body, headers): + 
body = self.fixtures.load('servers_create.txt') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _servers_info(self, method, url, body, headers): + body = self.fixtures.load('servers_info.txt') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _resources_ip_list(self, method, url, body, headers): + body = self.fixtures.load('resources_ip_list.txt') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _resources_ip_create(self, method, url, body, headers): + body = self.fixtures.load('resources_ip_create.txt') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _resources_ip_1_2_3_4_destroy(self, method, url, body, headers): + return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.OK]) + + def _drives_d18119ce_7afa_474a_9242_e0384b160220_destroy( + self, method, url, body, headers): + + return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.OK]) + + def _servers_62fe7cde_4fb9_4c63_bd8c_e757930066a0_set( + self, method, url, body, headers): + + body = self.fixtures.load('servers_set.txt') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_cloudsigma_v2_0.py libcloud-0.15.1/libcloud/test/compute/test_cloudsigma_v2_0.py --- libcloud-0.5.0/libcloud/test/compute/test_cloudsigma_v2_0.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_cloudsigma_v2_0.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,646 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +try: + import simplejson as json +except: + import json + +from libcloud.utils.py3 import httplib + +from libcloud.common.types import InvalidCredsError +from libcloud.compute.drivers.cloudsigma import CloudSigmaNodeDriver +from libcloud.compute.drivers.cloudsigma import CloudSigma_2_0_NodeDriver +from libcloud.compute.drivers.cloudsigma import CloudSigmaError +from libcloud.compute.types import NodeState + +from libcloud.test import unittest +from libcloud.test import MockHttpTestCase +from libcloud.test.file_fixtures import ComputeFileFixtures + + +class CloudSigmaAPI20BaseTestCase(object): + def setUp(self): + self.driver_klass.connectionCls.conn_classes = \ + (CloudSigmaMockHttp, CloudSigmaMockHttp) + + CloudSigmaMockHttp.type = None + CloudSigmaMockHttp.use_param = 'do' + + self.driver = self.driver_klass(*self.driver_args, + **self.driver_kwargs) + self.driver.DRIVE_TRANSITION_SLEEP_INTERVAL = 0.1 + self.driver.DRIVE_TRANSITION_TIMEOUT = 1 + self.node = self.driver.list_nodes()[0] + + def test_invalid_api_versions(self): + expected_msg = 'Unsupported API version: invalid' + self.assertRaisesRegexp(NotImplementedError, expected_msg, + CloudSigmaNodeDriver, 'username', 'password', + api_version='invalid') + + def test_invalid_credentials(self): + CloudSigmaMockHttp.type = 'INVALID_CREDS' + self.assertRaises(InvalidCredsError, self.driver.list_nodes) + + def test_invalid_region(self): + expected_msg = 'Invalid region:' + self.assertRaisesRegexp(ValueError, expected_msg, + CloudSigma_2_0_NodeDriver, 'foo', 'bar', + region='invalid') + + def 
test_list_sizes(self): + sizes = self.driver.list_sizes() + + size = sizes[0] + self.assertEqual(size.id, 'micro-regular') + + def test_list_images(self): + images = self.driver.list_images() + + image = images[0] + self.assertEqual(image.name, 'ubuntu-10.04-toMP') + self.assertEqual(image.extra['image_type'], 'preinst') + self.assertEqual(image.extra['media'], 'disk') + self.assertEqual(image.extra['os'], 'linux') + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + + node = nodes[0] + self.assertEqual(len(nodes), 2) + self.assertEqual(node.id, '9de75ed6-fd33-45e2-963f-d405f31fd911') + self.assertEqual(node.name, 'test no drives') + self.assertEqual(node.state, NodeState.RUNNING) + self.assertEqual(node.public_ips, ['185.12.5.181', '178.22.68.55']) + self.assertEqual(node.private_ips, []) + + def test_create_node(self): + image = self.driver.list_images()[0] + size = self.driver.list_sizes()[0] + metadata = {'foo': 'bar'} + + node = self.driver.create_node(name='test node', size=size, image=image, + ex_metadata=metadata) + self.assertEqual(node.name, 'test node') + self.assertEqual(len(node.extra['nics']), 1) + self.assertEqual(node.extra['nics'][0]['ip_v4_conf']['conf'], 'dhcp') + + def test_create_node_with_vlan(self): + image = self.driver.list_images()[0] + size = self.driver.list_sizes()[0] + + vlan_uuid = '39ae851d-433f-4ac2-a803-ffa24cb1fa3e' + + node = self.driver.create_node(name='test node vlan', size=size, + image=image, ex_vlan=vlan_uuid) + self.assertEqual(node.name, 'test node vlan') + self.assertEqual(len(node.extra['nics']), 2) + self.assertEqual(node.extra['nics'][0]['ip_v4_conf']['conf'], 'dhcp') + self.assertEqual(node.extra['nics'][1]['vlan']['uuid'], vlan_uuid) + + def test_destroy_node(self): + status = self.driver.destroy_node(node=self.node) + self.assertTrue(status) + + def test_ex_start_node(self): + status = self.driver.ex_start_node(node=self.node) + self.assertTrue(status) + + def test_ex_start_node_avoid_mode(self): + 
CloudSigmaMockHttp.type = 'AVOID_MODE' + ex_avoid = ['1', '2'] + status = self.driver.ex_start_node(node=self.node, + ex_avoid=ex_avoid) + self.assertTrue(status) + + def test_ex_start_node_already_started(self): + CloudSigmaMockHttp.type = 'ALREADY_STARTED' + + expected_msg = 'Cannot start guest in state "started". Guest should ' \ + 'be in state "stopped' + + self.assertRaisesRegexp(CloudSigmaError, expected_msg, + self.driver.ex_start_node, node=self.node) + + def test_ex_stop_node(self): + status = self.driver.ex_stop_node(node=self.node) + self.assertTrue(status) + + def test_ex_stop_node_already_stopped(self): + CloudSigmaMockHttp.type = 'ALREADY_STOPPED' + + expected_msg = 'Cannot stop guest in state "stopped"' + self.assertRaisesRegexp(CloudSigmaError, expected_msg, + self.driver.ex_stop_node, node=self.node) + + def test_ex_clone_node(self): + node_to_clone = self.driver.list_nodes()[0] + + cloned_node = self.driver.ex_clone_node(node=node_to_clone, + name='test cloned node') + self.assertEqual(cloned_node.name, 'test cloned node') + + def test_ex_open_vnc_tunnel(self): + node = self.driver.list_nodes()[0] + vnc_url = self.driver.ex_open_vnc_tunnel(node=node) + self.assertEqual(vnc_url, 'vnc://direct.lvs.cloudsigma.com:41111') + + def test_ex_close_vnc_tunnel(self): + node = self.driver.list_nodes()[0] + status = self.driver.ex_close_vnc_tunnel(node=node) + self.assertTrue(status) + + def test_ex_list_library_drives(self): + drives = self.driver.ex_list_library_drives() + + drive = drives[0] + self.assertEqual(drive.name, 'IPCop 2.0.2') + self.assertEqual(drive.size, 1000000000) + self.assertEqual(drive.media, 'cdrom') + self.assertEqual(drive.status, 'unmounted') + + def test_ex_list_user_drives(self): + drives = self.driver.ex_list_user_drives() + + drive = drives[0] + self.assertEqual(drive.name, 'test node 2-drive') + self.assertEqual(drive.size, 13958643712) + self.assertEqual(drive.media, 'disk') + self.assertEqual(drive.status, 'unmounted') + + def 
test_ex_create_drive(self): + CloudSigmaMockHttp.type = 'CREATE' + + name = 'test drive 5' + size = 2000 * 1024 * 1024 + + drive = self.driver.ex_create_drive(name=name, size=size, media='disk') + self.assertEqual(drive.name, 'test drive 5') + self.assertEqual(drive.media, 'disk') + + def test_ex_clone_drive(self): + drive = self.driver.ex_list_user_drives()[0] + cloned_drive = self.driver.ex_clone_drive(drive=drive, + name='cloned drive') + + self.assertEqual(cloned_drive.name, 'cloned drive') + + def test_ex_resize_drive(self): + drive = self.driver.ex_list_user_drives()[0] + + size = 1111 * 1024 * 1024 + + resized_drive = self.driver.ex_resize_drive(drive=drive, size=size) + self.assertEqual(resized_drive.name, 'test drive 5') + self.assertEqual(resized_drive.media, 'disk') + self.assertEqual(resized_drive.size, size) + + def test_ex_list_firewall_policies(self): + policies = self.driver.ex_list_firewall_policies() + + policy = policies[1] + rule = policy.rules[0] + self.assertEqual(policy.name, 'My awesome policy') + self.assertEqual(rule.action, 'drop') + self.assertEqual(rule.direction, 'out') + self.assertEqual(rule.dst_ip, '23.0.0.0/32') + self.assertEqual(rule.ip_proto, 'tcp') + self.assertEqual(rule.dst_port, None) + self.assertEqual(rule.src_ip, None) + self.assertEqual(rule.src_port, None) + self.assertEqual(rule.comment, 'Drop traffic from the VM to IP address 23.0.0.0/32') + + def test_ex_create_firewall_policy_no_rules(self): + CloudSigmaMockHttp.type = 'CREATE_NO_RULES' + policy = self.driver.ex_create_firewall_policy(name='test policy 1') + + self.assertEqual(policy.name, 'test policy 1') + self.assertEqual(policy.rules, []) + + def test_ex_create_firewall_policy_with_rules(self): + CloudSigmaMockHttp.type = 'CREATE_WITH_RULES' + rules = [ + { + 'action': 'accept', + 'direction': 'out', + 'ip_proto': 'tcp', + 'src_ip': '127.0.0.1', + 'dst_ip': '127.0.0.1' + } + ] + + policy = self.driver.ex_create_firewall_policy(name='test policy 2', + 
rules=rules) + rule = policy.rules[0] + + self.assertEqual(policy.name, 'test policy 2') + self.assertEqual(len(policy.rules), 1) + + self.assertEqual(rule.action, 'accept') + self.assertEqual(rule.direction, 'out') + self.assertEqual(rule.ip_proto, 'tcp') + + def test_ex_attach_firewall_policy(self): + policy = self.driver.ex_list_firewall_policies()[0] + node = self.driver.list_nodes()[0] + + CloudSigmaMockHttp.type = 'ATTACH_POLICY' + updated_node = self.driver.ex_attach_firewall_policy(policy=policy, + node=node) + nic = updated_node.extra['nics'][0] + self.assertEqual(nic['firewall_policy']['uuid'], + '461dfb8c-e641-43d7-a20e-32e2aa399086') + + def test_ex_attach_firewall_policy_inexistent_nic(self): + policy = self.driver.ex_list_firewall_policies()[0] + node = self.driver.list_nodes()[0] + + nic_mac = 'inexistent' + expected_msg = 'Cannot find the NIC interface to attach a policy to' + self.assertRaisesRegexp(ValueError, expected_msg, + self.driver.ex_attach_firewall_policy, + policy=policy, + node=node, + nic_mac=nic_mac) + + def test_ex_delete_firewall_policy(self): + policy = self.driver.ex_list_firewall_policies()[0] + status = self.driver.ex_delete_firewall_policy(policy=policy) + self.assertTrue(status) + + def test_ex_list_tags(self): + tags = self.driver.ex_list_tags() + + tag = tags[0] + self.assertEqual(tag.id, 'a010ec41-2ead-4630-a1d0-237fa77e4d4d') + self.assertEqual(tag.name, 'test tag 2') + + def test_ex_get_tag(self): + tag = self.driver.ex_get_tag(tag_id='a010ec41-2ead-4630-a1d0-237fa77e4d4d') + + self.assertEqual(tag.id, 'a010ec41-2ead-4630-a1d0-237fa77e4d4d') + self.assertEqual(tag.name, 'test tag 2') + + def test_ex_create_tag(self): + tag = self.driver.ex_create_tag(name='test tag 3') + self.assertEqual(tag.name, 'test tag 3') + + def test_ex_create_tag_with_resources(self): + CloudSigmaMockHttp.type = 'WITH_RESOURCES' + resource_uuids = ['1'] + tag = self.driver.ex_create_tag(name='test tag 3', + resource_uuids=resource_uuids) + 
self.assertEqual(tag.name, 'test tag 3') + self.assertEqual(tag.resources, resource_uuids) + + def test_ex_tag_resource(self): + node = self.driver.list_nodes()[0] + tag = self.driver.ex_list_tags()[0] + + updated_tag = self.driver.ex_tag_resource(resource=node, tag=tag) + self.assertEqual(updated_tag.name, 'test tag 3') + + def test_ex_tag_resources(self): + nodes = self.driver.list_nodes() + tag = self.driver.ex_list_tags()[0] + + updated_tag = self.driver.ex_tag_resources(resources=nodes, tag=tag) + self.assertEqual(updated_tag.name, 'test tag 3') + + def test_ex_tag_resource_invalid_resource_object(self): + tag = self.driver.ex_list_tags()[0] + + expected_msg = 'Resource doesn\'t have id attribute' + self.assertRaisesRegexp(ValueError, expected_msg, + self.driver.ex_tag_resource, tag=tag, + resource={}) + + def test_ex_delete_tag(self): + tag = self.driver.ex_list_tags()[0] + status = self.driver.ex_delete_tag(tag=tag) + self.assertTrue(status) + + def test_ex_get_balance(self): + balance = self.driver.ex_get_balance() + self.assertEqual(balance['balance'], '10.00') + self.assertEqual(balance['currency'], 'USD') + + def test_ex_get_pricing(self): + pricing = self.driver.ex_get_pricing() + + self.assertTrue('current' in pricing) + self.assertTrue('next' in pricing) + self.assertTrue('objects' in pricing) + + def test_ex_get_usage(self): + pricing = self.driver.ex_get_usage() + + self.assertTrue('balance' in pricing) + self.assertTrue('usage' in pricing) + + def test_ex_list_subscriptions(self): + subscriptions = self.driver.ex_list_subscriptions() + + subscription = subscriptions[0] + self.assertEqual(len(subscriptions), 5) + self.assertEqual(subscription.id, '7272') + self.assertEqual(subscription.resource, 'vlan') + self.assertEqual(subscription.amount, 1) + self.assertEqual(subscription.period, '345 days, 0:00:00') + self.assertEqual(subscription.status, 'active') + self.assertEqual(subscription.price, '0E-20') + + def test_ex_create_subscription(self): + 
CloudSigmaMockHttp.type = 'CREATE_SUBSCRIPTION' + subscription = self.driver.ex_create_subscription(amount=1, + period='1 month', + resource='vlan') + self.assertEqual(subscription.amount, 1) + self.assertEqual(subscription.period, '1 month') + self.assertEqual(subscription.resource, 'vlan') + self.assertEqual(subscription.price, '10.26666666666666666666666667') + self.assertEqual(subscription.auto_renew, False) + self.assertEqual(subscription.subscribed_object, '2494079f-8376-40bf-9b37-34d633b8a7b7') + + def test_ex_list_subscriptions_status_filterting(self): + CloudSigmaMockHttp.type = 'STATUS_FILTER' + self.driver.ex_list_subscriptions(status='active') + + def test_ex_list_subscriptions_resource_filterting(self): + CloudSigmaMockHttp.type = 'RESOURCE_FILTER' + resources = ['cpu', 'mem'] + self.driver.ex_list_subscriptions(resources=resources) + + def test_ex_toggle_subscription_auto_renew(self): + subscription = self.driver.ex_list_subscriptions()[0] + status = self.driver.ex_toggle_subscription_auto_renew( + subscription=subscription) + self.assertTrue(status) + + def test_ex_list_capabilities(self): + capabilities = self.driver.ex_list_capabilities() + self.assertEqual(capabilities['servers']['cpu']['min'], 250) + + def test_ex_list_servers_availability_groups(self): + groups = self.driver.ex_list_servers_availability_groups() + self.assertEqual(len(groups), 3) + self.assertEqual(len(groups[0]), 2) + self.assertEqual(len(groups[2]), 1) + + def test_ex_list_drives_availability_groups(self): + groups = self.driver.ex_list_drives_availability_groups() + self.assertEqual(len(groups), 1) + self.assertEqual(len(groups[0]), 11) + + def test_wait_for_drive_state_transition_timeout(self): + drive = self.driver.ex_list_user_drives()[0] + state = 'timeout' + + expected_msg = 'Timed out while waiting for drive transition' + self.assertRaisesRegexp(Exception, expected_msg, + self.driver._wait_for_drive_state_transition, + drive=drive, state=state, + timeout=0.5) + + def 
test_wait_for_drive_state_transition_success(self): + drive = self.driver.ex_list_user_drives()[0] + state = 'unmounted' + + drive = self.driver._wait_for_drive_state_transition(drive=drive, + state=state, + timeout=0.5) + self.assertEqual(drive.status, state) + + +class CloudSigmaAPI20DirectTestCase(CloudSigmaAPI20BaseTestCase, + unittest.TestCase): + driver_klass = CloudSigma_2_0_NodeDriver + driver_args = ('foo', 'bar') + driver_kwargs = {} + + +class CloudSigmaAPI20IndirectTestCase(CloudSigmaAPI20BaseTestCase, + unittest.TestCase): + driver_klass = CloudSigmaNodeDriver + driver_args = ('foo', 'bar') + driver_kwargs = {'api_version': '2.0'} + + +class CloudSigmaMockHttp(MockHttpTestCase): + fixtures = ComputeFileFixtures('cloudsigma_2_0') + + def _api_2_0_servers_detail_INVALID_CREDS(self, method, url, body, headers): + body = self.fixtures.load('libdrives.json') + return (httplib.UNAUTHORIZED, body, {}, + httplib.responses[httplib.UNAUTHORIZED]) + + def _api_2_0_libdrives(self, method, url, body, headers): + body = self.fixtures.load('libdrives.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_2_0_servers_detail(self, method, url, body, headers): + body = self.fixtures.load('servers_detail_mixed_state.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911(self, method, url, body, headers): + body = '' + return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) + + def _api_2_0_servers(self, method, url, body, headers): + if method == 'POST': + # create_node + + parsed = json.loads(body) + + if 'vlan' in parsed['name']: + self.assertEqual(len(parsed['nics']), 2) + body = self.fixtures.load('servers_create_with_vlan.json') + else: + body = self.fixtures.load('servers_create.json') + + return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) + + def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_start(self, 
method, url, body, headers): + body = self.fixtures.load('start_success.json') + return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) + + def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_AVOID_MODE_start(self, method, url, body, headers): + self.assertUrlContainsQueryParams(url, {'avoid': '1,2'}) + + body = self.fixtures.load('start_success.json') + return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) + + def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_ALREADY_STARTED_start(self, method, url, body, headers): + body = self.fixtures.load('start_already_started.json') + return (httplib.FORBIDDEN, body, {}, httplib.responses[httplib.FORBIDDEN]) + + def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_stop(self, method, url, body, headers): + body = self.fixtures.load('stop_success.json') + return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) + + def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_ALREADY_STOPPED_stop(self, method, url, body, headers): + body = self.fixtures.load('stop_already_stopped.json') + return (httplib.FORBIDDEN, body, {}, httplib.responses[httplib.FORBIDDEN]) + + def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_clone(self, method, url, body, headers): + body = self.fixtures.load('servers_clone.json') + return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) + + def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_open_vnc(self, method, url, body, headers): + body = self.fixtures.load('servers_open_vnc.json') + return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) + + def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_close_vnc(self, method, url, body, headers): + body = self.fixtures.load('servers_close_vnc.json') + return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) + + def _api_2_0_drives_detail(self, method, url, body, headers): 
+ body = self.fixtures.load('drives_detail.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_2_0_drives_b02311e2_a83c_4c12_af10_b30d51c86913(self, method, url, body, headers): + body = self.fixtures.load('drives_get.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_2_0_drives_9d1d2cf3_08c1_462f_8485_f4b073560809(self, method, url, body, headers): + body = self.fixtures.load('drives_get.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_2_0_drives_CREATE(self, method, url, body, headers): + body = self.fixtures.load('drives_create.json') + return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) + + def _api_2_0_drives_9d1d2cf3_08c1_462f_8485_f4b073560809_action_clone(self, method, url, body, headers): + body = self.fixtures.load('drives_clone.json') + return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) + + def _api_2_0_drives_5236b9ee_f735_42fd_a236_17558f9e12d3_action_clone(self, method, url, body, headers): + body = self.fixtures.load('drives_clone.json') + return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) + + def _api_2_0_drives_b02311e2_a83c_4c12_af10_b30d51c86913_action_resize(self, method, url, body, headers): + body = self.fixtures.load('drives_resize.json') + return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) + + def _api_2_0_drives_9d1d2cf3_08c1_462f_8485_f4b073560809_action_resize(self, method, url, body, headers): + body = self.fixtures.load('drives_resize.json') + return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) + + def _api_2_0_fwpolicies_detail(self, method, url, body, headers): + body = self.fixtures.load('fwpolicies_detail.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_2_0_fwpolicies_CREATE_NO_RULES(self, method, url, body, headers): + body = self.fixtures.load('fwpolicies_create_no_rules.json') + return 
(httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) + + def _api_2_0_fwpolicies_CREATE_WITH_RULES(self, method, url, body, headers): + body = self.fixtures.load('fwpolicies_create_with_rules.json') + return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) + + def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_ATTACH_POLICY(self, method, url, body, headers): + body = self.fixtures.load('servers_attach_policy.json') + return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) + + def _api_2_0_fwpolicies_0e339282_0cb5_41ac_a9db_727fb62ff2dc(self, method, url, body, headers): + if method == 'DELETE': + body = '' + return (httplib.NO_CONTENT, body, {}, + httplib.responses[httplib.NO_CONTENT]) + + def _api_2_0_tags_detail(self, method, url, body, headers): + body = self.fixtures.load('tags_detail.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_2_0_tags(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load('tags_create.json') + return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) + + def _api_2_0_tags_WITH_RESOURCES(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load('tags_create_with_resources.json') + return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) + + def _api_2_0_tags_a010ec41_2ead_4630_a1d0_237fa77e4d4d(self, method, url, body, headers): + if method == 'GET': + # ex_get_tag + body = self.fixtures.load('tags_get.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + elif method == 'PUT': + # ex_tag_resource + body = self.fixtures.load('tags_update.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + elif method == 'DELETE': + # ex_delete_tag + body = '' + return (httplib.NO_CONTENT, body, {}, + httplib.responses[httplib.NO_CONTENT]) + + def _api_2_0_balance(self, method, url, body, headers): + body = self.fixtures.load('balance.json') + return 
(httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_2_0_pricing(self, method, url, body, headers): + body = self.fixtures.load('pricing.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_2_0_currentusage(self, method, url, body, headers): + body = self.fixtures.load('currentusage.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_2_0_subscriptions(self, method, url, body, headers): + body = self.fixtures.load('subscriptions.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_2_0_subscriptions_STATUS_FILTER(self, method, url, body, headers): + self.assertUrlContainsQueryParams(url, {'status': 'active'}) + + body = self.fixtures.load('subscriptions.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_2_0_subscriptions_RESOURCE_FILTER(self, method, url, body, headers): + expected_params = {'resource': 'cpu,mem', 'status': 'all'} + self.assertUrlContainsQueryParams(url, expected_params) + + body = self.fixtures.load('subscriptions.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_2_0_subscriptions_7272_action_auto_renew(self, method, url, body, headers): + body = '' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_2_0_subscriptions_CREATE_SUBSCRIPTION(self, method, url, body, headers): + body = self.fixtures.load('create_subscription.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_2_0_capabilities(self, method, url, body, headers): + body = self.fixtures.load('capabilities.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_2_0_servers_availability_groups(self, method, url, body, headers): + body = self.fixtures.load('servers_avail_groups.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_2_0_drives_availability_groups(self, method, url, body, headers): + body = 
self.fixtures.load('drives_avail_groups.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_cloudstack.py libcloud-0.15.1/libcloud/test/compute/test_cloudstack.py --- libcloud-0.5.0/libcloud/test/compute/test_cloudstack.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_cloudstack.py 2014-06-27 11:27:01.000000000 +0000 @@ -0,0 +1,696 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import os + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlparse +from libcloud.utils.py3 import parse_qsl + +try: + import simplejson as json +except ImportError: + import json + +from libcloud.common.types import ProviderError +from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver +from libcloud.compute.types import LibcloudError, Provider, InvalidCredsError +from libcloud.compute.types import KeyPairDoesNotExistError +from libcloud.compute.providers import get_driver + +from libcloud.test import unittest +from libcloud.test import MockHttpTestCase +from libcloud.test.compute import TestCaseMixin +from libcloud.test.file_fixtures import ComputeFileFixtures + + +class CloudStackCommonTestCase(TestCaseMixin): + driver_klass = CloudStackNodeDriver + + def setUp(self): + self.driver_klass.connectionCls.conn_classes = \ + (None, CloudStackMockHttp) + self.driver = self.driver_klass('apikey', 'secret', + path='/test/path', + host='api.dummy.com') + self.driver.path = '/test/path' + self.driver.type = -1 + CloudStackMockHttp.type = None + CloudStackMockHttp.fixture_tag = 'default' + self.driver.connection.poll_interval = 0.0 + + def test_invalid_credentials(self): + CloudStackMockHttp.type = 'invalid_credentials' + driver = self.driver_klass('invalid', 'invalid', path='/test/path', + host='api.dummy.com') + self.assertRaises(InvalidCredsError, driver.list_nodes) + + def test_import_keypair_from_string_api_error(self): + CloudStackMockHttp.type = 'api_error' + + name = 'test-pair' + key_material = '' + + expected_msg = 'Public key is invalid' + self.assertRaisesRegexp(ProviderError, expected_msg, + self.driver.import_key_pair_from_string, + name=name, key_material=key_material) + + def test_create_node_immediate_failure(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + CloudStackMockHttp.fixture_tag = 'deployfail' + try: + self.driver.create_node(name='node-name', + image=image, 
+ size=size) + except: + return + self.assertTrue(False) + + def test_create_node_delayed_failure(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + CloudStackMockHttp.fixture_tag = 'deployfail2' + try: + self.driver.create_node(name='node-name', + image=image, + size=size) + except: + return + self.assertTrue(False) + + def test_create_node_default_location_success(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + default_location = self.driver.list_locations()[0] + + node = self.driver.create_node(name='fred', + image=image, + size=size) + + self.assertEqual(node.name, 'fred') + self.assertEqual(node.public_ips, []) + self.assertEqual(node.private_ips, ['192.168.1.2']) + self.assertEqual(node.extra['zone_id'], default_location.id) + + def test_create_node_ex_security_groups(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + location = self.driver.list_locations()[0] + sg = [sg['name'] for sg in self.driver.ex_list_security_groups()] + CloudStackMockHttp.fixture_tag = 'deploysecuritygroup' + node = self.driver.create_node(name='test', + location=location, + image=image, + size=size, + ex_security_groups=sg) + self.assertEqual(node.name, 'test') + self.assertEqual(node.extra['security_group'], sg) + self.assertEqual(node.id, 'fc4fd31a-16d3-49db-814a-56b39b9ef986') + + def test_create_node_ex_keyname(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + location = self.driver.list_locations()[0] + CloudStackMockHttp.fixture_tag = 'deploykeyname' + node = self.driver.create_node(name='test', + location=location, + image=image, + size=size, + ex_keyname='foobar') + self.assertEqual(node.name, 'test') + self.assertEqual(node.extra['key_name'], 'foobar') + + def test_create_node_project(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + location = self.driver.list_locations()[0] + project = 
self.driver.ex_list_projects()[0] + CloudStackMockHttp.fixture_tag = 'deployproject' + node = self.driver.create_node(name='test', + location=location, + image=image, + size=size, + project=project) + self.assertEqual(node.name, 'TestNode') + self.assertEqual(node.extra['project'], 'Test Project') + + def test_list_images_no_images_available(self): + CloudStackMockHttp.fixture_tag = 'notemplates' + + images = self.driver.list_images() + self.assertEqual(0, len(images)) + + def test_list_images(self): + _, fixture = CloudStackMockHttp()._load_fixture( + 'listTemplates_default.json') + templates = fixture['listtemplatesresponse']['template'] + + images = self.driver.list_images() + for i, image in enumerate(images): + # NodeImage expects id to be a string, + # the CloudStack fixture has an int + tid = str(templates[i]['id']) + tname = templates[i]['name'] + self.assertIsInstance(image.driver, CloudStackNodeDriver) + self.assertEqual(image.id, tid) + self.assertEqual(image.name, tname) + + def test_ex_list_disk_offerings(self): + diskOfferings = self.driver.ex_list_disk_offerings() + self.assertEqual(1, len(diskOfferings)) + + diskOffering, = diskOfferings + + self.assertEqual('Disk offer 1', diskOffering.name) + self.assertEqual(10, diskOffering.size) + + def test_ex_list_networks(self): + _, fixture = CloudStackMockHttp()._load_fixture( + 'listNetworks_default.json') + fixture_networks = fixture['listnetworksresponse']['network'] + + networks = self.driver.ex_list_networks() + + for i, network in enumerate(networks): + self.assertEqual(network.id, fixture_networks[i]['id']) + self.assertEqual( + network.displaytext, fixture_networks[i]['displaytext']) + self.assertEqual(network.name, fixture_networks[i]['name']) + self.assertEqual( + network.networkofferingid, + fixture_networks[i]['networkofferingid']) + self.assertEqual(network.zoneid, fixture_networks[i]['zoneid']) + + def test_ex_list_network_offerings(self): + _, fixture = CloudStackMockHttp()._load_fixture( + 
'listNetworkOfferings_default.json') + fixture_networkoffers = \ + fixture['listnetworkofferingsresponse']['networkoffering'] + + networkoffers = self.driver.ex_list_network_offerings() + + for i, networkoffer in enumerate(networkoffers): + self.assertEqual(networkoffer.id, fixture_networkoffers[i]['id']) + self.assertEqual(networkoffer.name, + fixture_networkoffers[i]['name']) + self.assertEqual(networkoffer.display_text, + fixture_networkoffers[i]['displaytext']) + self.assertEqual(networkoffer.for_vpc, + fixture_networkoffers[i]['forvpc']) + self.assertEqual(networkoffer.guest_ip_type, + fixture_networkoffers[i]['guestiptype']) + self.assertEqual(networkoffer.service_offering_id, + fixture_networkoffers[i]['serviceofferingid']) + + def test_ex_create_network(self): + _, fixture = CloudStackMockHttp()._load_fixture( + 'createNetwork_default.json') + + fixture_network = fixture['createnetworkresponse']['network'] + + netoffer = self.driver.ex_list_network_offerings()[0] + location = self.driver.list_locations()[0] + network = self.driver.ex_create_network(display_text='test', + name='test', + network_offering=netoffer, + location=location, + gateway='10.1.1.1', + netmask='255.255.255.0', + network_domain='cloud.local', + vpc_id="2", + project_id="2") + + self.assertEqual(network.name, fixture_network['name']) + self.assertEqual(network.displaytext, fixture_network['displaytext']) + self.assertEqual(network.id, fixture_network['id']) + self.assertEqual(network.extra['gateway'], fixture_network['gateway']) + self.assertEqual(network.extra['netmask'], fixture_network['netmask']) + self.assertEqual(network.networkofferingid, + fixture_network['networkofferingid']) + self.assertEqual(network.extra['vpc_id'], fixture_network['vpcid']) + self.assertEqual(network.extra['project_id'], + fixture_network['projectid']) + + def test_ex_delete_network(self): + + network = self.driver.ex_list_networks()[0] + + result = self.driver.ex_delete_network(network=network) + 
self.assertTrue(result) + + def test_ex_list_projects(self): + _, fixture = CloudStackMockHttp()._load_fixture( + 'listProjects_default.json') + fixture_projects = fixture['listprojectsresponse']['project'] + + projects = self.driver.ex_list_projects() + + for i, project in enumerate(projects): + self.assertEqual(project.id, fixture_projects[i]['id']) + self.assertEqual( + project.display_text, fixture_projects[i]['displaytext']) + self.assertEqual(project.name, fixture_projects[i]['name']) + self.assertEqual( + project.extra['domainid'], + fixture_projects[i]['domainid']) + self.assertEqual( + project.extra['cpulimit'], + fixture_projects[i]['cpulimit']) + + def test_create_volume(self): + volumeName = 'vol-0' + location = self.driver.list_locations()[0] + + volume = self.driver.create_volume(10, volumeName, location) + + self.assertEqual(volumeName, volume.name) + self.assertEqual(10, volume.size) + + def test_create_volume_no_noncustomized_offering_with_size(self): + """If the sizes of disk offerings are not configurable and there + are no disk offerings with the requested size, an exception should + be thrown.""" + + location = self.driver.list_locations()[0] + + self.assertRaises( + LibcloudError, + self.driver.create_volume, + 'vol-0', location, 11) + + def test_create_volume_with_custom_disk_size_offering(self): + CloudStackMockHttp.fixture_tag = 'withcustomdisksize' + + volumeName = 'vol-0' + location = self.driver.list_locations()[0] + + volume = self.driver.create_volume(10, volumeName, location) + + self.assertEqual(volumeName, volume.name) + + def test_attach_volume(self): + node = self.driver.list_nodes()[0] + volumeName = 'vol-0' + location = self.driver.list_locations()[0] + + volume = self.driver.create_volume(10, volumeName, location) + attachReturnVal = self.driver.attach_volume(volume, node) + + self.assertTrue(attachReturnVal) + + def test_detach_volume(self): + volumeName = 'gre-test-volume' + location = self.driver.list_locations()[0] + volume 
= self.driver.create_volume(10, volumeName, location) + res = self.driver.detach_volume(volume) + self.assertTrue(res) + + def test_destroy_volume(self): + volumeName = 'gre-test-volume' + location = self.driver.list_locations()[0] + volume = self.driver.create_volume(10, volumeName, location) + res = self.driver.destroy_volume(volume) + self.assertTrue(res) + + def test_list_volumes(self): + volumes = self.driver.list_volumes() + self.assertEqual(1, len(volumes)) + self.assertEqual('ROOT-69942', volumes[0].name) + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + self.assertEqual(2, len(nodes)) + self.assertEqual('test', nodes[0].name) + self.assertEqual('2600', nodes[0].id) + self.assertEqual([], nodes[0].extra['security_group']) + self.assertEqual(None, nodes[0].extra['key_name']) + + def test_list_locations(self): + location = self.driver.list_locations()[0] + self.assertEqual('1', location.id) + self.assertEqual('Sydney', location.name) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual('Compute Micro PRD', sizes[0].name) + self.assertEqual('105', sizes[0].id) + self.assertEqual(384, sizes[0].ram) + self.assertEqual('Compute Large PRD', sizes[2].name) + self.assertEqual('69', sizes[2].id) + self.assertEqual(6964, sizes[2].ram) + + def test_ex_start_node(self): + node = self.driver.list_nodes()[0] + res = node.ex_start() + self.assertEqual('Starting', res) + + def test_ex_stop_node(self): + node = self.driver.list_nodes()[0] + res = node.ex_stop() + self.assertEqual('Stopped', res) + + def test_destroy_node(self): + node = self.driver.list_nodes()[0] + res = node.destroy() + self.assertTrue(res) + + def test_reboot_node(self): + node = self.driver.list_nodes()[0] + res = node.reboot() + self.assertTrue(res) + + def test_list_key_pairs(self): + keypairs = self.driver.list_key_pairs() + fingerprint = '00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:' + \ + '00:00:00:00:00' + + self.assertEqual(keypairs[0].name, 
'cs-keypair') + self.assertEqual(keypairs[0].fingerprint, fingerprint) + + # Test old and deprecated way + keypairs = self.driver.ex_list_keypairs() + + self.assertEqual(keypairs[0]['name'], 'cs-keypair') + self.assertEqual(keypairs[0]['fingerprint'], fingerprint) + + def test_list_key_pairs_no_keypair_key(self): + CloudStackMockHttp.fixture_tag = 'no_keys' + keypairs = self.driver.list_key_pairs() + self.assertEqual(keypairs, []) + + def test_get_key_pair(self): + CloudStackMockHttp.fixture_tag = 'get_one' + key_pair = self.driver.get_key_pair(name='cs-keypair') + self.assertEqual(key_pair.name, 'cs-keypair') + + def test_get_key_pair_doesnt_exist(self): + CloudStackMockHttp.fixture_tag = 'get_one_doesnt_exist' + + self.assertRaises(KeyPairDoesNotExistError, self.driver.get_key_pair, + name='does-not-exist') + + def test_create_keypair(self): + key_pair = self.driver.create_key_pair(name='test-keypair') + + self.assertEqual(key_pair.name, 'test-keypair') + self.assertTrue(key_pair.fingerprint is not None) + self.assertTrue(key_pair.private_key is not None) + + # Test old and deprecated way + res = self.driver.ex_create_keypair(name='test-keypair') + self.assertEqual(res['name'], 'test-keypair') + self.assertTrue(res['fingerprint'] is not None) + self.assertTrue(res['privateKey'] is not None) + + def test_import_keypair_from_file(self): + fingerprint = 'c4:a1:e5:d4:50:84:a9:4c:6b:22:ee:d6:57:02:b8:15' + path = os.path.join(os.path.dirname(__file__), 'fixtures', + 'cloudstack', + 'dummy_rsa.pub') + + key_pair = self.driver.import_key_pair_from_file('foobar', path) + self.assertEqual(key_pair.name, 'foobar') + self.assertEqual(key_pair.fingerprint, fingerprint) + + # Test old and deprecated way + res = self.driver.ex_import_keypair('foobar', path) + self.assertEqual(res['keyName'], 'foobar') + self.assertEqual(res['keyFingerprint'], fingerprint) + + def test_ex_import_keypair_from_string(self): + fingerprint = 'c4:a1:e5:d4:50:84:a9:4c:6b:22:ee:d6:57:02:b8:15' + path 
= os.path.join(os.path.dirname(__file__), 'fixtures', + 'cloudstack', + 'dummy_rsa.pub') + fh = open(path) + key_material = fh.read() + fh.close() + + key_pair = self.driver.import_key_pair_from_string('foobar', key_material=key_material) + self.assertEqual(key_pair.name, 'foobar') + self.assertEqual(key_pair.fingerprint, fingerprint) + + # Test old and deprecated way + res = self.driver.ex_import_keypair_from_string('foobar', key_material=key_material) + self.assertEqual(res['keyName'], 'foobar') + self.assertEqual(res['keyFingerprint'], fingerprint) + + def test_delete_key_pair(self): + key_pair = self.driver.list_key_pairs()[0] + + res = self.driver.delete_key_pair(key_pair=key_pair) + self.assertTrue(res) + + # Test old and deprecated way + res = self.driver.ex_delete_keypair(keypair='cs-keypair') + self.assertTrue(res) + + def test_ex_list_security_groups(self): + groups = self.driver.ex_list_security_groups() + self.assertEqual(2, len(groups)) + self.assertEqual(groups[0]['name'], 'default') + self.assertEqual(groups[1]['name'], 'mongodb') + + def test_ex_list_security_groups_no_securitygroup_key(self): + CloudStackMockHttp.fixture_tag = 'no_groups' + + groups = self.driver.ex_list_security_groups() + self.assertEqual(groups, []) + + def test_ex_create_security_group(self): + group = self.driver.ex_create_security_group(name='MySG') + self.assertEqual(group['name'], 'MySG') + + def test_ex_delete_security_group(self): + res = self.driver.ex_delete_security_group(name='MySG') + self.assertTrue(res) + + def test_ex_authorize_security_group_ingress(self): + res = self.driver.ex_authorize_security_group_ingress('MySG', + 'TCP', + '22', + '22', + '0.0.0.0/0') + self.assertTrue(res) + + def test_ex_list_public_ips(self): + ips = self.driver.ex_list_public_ips() + self.assertEqual(ips[0].address, '1.1.1.116') + + def test_ex_allocate_public_ip(self): + addr = self.driver.ex_allocate_public_ip() + self.assertEqual(addr.address, '7.5.6.1') + self.assertEqual(addr.id, 
'10987171-8cc9-4d0a-b98f-1698c09ddd2d') + + def test_ex_release_public_ip(self): + addresses = self.driver.ex_list_public_ips() + res = self.driver.ex_release_public_ip(addresses[0]) + self.assertTrue(res) + + def test_ex_create_port_forwarding_rule(self): + node = self.driver.list_nodes()[0] + address = self.driver.ex_list_public_ips()[0] + private_port = 33 + private_end_port = 34 + public_port = 33 + public_end_port = 34 + openfirewall = True + protocol = 'TCP' + rule = self.driver.ex_create_port_forwarding_rule(node, + address, + private_port, + public_port, + protocol, + public_end_port, + private_end_port, + openfirewall) + self.assertEqual(rule.address, address) + self.assertEqual(rule.protocol, protocol) + self.assertEqual(rule.public_port, public_port) + self.assertEqual(rule.public_end_port, public_end_port) + self.assertEqual(rule.private_port, private_port) + self.assertEqual(rule.private_end_port, private_end_port) + + def test_ex_list_port_forwarding_rules(self): + rules = self.driver.ex_list_port_forwarding_rules() + self.assertEqual(len(rules), 1) + rule = rules[0] + self.assertTrue(rule.node) + self.assertEqual(rule.protocol, 'tcp') + self.assertEqual(rule.public_port, '33') + self.assertEqual(rule.public_end_port, '34') + self.assertEqual(rule.private_port, '33') + self.assertEqual(rule.private_end_port, '34') + self.assertEqual(rule.address.address, '1.1.1.116') + + def test_ex_delete_port_forwarding_rule(self): + node = self.driver.list_nodes()[0] + rule = self.driver.ex_list_port_forwarding_rules()[0] + res = self.driver.ex_delete_port_forwarding_rule(node, rule) + self.assertTrue(res) + + def test_node_ex_delete_port_forwarding_rule(self): + node = self.driver.list_nodes()[0] + self.assertEqual(len(node.extra['port_forwarding_rules']), 1) + node.extra['port_forwarding_rules'][0].delete() + self.assertEqual(len(node.extra['port_forwarding_rules']), 0) + + def test_node_ex_create_port_forwarding_rule(self): + node = self.driver.list_nodes()[0] + 
self.assertEqual(len(node.extra['port_forwarding_rules']), 1) + address = self.driver.ex_list_public_ips()[0] + private_port = 33 + private_end_port = 34 + public_port = 33 + public_end_port = 34 + openfirewall = True + protocol = 'TCP' + rule = node.ex_create_port_forwarding_rule(address, + private_port, + public_port, + protocol, + public_end_port, + private_end_port, + openfirewall) + self.assertEqual(rule.address, address) + self.assertEqual(rule.protocol, protocol) + self.assertEqual(rule.public_port, public_port) + self.assertEqual(rule.public_end_port, public_end_port) + self.assertEqual(rule.private_port, private_port) + self.assertEqual(rule.private_end_port, private_end_port) + self.assertEqual(len(node.extra['port_forwarding_rules']), 2) + + def test_ex_limits(self): + limits = self.driver.ex_limits() + self.assertEqual(limits['max_images'], 20) + self.assertEqual(limits['max_networks'], 20) + self.assertEqual(limits['max_public_ips'], -1) + self.assertEqual(limits['max_vpc'], 20) + self.assertEqual(limits['max_instances'], 20) + self.assertEqual(limits['max_projects'], -1) + self.assertEqual(limits['max_volumes'], 20) + self.assertEqual(limits['max_snapshots'], 20) + + def test_ex_create_tags(self): + node = self.driver.list_nodes()[0] + tags = {'Region': 'Canada'} + resp = self.driver.ex_create_tags([node.id], 'UserVm', tags) + self.assertTrue(resp) + + def test_ex_delete_tags(self): + node = self.driver.list_nodes()[0] + tag_keys = ['Region'] + resp = self.driver.ex_delete_tags([node.id], 'UserVm', tag_keys) + self.assertTrue(resp) + + +class CloudStackTestCase(CloudStackCommonTestCase, unittest.TestCase): + def test_driver_instantiation(self): + urls = [ + 'http://api.exoscale.ch/compute1', # http, default port + 'https://api.exoscale.ch/compute2', # https, default port + 'http://api.exoscale.ch:8888/compute3', # http, custom port + 'https://api.exoscale.ch:8787/compute4', # https, custom port + 'https://api.test.com/compute/endpoint' # https,
default port + ] + + expected_values = [ + {'host': 'api.exoscale.ch', 'port': 80, 'path': '/compute1'}, + {'host': 'api.exoscale.ch', 'port': 443, 'path': '/compute2'}, + {'host': 'api.exoscale.ch', 'port': 8888, 'path': '/compute3'}, + {'host': 'api.exoscale.ch', 'port': 8787, 'path': '/compute4'}, + {'host': 'api.test.com', 'port': 443, 'path': '/compute/endpoint'} + ] + + cls = get_driver(Provider.CLOUDSTACK) + + for url, expected in zip(urls, expected_values): + driver = cls('key', 'secret', url=url) + + self.assertEqual(driver.host, expected['host']) + self.assertEqual(driver.path, expected['path']) + self.assertEqual(driver.connection.port, expected['port']) + + def test_user_must_provide_host_and_path_or_url(self): + expected_msg = ('When instantiating CloudStack driver directly ' + 'you also need to provide url or host and path ' + 'argument') + cls = get_driver(Provider.CLOUDSTACK) + + self.assertRaisesRegexp(Exception, expected_msg, cls, + 'key', 'secret') + + try: + cls('key', 'secret', True, 'localhost', '/path') + except Exception: + self.fail('host and path provided but driver raised an exception') + + try: + cls('key', 'secret', url='https://api.exoscale.ch/compute') + except Exception: + self.fail('url provided but driver raised an exception') + + +class CloudStackMockHttp(MockHttpTestCase): + fixtures = ComputeFileFixtures('cloudstack') + fixture_tag = 'default' + + def _load_fixture(self, fixture): + body = self.fixtures.load(fixture) + return body, json.loads(body) + + def _test_path_invalid_credentials(self, method, url, body, headers): + body = '' + return (httplib.UNAUTHORIZED, body, {}, + httplib.responses[httplib.UNAUTHORIZED]) + + def _test_path_api_error(self, method, url, body, headers): + body = self.fixtures.load('registerSSHKeyPair_error.json') + return (431, body, {}, + httplib.responses[httplib.OK]) + + def _test_path(self, method, url, body, headers): + url = urlparse.urlparse(url) + query = dict(parse_qsl(url.query)) + + 
self.assertTrue('apiKey' in query) + self.assertTrue('command' in query) + self.assertTrue('response' in query) + self.assertTrue('signature' in query) + + self.assertTrue(query['response'] == 'json') + + del query['apiKey'] + del query['response'] + del query['signature'] + command = query.pop('command') + + if hasattr(self, '_cmd_' + command): + return getattr(self, '_cmd_' + command)(**query) + else: + fixture = command + '_' + self.fixture_tag + '.json' + body, obj = self._load_fixture(fixture) + return (httplib.OK, body, obj, httplib.responses[httplib.OK]) + + def _cmd_queryAsyncJobResult(self, jobid): + fixture = 'queryAsyncJobResult' + '_' + str(jobid) + '.json' + body, obj = self._load_fixture(fixture) + return (httplib.OK, body, obj, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_deployment.py libcloud-0.15.1/libcloud/test/compute/test_deployment.py --- libcloud-0.5.0/libcloud/test/compute/test_deployment.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_deployment.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,503 @@ +# -*- coding: utf-8 -*- +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +from __future__ import with_statement + +import os +import sys +import time +import unittest + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import u +from libcloud.utils.py3 import PY3 + +from libcloud.compute.deployment import MultiStepDeployment, Deployment +from libcloud.compute.deployment import SSHKeyDeployment, ScriptDeployment +from libcloud.compute.deployment import ScriptFileDeployment, FileDeployment +from libcloud.compute.base import Node +from libcloud.compute.types import NodeState, DeploymentError, LibcloudError +from libcloud.compute.ssh import BaseSSHClient +from libcloud.compute.drivers.rackspace import RackspaceFirstGenNodeDriver as Rackspace + +from libcloud.test import MockHttp, XML_HEADERS +from libcloud.test.file_fixtures import ComputeFileFixtures +from mock import Mock, patch + +from libcloud.test.secrets import RACKSPACE_PARAMS + + +class MockDeployment(Deployment): + + def run(self, node, client): + return node + + +class MockClient(BaseSSHClient): + + def __init__(self, *args, **kwargs): + self.stdout = '' + self.stderr = '' + self.exit_status = 0 + + def put(self, path, contents, chmod=755, mode='w'): + return contents + + def run(self, name): + return self.stdout, self.stderr, self.exit_status + + def delete(self, name): + return True + + +class DeploymentTests(unittest.TestCase): + + def setUp(self): + Rackspace.connectionCls.conn_classes = (None, RackspaceMockHttp) + RackspaceMockHttp.type = None + self.driver = Rackspace(*RACKSPACE_PARAMS) + # normally authentication happens lazily, but we force it here + self.driver.connection._populate_hosts_and_request_paths() + self.driver.features = {'create_node': ['generates_password']} + self.node = Node(id=12345, name='test', state=NodeState.RUNNING, + public_ips=['1.2.3.4'], private_ips=['1.2.3.5'], + driver=Rackspace) + self.node2 = Node(id=123456, name='test', state=NodeState.RUNNING, + public_ips=['1.2.3.4'], private_ips=['1.2.3.5'], + driver=Rackspace) + + def 
test_multi_step_deployment(self): + msd = MultiStepDeployment() + self.assertEqual(len(msd.steps), 0) + + msd.add(MockDeployment()) + self.assertEqual(len(msd.steps), 1) + + self.assertEqual(self.node, msd.run(node=self.node, client=None)) + + def test_ssh_key_deployment(self): + sshd = SSHKeyDeployment(key='1234') + + self.assertEqual(self.node, sshd.run(node=self.node, + client=MockClient(hostname='localhost'))) + + def test_file_deployment(self): + # use this file (__file__) for obtaining permissions + target = os.path.join('/tmp', os.path.basename(__file__)) + fd = FileDeployment(__file__, target) + self.assertEqual(target, fd.target) + self.assertEqual(__file__, fd.source) + self.assertEqual(self.node, fd.run( + node=self.node, client=MockClient(hostname='localhost'))) + + def test_script_deployment(self): + sd1 = ScriptDeployment(script='foobar', delete=True) + sd2 = ScriptDeployment(script='foobar', delete=False) + sd3 = ScriptDeployment( + script='foobar', delete=False, name='foobarname') + + self.assertTrue(sd1.name.find('deployment') != '1') + self.assertEqual(sd3.name, 'foobarname') + + self.assertEqual(self.node, sd1.run(node=self.node, + client=MockClient(hostname='localhost'))) + self.assertEqual(self.node, sd2.run(node=self.node, + client=MockClient(hostname='localhost'))) + + def test_script_file_deployment(self): + file_path = os.path.abspath(__file__) + with open(file_path, 'rb') as fp: + content = fp.read() + + if PY3: + content = content.decode('utf-8') + + sfd1 = ScriptFileDeployment(script_file=file_path) + self.assertEqual(sfd1.script, content) + + def test_script_deployment_relative_path(self): + client = Mock() + client.put.return_value = '/home/ubuntu/relative.sh' + client.run.return_value = ('', '', 0) + + sd = ScriptDeployment(script='echo "foo"', name='relative.sh') + sd.run(self.node, client) + + client.run.assert_called_once_with('/home/ubuntu/relative.sh') + + def test_script_deployment_absolute_path(self): + client = Mock() + 
client.put.return_value = '/home/ubuntu/relative.sh' + client.run.return_value = ('', '', 0) + + sd = ScriptDeployment(script='echo "foo"', name='/root/relative.sh') + sd.run(self.node, client) + + client.run.assert_called_once_with('/root/relative.sh') + + def test_script_deployment_with_arguments(self): + client = Mock() + client.put.return_value = '/home/ubuntu/relative.sh' + client.run.return_value = ('', '', 0) + + args = ['arg1', 'arg2', '--option1=test'] + sd = ScriptDeployment(script='echo "foo"', args=args, + name='/root/relative.sh') + sd.run(self.node, client) + + expected = '/root/relative.sh arg1 arg2 --option1=test' + client.run.assert_called_once_with(expected) + + client.reset_mock() + + args = [] + sd = ScriptDeployment(script='echo "foo"', args=args, + name='/root/relative.sh') + sd.run(self.node, client) + + expected = '/root/relative.sh' + client.run.assert_called_once_with(expected) + + def test_script_file_deployment_with_arguments(self): + file_path = os.path.abspath(__file__) + client = Mock() + client.put.return_value = '/home/ubuntu/relative.sh' + client.run.return_value = ('', '', 0) + + args = ['arg1', 'arg2', '--option1=test', 'option2'] + sfd = ScriptFileDeployment(script_file=file_path, args=args, + name='/root/relative.sh') + + sfd.run(self.node, client) + + expected = '/root/relative.sh arg1 arg2 --option1=test option2' + client.run.assert_called_once_with(expected) + + def test_script_deployment_and_sshkey_deployment_argument_types(self): + class FileObject(object): + + def __init__(self, name): + self.name = name + + def read(self): + return 'bar' + + ScriptDeployment(script='foobar') + ScriptDeployment(script=u('foobar')) + ScriptDeployment(script=FileObject('test')) + + SSHKeyDeployment(key='foobar') + SSHKeyDeployment(key=u('foobar')) + SSHKeyDeployment(key=FileObject('test')) + + try: + ScriptDeployment(script=[]) + except TypeError: + pass + else: + self.fail('TypeError was not thrown') + + try: + SSHKeyDeployment(key={}) + 
except TypeError: + pass + else: + self.fail('TypeError was not thrown') + + def test_wait_until_running_running_instantly(self): + node2, ips = self.driver.wait_until_running( + nodes=[self.node], wait_period=1, + timeout=0.5)[0] + self.assertEqual(self.node.uuid, node2.uuid) + self.assertEqual(['67.23.21.33'], ips) + + def test_wait_until_running_running_after_1_second(self): + RackspaceMockHttp.type = '05_SECOND_DELAY' + node2, ips = self.driver.wait_until_running( + nodes=[self.node], wait_period=1, + timeout=0.5)[0] + self.assertEqual(self.node.uuid, node2.uuid) + self.assertEqual(['67.23.21.33'], ips) + + def test_wait_until_running_running_after_1_second_private_ips(self): + RackspaceMockHttp.type = '05_SECOND_DELAY' + node2, ips = self.driver.wait_until_running( + nodes=[self.node], wait_period=1, + timeout=0.5, ssh_interface='private_ips')[0] + self.assertEqual(self.node.uuid, node2.uuid) + self.assertEqual(['10.176.168.218'], ips) + + def test_wait_until_running_invalid_ssh_interface_argument(self): + try: + self.driver.wait_until_running(nodes=[self.node], wait_period=1, + ssh_interface='invalid') + except ValueError: + pass + else: + self.fail('Exception was not thrown') + + def test_wait_until_running_timeout(self): + RackspaceMockHttp.type = 'TIMEOUT' + + try: + self.driver.wait_until_running(nodes=[self.node], wait_period=0.1, + timeout=0.5) + except LibcloudError: + e = sys.exc_info()[1] + self.assertTrue(e.value.find('Timed out') != -1) + else: + self.fail('Exception was not thrown') + + def test_wait_until_running_running_node_missing_from_list_nodes(self): + RackspaceMockHttp.type = 'MISSING' + + try: + self.driver.wait_until_running(nodes=[self.node], wait_period=0.1, + timeout=0.5) + except LibcloudError: + e = sys.exc_info()[1] + self.assertTrue(e.value.find('Timed out after 0.5 second') != -1) + else: + self.fail('Exception was not thrown') + + def test_wait_until_running_running_multiple_nodes_have_same_uuid(self): + RackspaceMockHttp.type = 
'SAME_UUID' + + try: + self.driver.wait_until_running(nodes=[self.node], wait_period=0.1, + timeout=0.5) + except LibcloudError: + e = sys.exc_info()[1] + self.assertTrue( + e.value.find('Unable to match specified uuids') != -1) + else: + self.fail('Exception was not thrown') + + def test_wait_until_running_running_wait_for_multiple_nodes(self): + RackspaceMockHttp.type = 'MULTIPLE_NODES' + + nodes = self.driver.wait_until_running( + nodes=[self.node, self.node2], wait_period=0.1, + timeout=0.5) + self.assertEqual(self.node.uuid, nodes[0][0].uuid) + self.assertEqual(self.node2.uuid, nodes[1][0].uuid) + self.assertEqual(['67.23.21.33'], nodes[0][1]) + self.assertEqual(['67.23.21.34'], nodes[1][1]) + + def test_ssh_client_connect_success(self): + mock_ssh_client = Mock() + mock_ssh_client.return_value = None + + ssh_client = self.driver._ssh_client_connect( + ssh_client=mock_ssh_client, + timeout=0.5) + self.assertEqual(mock_ssh_client, ssh_client) + + def test_ssh_client_connect_timeout(self): + mock_ssh_client = Mock() + mock_ssh_client.connect = Mock() + mock_ssh_client.connect.side_effect = IOError('bam') + + try: + self.driver._ssh_client_connect(ssh_client=mock_ssh_client, + timeout=0.5) + except LibcloudError: + e = sys.exc_info()[1] + self.assertTrue(e.value.find('Giving up') != -1) + else: + self.fail('Exception was not thrown') + + def test_run_deployment_script_success(self): + task = Mock() + ssh_client = Mock() + + ssh_client2 = self.driver._run_deployment_script(task=task, + node=self.node, + ssh_client=ssh_client, + max_tries=2) + self.assertTrue(isinstance(ssh_client2, Mock)) + + def test_run_deployment_script_exception(self): + task = Mock() + task.run = Mock() + task.run.side_effect = Exception('bar') + ssh_client = Mock() + + try: + self.driver._run_deployment_script(task=task, + node=self.node, + ssh_client=ssh_client, + max_tries=2) + except LibcloudError: + e = sys.exc_info()[1] + self.assertTrue(e.value.find('Failed after 2 tries') != -1) + 
else: + self.fail('Exception was not thrown') + + @patch('libcloud.compute.base.SSHClient') + @patch('libcloud.compute.ssh') + def test_deploy_node_success(self, mock_ssh_module, _): + self.driver.create_node = Mock() + self.driver.create_node.return_value = self.node + mock_ssh_module.have_paramiko = True + + deploy = Mock() + + node = self.driver.deploy_node(deploy=deploy) + self.assertEqual(self.node.id, node.id) + + @patch('libcloud.compute.base.SSHClient') + @patch('libcloud.compute.ssh') + def test_deploy_node_exception_run_deployment_script(self, mock_ssh_module, + _): + self.driver.create_node = Mock() + self.driver.create_node.return_value = self.node + mock_ssh_module.have_paramiko = True + + deploy = Mock() + deploy.run = Mock() + deploy.run.side_effect = Exception('foo') + + try: + self.driver.deploy_node(deploy=deploy) + except DeploymentError: + e = sys.exc_info()[1] + self.assertTrue(e.node.id, self.node.id) + else: + self.fail('Exception was not thrown') + + @patch('libcloud.compute.base.SSHClient') + @patch('libcloud.compute.ssh') + def test_deploy_node_exception_ssh_client_connect(self, mock_ssh_module, + ssh_client): + self.driver.create_node = Mock() + self.driver.create_node.return_value = self.node + + mock_ssh_module.have_paramiko = True + + deploy = Mock() + ssh_client.side_effect = IOError('bar') + + try: + self.driver.deploy_node(deploy=deploy) + except DeploymentError: + e = sys.exc_info()[1] + self.assertTrue(e.node.id, self.node.id) + else: + self.fail('Exception was not thrown') + + @patch('libcloud.compute.ssh') + def test_deploy_node_depoy_node_not_implemented(self, mock_ssh_module): + self.driver.features = {'create_node': []} + mock_ssh_module.have_paramiko = True + + try: + self.driver.deploy_node(deploy=Mock()) + except NotImplementedError: + pass + else: + self.fail('Exception was not thrown') + + self.driver.features = {} + + try: + self.driver.deploy_node(deploy=Mock()) + except NotImplementedError: + pass + else: + 
self.fail('Exception was not thrown') + + @patch('libcloud.compute.base.SSHClient') + @patch('libcloud.compute.ssh') + def test_deploy_node_password_auth(self, mock_ssh_module, _): + self.driver.features = {'create_node': ['password']} + mock_ssh_module.have_paramiko = True + + self.driver.create_node = Mock() + self.driver.create_node.return_value = self.node + + node = self.driver.deploy_node(deploy=Mock()) + self.assertEqual(self.node.id, node.id) + + @patch('libcloud.compute.base.SSHClient') + @patch('libcloud.compute.ssh') + def test_exception_is_thrown_is_paramiko_is_not_available(self, + mock_ssh_module, + _): + self.driver.features = {'create_node': ['password']} + self.driver.create_node = Mock() + self.driver.create_node.return_value = self.node + + mock_ssh_module.have_paramiko = False + + try: + self.driver.deploy_node(deploy=Mock()) + except RuntimeError: + e = sys.exc_info()[1] + self.assertTrue(str(e).find('paramiko is not installed') != -1) + else: + self.fail('Exception was not thrown') + + mock_ssh_module.have_paramiko = True + node = self.driver.deploy_node(deploy=Mock()) + self.assertEqual(self.node.id, node.id) + + +class RackspaceMockHttp(MockHttp): + fixtures = ComputeFileFixtures('openstack') + + def _v2_0_tokens(self, method, url, body, headers): + body = self.fixtures.load('_v2_0__auth_deployment.json') + headers = { + 'content-type': 'application/json' + } + return (httplib.OK, body, headers, + httplib.responses[httplib.OK]) + + def _v1_0_slug_servers_detail(self, method, url, body, headers): + body = self.fixtures.load( + 'v1_slug_servers_detail_deployment_success.xml') + return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) + + def _v1_0_slug_servers_detail_05_SECOND_DELAY(self, method, url, body, headers): + time.sleep(0.5) + body = self.fixtures.load( + 'v1_slug_servers_detail_deployment_success.xml') + return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) + + def _v1_0_slug_servers_detail_TIMEOUT(self, 
method, url, body, headers): + body = self.fixtures.load( + 'v1_slug_servers_detail_deployment_pending.xml') + return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) + + def _v1_0_slug_servers_detail_MISSING(self, method, url, body, headers): + body = self.fixtures.load( + 'v1_slug_servers_detail_deployment_missing.xml') + return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) + + def _v1_0_slug_servers_detail_SAME_UUID(self, method, url, body, headers): + body = self.fixtures.load( + 'v1_slug_servers_detail_deployment_same_uuid.xml') + return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) + + def _v1_0_slug_servers_detail_MULTIPLE_NODES(self, method, url, body, headers): + body = self.fixtures.load( + 'v1_slug_servers_detail_deployment_multiple_nodes.xml') + return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_digitalocean.py libcloud-0.15.1/libcloud/test/compute/test_digitalocean.py --- libcloud-0.5.0/libcloud/test/compute/test_digitalocean.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_digitalocean.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,179 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import unittest + +try: + import simplejson as json +except ImportError: + import json # NOQA + +from libcloud.utils.py3 import httplib + +from libcloud.common.types import InvalidCredsError +from libcloud.compute.base import NodeImage +from libcloud.compute.drivers.digitalocean import DigitalOceanNodeDriver + +from libcloud.test import LibcloudTestCase, MockHttpTestCase +from libcloud.test.file_fixtures import ComputeFileFixtures +from libcloud.test.secrets import DIGITAL_OCEAN_PARAMS + + +# class DigitalOceanTests(unittest.TestCase, TestCaseMixin): +class DigitalOceanTests(LibcloudTestCase): + + def setUp(self): + DigitalOceanNodeDriver.connectionCls.conn_classes = \ + (None, DigitalOceanMockHttp) + DigitalOceanMockHttp.type = None + self.driver = DigitalOceanNodeDriver(*DIGITAL_OCEAN_PARAMS) + + def test_authentication(self): + DigitalOceanMockHttp.type = 'UNAUTHORIZED_CLIENT' + self.assertRaises(InvalidCredsError, self.driver.list_nodes) + + def test_list_images_success(self): + images = self.driver.list_images() + self.assertTrue(len(images) >= 1) + + image = images[0] + self.assertTrue(image.id is not None) + self.assertTrue(image.name is not None) + + def test_list_sizes_success(self): + sizes = self.driver.list_sizes() + self.assertTrue(len(sizes) >= 1) + + size = sizes[0] + self.assertTrue(size.id is not None) + self.assertEqual(size.name, '512MB') + self.assertEqual(size.ram, 512) + + size = sizes[4] + self.assertTrue(size.id is not None) + self.assertEqual(size.name, '8GB') + self.assertEqual(size.ram, 8 * 1024) + + def test_list_locations_success(self): + locations = self.driver.list_locations() + self.assertTrue(len(locations) >= 1) + + location = locations[0] + self.assertEqual(location.id, '1') + self.assertEqual(location.name, 'New York 1') + + def test_list_nodes_success(self): + nodes = self.driver.list_nodes() + 
self.assertEqual(len(nodes), 1) + self.assertEqual(nodes[0].name, 'test-2') + self.assertEqual(nodes[0].public_ips, []) + + def test_create_node_invalid_size(self): + image = NodeImage(id='invalid', name=None, driver=self.driver) + size = self.driver.list_sizes()[0] + location = self.driver.list_locations()[0] + + DigitalOceanMockHttp.type = 'INVALID_IMAGE' + expected_msg = r'You specified an invalid image for Droplet creation. \(code: 404\)' + self.assertRaisesRegexp(Exception, expected_msg, + self.driver.create_node, + name='test', size=size, image=image, + location=location) + + def test_reboot_node_success(self): + node = self.driver.list_nodes()[0] + result = self.driver.reboot_node(node) + self.assertTrue(result) + + def test_destroy_node_success(self): + node = self.driver.list_nodes()[0] + result = self.driver.destroy_node(node) + self.assertTrue(result) + + def test_ex_rename_node_success(self): + node = self.driver.list_nodes()[0] + result = self.driver.ex_rename_node(node, 'fedora helios') + self.assertTrue(result) + + def test_ex_list_ssh_keys(self): + keys = self.driver.ex_list_ssh_keys() + self.assertEqual(len(keys), 1) + + self.assertEqual(keys[0].id, 7717) + self.assertEqual(keys[0].name, 'test1') + self.assertEqual(keys[0].pub_key, None) + + def test_ex_destroy_ssh_key(self): + key = self.driver.ex_list_ssh_keys()[0] + result = self.driver.ex_destroy_ssh_key(key.id) + self.assertTrue(result) + + +class DigitalOceanMockHttp(MockHttpTestCase): + fixtures = ComputeFileFixtures('digitalocean') + + def _regions(self, method, url, body, headers): + body = self.fixtures.load('list_locations.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _images(self, method, url, body, headers): + body = self.fixtures.load('list_images.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _sizes(self, method, url, body, headers): + body = self.fixtures.load('list_sizes.json') + return (httplib.OK, body, {}, 
httplib.responses[httplib.OK]) + + def _droplets(self, method, url, body, headers): + body = self.fixtures.load('list_nodes.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _droplets_new_INVALID_IMAGE(self, method, url, body, headers): + # reboot_node + body = self.fixtures.load('error_invalid_image.json') + return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.NOT_FOUND]) + + def _droplets_119461_reboot(self, method, url, body, headers): + # reboot_node + body = self.fixtures.load('reboot_node.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _droplets_119461_destroy(self, method, url, body, headers): + # destroy_node + self.assertUrlContainsQueryParams(url, {'scrub_data': '1'}) + body = self.fixtures.load('destroy_node.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _droplets_119461_rename(self, method, url, body, headers): + # reboot_node + self.assertUrlContainsQueryParams(url, {'name': 'fedora helios'}) + body = self.fixtures.load('ex_rename_node.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _ssh_keys(self, method, url, body, headers): + body = self.fixtures.load('ex_list_ssh_keys.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _ssh_keys_7717_destroy(self, method, url, body, headers): + # destroy_ssh_key + body = self.fixtures.load('ex_destroy_ssh_key.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _droplets_UNAUTHORIZED_CLIENT(self, method, url, body, headers): + body = self.fixtures.load('error.txt') + return (httplib.FOUND, body, {}, httplib.responses[httplib.FOUND]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_dreamhost.py libcloud-0.15.1/libcloud/test/compute/test_dreamhost.py --- libcloud-0.5.0/libcloud/test/compute/test_dreamhost.py 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/compute/test_dreamhost.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,284 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import unittest +from libcloud.utils.py3 import httplib + +try: + import simplejson as json +except ImportError: + import json + +from libcloud.common.types import InvalidCredsError +from libcloud.compute.drivers.dreamhost import DreamhostNodeDriver +from libcloud.compute.types import NodeState + +from libcloud.test import MockHttp +from libcloud.test.compute import TestCaseMixin +from libcloud.test.secrets import DREAMHOST_PARAMS + + +class DreamhostTest(unittest.TestCase, TestCaseMixin): + + def setUp(self): + DreamhostNodeDriver.connectionCls.conn_classes = ( + None, + DreamhostMockHttp + ) + DreamhostMockHttp.type = None + DreamhostMockHttp.use_param = 'cmd' + self.driver = DreamhostNodeDriver(*DREAMHOST_PARAMS) + + def test_invalid_creds(self): + """ + Tests the error-handling for passing a bad API Key to the DreamHost API + """ + DreamhostMockHttp.type = 'BAD_AUTH' + try: + self.driver.list_nodes() + self.assertTrue( + False) # Above command should have thrown an InvalidCredsException + except InvalidCredsError: + self.assertTrue(True) + + def 
test_list_nodes(self): + """ + Test list_nodes for DreamHost PS driver. Should return a list of two nodes: + - account_id: 000000 + ip: 75.119.203.51 + memory_mb: 500 + ps: ps22174 + start_date: 2010-02-25 + type: web + - account_id: 000000 + ip: 75.119.203.52 + memory_mb: 1500 + ps: ps22175 + start_date: 2010-02-25 + type: mysql + """ + + nodes = self.driver.list_nodes() + self.assertEqual(len(nodes), 2) + web_node = nodes[0] + mysql_node = nodes[1] + + # Web node tests + self.assertEqual(web_node.id, 'ps22174') + self.assertEqual(web_node.state, NodeState.UNKNOWN) + self.assertTrue('75.119.203.51' in web_node.public_ips) + self.assertTrue( + 'current_size' in web_node.extra and + web_node.extra['current_size'] == 500 + ) + self.assertTrue( + 'account_id' in web_node.extra and + web_node.extra['account_id'] == 000000 + ) + self.assertTrue( + 'type' in web_node.extra and + web_node.extra['type'] == 'web' + ) + # MySql node tests + self.assertEqual(mysql_node.id, 'ps22175') + self.assertEqual(mysql_node.state, NodeState.UNKNOWN) + self.assertTrue('75.119.203.52' in mysql_node.public_ips) + self.assertTrue( + 'current_size' in mysql_node.extra and + mysql_node.extra['current_size'] == 1500 + ) + self.assertTrue( + 'account_id' in mysql_node.extra and + mysql_node.extra['account_id'] == 000000 + ) + self.assertTrue( + 'type' in mysql_node.extra and + mysql_node.extra['type'] == 'mysql' + ) + + def test_create_node(self): + """ + Test create_node for DreamHost PS driver. + This is not remarkably compatible with libcloud. The DH API allows + users to specify what image they want to create and whether to move + all their data to the (web) PS. It does NOT accept a name, size, or + location. The only information it returns is the PS's context id + Once the PS is ready it will appear in the list generated by list_ps. 
+ """ + new_node = self.driver.create_node( + image=self.driver.list_images()[0], + size=self.driver.list_sizes()[0], + movedata='no', + ) + self.assertEqual(new_node.id, 'ps12345') + self.assertEqual(new_node.state, NodeState.PENDING) + self.assertTrue( + 'type' in new_node.extra and + new_node.extra['type'] == 'web' + ) + + def test_destroy_node(self): + """ + Test destroy_node for DreamHost PS driver + """ + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver.destroy_node(node)) + + def test_destroy_node_failure(self): + """ + Test destroy_node failure for DreamHost PS driver + """ + node = self.driver.list_nodes()[0] + + DreamhostMockHttp.type = 'API_FAILURE' + self.assertFalse(self.driver.destroy_node(node)) + + def test_reboot_node(self): + """ + Test reboot_node for DreamHost PS driver. + """ + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver.reboot_node(node)) + + def test_reboot_node_failure(self): + """ + Test reboot_node failure for DreamHost PS driver + """ + node = self.driver.list_nodes()[0] + + DreamhostMockHttp.type = 'API_FAILURE' + self.assertFalse(self.driver.reboot_node(node)) + + def test_resize_node(self): + """ + Test resize_node for DreamHost PS driver + """ + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver._resize_node(node, 400)) + + def test_resize_node_failure(self): + """ + Test reboot_node faliure for DreamHost PS driver + """ + node = self.driver.list_nodes()[0] + + DreamhostMockHttp.type = 'API_FAILURE' + self.assertFalse(self.driver._resize_node(node, 400)) + + def test_list_images(self): + """ + Test list_images for DreamHost PS driver. 
+ """ + images = self.driver.list_images() + self.assertEqual(len(images), 2) + self.assertEqual(images[0].id, 'web') + self.assertEqual(images[0].name, 'web') + self.assertEqual(images[1].id, 'mysql') + self.assertEqual(images[1].name, 'mysql') + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 5) + + size = [s for s in sizes if s.id == 'default'][0] + self.assertEqual(size.id, 'default') + self.assertEqual(size.bandwidth, None) + self.assertEqual(size.disk, None) + self.assertEqual(size.ram, 2300) + self.assertEqual(size.price, 115) + + def test_list_locations(self): + try: + self.driver.list_locations() + except NotImplementedError: + pass + + def test_list_locations_response(self): + self.assertRaises(NotImplementedError, self.driver.list_locations) + + +class DreamhostMockHttp(MockHttp): + + def _BAD_AUTH_dreamhost_ps_list_ps(self, method, url, body, headers): + body = json.dumps({'data': 'invalid_api_key', 'result': 'error'}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dreamhost_ps_add_ps(self, method, url, body, headers): + body = json.dumps( + {'data': {'added_web': 'ps12345'}, 'result': 'success'}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dreamhost_ps_list_ps(self, method, url, body, headers): + data = [{ + 'account_id': 000000, + 'ip': '75.119.203.51', + 'memory_mb': 500, + 'ps': 'ps22174', + 'start_date': '2010-02-25', + 'type': 'web' + }, + { + 'account_id': 000000, + 'ip': '75.119.203.52', + 'memory_mb': 1500, + 'ps': 'ps22175', + 'start_date': '2010-02-25', + 'type': 'mysql' + }] + result = 'success' + body = json.dumps({'data': data, 'result': result}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dreamhost_ps_list_images(self, method, url, body, headers): + data = [{ + 'description': 'Private web server', + 'image': 'web' + }, + { + 'description': 'Private MySQL server', + 'image': 'mysql' + }] + result = 'success' + 
body = json.dumps({'data': data, 'result': result}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dreamhost_ps_reboot(self, method, url, body, headers): + body = json.dumps({'data': 'reboot_scheduled', 'result': 'success'}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _API_FAILURE_dreamhost_ps_reboot(self, method, url, body, headers): + body = json.dumps({'data': 'no_such_ps', 'result': 'error'}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dreamhost_ps_set_size(self, method, url, body, headers): + body = json.dumps( + {'data': {'memory-mb': '500'}, 'result': 'success'}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _API_FAILURE_dreamhost_ps_set_size(self, method, url, body, headers): + body = json.dumps( + {'data': 'internal_error_setting_size', 'result': 'error'}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dreamhost_ps_remove_ps(self, method, url, body, headers): + body = json.dumps({'data': 'removed_web', 'result': 'success'}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _API_FAILURE_dreamhost_ps_remove_ps(self, method, url, body, headers): + body = json.dumps({'data': 'no_such_ps', 'result': 'error'}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_ec2.py libcloud-0.15.1/libcloud/test/compute/test_ec2.py --- libcloud-0.5.0/libcloud/test/compute/test_ec2.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_ec2.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,1682 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import with_statement + +import os +import sys +from datetime import datetime + +from libcloud.utils.py3 import httplib + +from libcloud.compute.drivers.ec2 import EC2NodeDriver +from libcloud.compute.drivers.ec2 import EC2USWestNodeDriver +from libcloud.compute.drivers.ec2 import EC2USWestOregonNodeDriver +from libcloud.compute.drivers.ec2 import EC2EUNodeDriver +from libcloud.compute.drivers.ec2 import EC2APSENodeDriver +from libcloud.compute.drivers.ec2 import EC2APNENodeDriver +from libcloud.compute.drivers.ec2 import EC2APSESydneyNodeDriver +from libcloud.compute.drivers.ec2 import EC2SAEastNodeDriver +from libcloud.compute.drivers.ec2 import NimbusNodeDriver, EucNodeDriver +from libcloud.compute.drivers.ec2 import OutscaleSASNodeDriver +from libcloud.compute.drivers.ec2 import IdempotentParamError +from libcloud.compute.drivers.ec2 import REGION_DETAILS +from libcloud.compute.drivers.ec2 import ExEC2AvailabilityZone +from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation +from libcloud.compute.base import StorageVolume, VolumeSnapshot +from libcloud.compute.types import KeyPairDoesNotExistError + +from libcloud.test import MockHttpTestCase, LibcloudTestCase +from libcloud.test.compute import TestCaseMixin +from libcloud.test.file_fixtures import ComputeFileFixtures + +from libcloud.test import unittest +from libcloud.test.secrets import EC2_PARAMS + + 
# Fingerprint the canned key-pair fixtures report for every key; shared by
# all key-pair related assertions below.
null_fingerprint = '00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:' + \
                   '00:00:00:00:00'


class BaseEC2Tests(LibcloudTestCase):
    """Driver-instantiation tests that do not depend on a concrete region."""

    def test_instantiate_driver_valid_regions(self):
        regions = REGION_DETAILS.keys()
        # 'nimbus' is a pseudo entry in REGION_DETAILS which the EC2 driver
        # deliberately rejects, so exclude it from the "valid" set.
        regions = [d for d in regions if d != 'nimbus']

        for region in regions:
            EC2NodeDriver(*EC2_PARAMS, **{'region': region})

    def test_instantiate_driver_invalid_regions(self):
        for region in ['invalid', 'nimbus']:
            try:
                EC2NodeDriver(*EC2_PARAMS, **{'region': region})
            except ValueError:
                pass
            else:
                self.fail('Invalid region, but exception was not thrown')


class EC2Tests(LibcloudTestCase, TestCaseMixin):
    """Functional tests for the EC2 node driver, run against canned
    EC2MockHttp responses. Regional subclasses below re-run the whole suite
    with a different ``region`` attribute.
    """

    image_name = 'ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml'
    region = 'us-east-1'

    def setUp(self):
        EC2MockHttp.test = self
        EC2NodeDriver.connectionCls.conn_classes = (None, EC2MockHttp)
        EC2MockHttp.use_param = 'Action'
        EC2MockHttp.type = None

        self.driver = EC2NodeDriver(*EC2_PARAMS,
                                    **{'region': self.region})

    def test_create_node(self):
        image = NodeImage(id='ami-be3adfd7',
                          name=self.image_name,
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)
        node = self.driver.create_node(name='foo', image=image, size=size)
        self.assertEqual(node.id, 'i-2ba64342')
        self.assertEqual(node.name, 'foo')
        self.assertEqual(node.extra['tags']['Name'], 'foo')
        self.assertEqual(len(node.extra['tags']), 1)

    def test_create_node_with_ex_mincount(self):
        image = NodeImage(id='ami-be3adfd7',
                          name=self.image_name,
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)
        node = self.driver.create_node(name='foo', image=image, size=size,
                                       ex_mincount=1, ex_maxcount=10)
        self.assertEqual(node.id, 'i-2ba64342')
        self.assertEqual(node.name, 'foo')
        self.assertEqual(node.extra['tags']['Name'], 'foo')
        self.assertEqual(len(node.extra['tags']), 1)

    def test_create_node_with_metadata(self):
        image = NodeImage(id='ami-be3adfd7',
                          name=self.image_name,
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)
        node = self.driver.create_node(name='foo',
                                       image=image,
                                       size=size,
                                       ex_metadata={'Bar': 'baz',
                                                    'Num': '42'})
        self.assertEqual(node.name, 'foo')
        self.assertEqual(node.extra['tags']['Name'], 'foo')
        self.assertEqual(node.extra['tags']['Bar'], 'baz')
        self.assertEqual(node.extra['tags']['Num'], '42')
        self.assertEqual(len(node.extra['tags']), 3)

    def test_create_node_idempotent(self):
        EC2MockHttp.type = 'idempotent'
        image = NodeImage(id='ami-be3adfd7',
                          name=self.image_name,
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)
        token = 'testclienttoken'
        node = self.driver.create_node(name='foo', image=image, size=size,
                                       ex_clienttoken=token)
        self.assertEqual(node.id, 'i-2ba64342')
        self.assertEqual(node.extra['client_token'], token)

        # from: http://docs.amazonwebservices.com/AWSEC2/latest/DeveloperGuide/index.html?Run_Instance_Idempotency.html
        # If you repeat the request with the same client token, but change
        # another request parameter, Amazon EC2 returns an
        # IdempotentParameterMismatch error.
        # In our case, changing the parameter doesn't actually matter since we
        # are forcing the error response fixture.
        EC2MockHttp.type = 'idempotent_mismatch'

        idem_error = None
        # different count
        try:
            self.driver.create_node(name='foo', image=image, size=size,
                                    ex_mincount='2', ex_maxcount='2',
                                    ex_clienttoken=token)
        except IdempotentParamError:
            # sys.exc_info() keeps this compatible with both Python 2 and 3
            # "except ... as e" syntax differences.
            e = sys.exc_info()[1]
            idem_error = e
        self.assertTrue(idem_error is not None)

    def test_create_node_no_availability_zone(self):
        image = NodeImage(id='ami-be3adfd7',
                          name=self.image_name,
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)
        node = self.driver.create_node(name='foo', image=image, size=size)
        location = NodeLocation(0, 'Amazon US N. Virginia', 'US', self.driver)
        self.assertEqual(node.id, 'i-2ba64342')
        node = self.driver.create_node(name='foo', image=image, size=size,
                                       location=location)
        self.assertEqual(node.id, 'i-2ba64342')
        self.assertEqual(node.name, 'foo')

    def test_list_nodes(self):
        node = self.driver.list_nodes()[0]
        public_ips = sorted(node.public_ips)
        self.assertEqual(node.id, 'i-4382922a')
        self.assertEqual(node.name, node.id)
        self.assertEqual(len(node.public_ips), 2)
        self.assertEqual(node.extra['launch_time'],
                         '2013-12-02T11:58:11.000Z')
        self.assertTrue('instance_type' in node.extra)
        self.assertEqual(node.extra['availability'], 'us-east-1d')
        self.assertEqual(node.extra['key_name'], 'fauxkey')
        self.assertEqual(node.extra['monitoring'], 'disabled')
        self.assertEqual(node.extra['image_id'], 'ami-3215fe5a')
        self.assertEqual(len(node.extra['groups']), 2)
        self.assertEqual(len(node.extra['block_device_mapping']), 1)
        self.assertEqual(node.extra['block_device_mapping'][0]['device_name'],
                         '/dev/sda1')
        self.assertEqual(
            node.extra['block_device_mapping'][0]['ebs']['volume_id'],
            'vol-5e312311')
        self.assertTrue(node.extra['block_device_mapping'][0]['ebs']['delete'])

        self.assertEqual(public_ips[0], '1.2.3.4')

        nodes = self.driver.list_nodes(ex_node_ids=['i-4382922a',
                                                    'i-8474834a'])
        ret_node1 = nodes[0]
        ret_node2 = nodes[1]

        self.assertEqual(ret_node1.id, 'i-4382922a')
        self.assertEqual(ret_node2.id, 'i-8474834a')
        self.assertEqual(ret_node2.name, 'Test Server 2')
        self.assertEqual(ret_node2.extra['subnet_id'], 'subnet-5fd9d412')
        self.assertEqual(ret_node2.extra['vpc_id'], 'vpc-61dcd30e')
        self.assertEqual(ret_node2.extra['tags']['Group'], 'VPC Test')
        self.assertEqual(ret_node1.extra['launch_time'],
                         '2013-12-02T11:58:11.000Z')
        self.assertTrue('instance_type' in ret_node1.extra)
        self.assertEqual(ret_node2.extra['launch_time'],
                         '2013-12-02T15:58:29.000Z')
        self.assertTrue('instance_type' in ret_node2.extra)

    def test_ex_list_reserved_nodes(self):
        node = self.driver.ex_list_reserved_nodes()[0]
        self.assertEqual(node.id, '93bbbca2-c500-49d0-9ede-9d8737400498')
        self.assertEqual(node.state, 'active')
        self.assertEqual(node.extra['instance_type'], 't1.micro')
        self.assertEqual(node.extra['availability'], 'us-east-1b')
        self.assertEqual(node.extra['start'], '2013-06-18T12:07:53.161Z')
        self.assertEqual(node.extra['duration'], 31536000)
        self.assertEqual(node.extra['usage_price'], 0.012)
        self.assertEqual(node.extra['fixed_price'], 23.0)
        self.assertEqual(node.extra['instance_count'], 1)
        self.assertEqual(node.extra['description'], 'Linux/UNIX')
        self.assertEqual(node.extra['instance_tenancy'], 'default')
        self.assertEqual(node.extra['currency_code'], 'USD')
        self.assertEqual(node.extra['offering_type'], 'Light Utilization')

    def test_list_location(self):
        locations = self.driver.list_locations()
        self.assertTrue(len(locations) > 0)
        self.assertEqual(locations[0].name, 'eu-west-1a')
        self.assertTrue(locations[0].availability_zone is not None)
        self.assertTrue(isinstance(locations[0].availability_zone,
                                   ExEC2AvailabilityZone))

    def test_list_security_groups(self):
        groups = self.driver.ex_list_security_groups()
        self.assertEqual(groups, ['WebServers', 'RangedPortsBySource'])

    def test_ex_delete_security_group_by_id(self):
        group_id = 'sg-443d0a12'
        retValue = self.driver.ex_delete_security_group_by_id(group_id)
        self.assertTrue(retValue)

    def test_delete_security_group_by_name(self):
        group_name = 'WebServers'
        retValue = self.driver.ex_delete_security_group_by_name(group_name)
        self.assertTrue(retValue)

    def test_ex_delete_security_group(self):
        name = 'WebServers'
        retValue = self.driver.ex_delete_security_group(name)
        self.assertTrue(retValue)

    def test_authorize_security_group(self):
        resp = self.driver.ex_authorize_security_group('TestGroup', '22', '22',
                                                       '0.0.0.0/0')
        self.assertTrue(resp)

    def test_authorize_security_group_ingress(self):
        ranges = ['1.1.1.1/32', '2.2.2.2/32']
        resp = self.driver.ex_authorize_security_group_ingress(
            'sg-42916629', 22, 22, cidr_ips=ranges)
        self.assertTrue(resp)
        groups = [{'group_id': 'sg-949265ff'}]
        resp = self.driver.ex_authorize_security_group_ingress(
            'sg-42916629', 22, 23, group_pairs=groups)
        self.assertTrue(resp)

    def test_authorize_security_group_egress(self):
        # Fix: this test previously called the *ingress* method, so the
        # egress code path was never exercised.
        ranges = ['1.1.1.1/32', '2.2.2.2/32']
        resp = self.driver.ex_authorize_security_group_egress(
            'sg-42916629', 22, 22, cidr_ips=ranges)
        self.assertTrue(resp)
        groups = [{'group_id': 'sg-949265ff'}]
        resp = self.driver.ex_authorize_security_group_egress(
            'sg-42916629', 22, 22, group_pairs=groups)
        self.assertTrue(resp)

    def test_revoke_security_group_ingress(self):
        # Fix: previously called ex_authorize_security_group_ingress; call
        # the revoke method this test is named for.
        ranges = ['1.1.1.1/32', '2.2.2.2/32']
        resp = self.driver.ex_revoke_security_group_ingress(
            'sg-42916629', 22, 22, cidr_ips=ranges)
        self.assertTrue(resp)
        groups = [{'group_id': 'sg-949265ff'}]
        resp = self.driver.ex_revoke_security_group_ingress(
            'sg-42916629', 22, 22, group_pairs=groups)
        self.assertTrue(resp)

    def test_revoke_security_group_egress(self):
        # Fix: previously called ex_authorize_security_group_ingress; call
        # the revoke-egress method this test is named for.
        ranges = ['1.1.1.1/32', '2.2.2.2/32']
        resp = self.driver.ex_revoke_security_group_egress(
            'sg-42916629', 22, 22, cidr_ips=ranges)
        self.assertTrue(resp)
        groups = [{'group_id': 'sg-949265ff'}]
        resp = self.driver.ex_revoke_security_group_egress(
            'sg-42916629', 22, 22, group_pairs=groups)
        self.assertTrue(resp)

    def test_reboot_node(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        ret = self.driver.reboot_node(node)
        self.assertTrue(ret)

    def test_ex_start_node(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        ret = self.driver.ex_start_node(node)
        self.assertTrue(ret)

    def test_ex_stop_node(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        ret = self.driver.ex_stop_node(node)
        self.assertTrue(ret)

    def test_ex_create_node_with_ex_blockdevicemappings(self):
        EC2MockHttp.type = 'create_ex_blockdevicemappings'

        image = NodeImage(id='ami-be3adfd7',
                          name=self.image_name,
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)
        mappings = [
            {'DeviceName': '/dev/sda1', 'Ebs.VolumeSize': 10},
            {'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'},
            {'DeviceName': '/dev/sdc', 'VirtualName': 'ephemeral1'}
        ]
        node = self.driver.create_node(name='foo', image=image, size=size,
                                       ex_blockdevicemappings=mappings)
        self.assertEqual(node.id, 'i-2ba64342')

    def test_ex_create_node_with_ex_blockdevicemappings_attribute_error(self):
        EC2MockHttp.type = 'create_ex_blockdevicemappings'

        image = NodeImage(id='ami-be3adfd7',
                          name=self.image_name,
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)

        mappings = 'this should be a list'
        self.assertRaises(AttributeError, self.driver.create_node, name='foo',
                          image=image, size=size,
                          ex_blockdevicemappings=mappings)

        mappings = ['this should be a dict']
        self.assertRaises(AttributeError, self.driver.create_node, name='foo',
                          image=image, size=size,
                          ex_blockdevicemappings=mappings)

    def test_destroy_node(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        ret = self.driver.destroy_node(node)
        self.assertTrue(ret)

    def test_list_sizes(self):
        region_old = self.driver.region_name

        names = [
            ('ec2_us_east', 'us-east-1'),
            ('ec2_us_west', 'us-west-1'),
            ('ec2_eu_west', 'eu-west-1'),
            ('ec2_ap_southeast', 'ap-southeast-1'),
            ('ec2_ap_northeast', 'ap-northeast-1'),
            ('ec2_ap_southeast_2', 'ap-southeast-2')
        ]

        # Fix: restore the original region in a finally block so a failing
        # assertion can not leak a mutated driver region into other tests.
        try:
            for api_name, region_name in names:
                self.driver.api_name = api_name
                self.driver.region_name = region_name
                sizes = self.driver.list_sizes()

                ids = [s.id for s in sizes]
                self.assertTrue('t1.micro' in ids)
                self.assertTrue('m1.small' in ids)
                self.assertTrue('m1.large' in ids)
                self.assertTrue('m1.xlarge' in ids)
                self.assertTrue('c1.medium' in ids)
                self.assertTrue('c1.xlarge' in ids)
                self.assertTrue('m2.xlarge' in ids)
                self.assertTrue('m2.2xlarge' in ids)
                self.assertTrue('m2.4xlarge' in ids)

                if region_name == 'us-east-1':
                    self.assertEqual(len(sizes), 33)
                    self.assertTrue('cg1.4xlarge' in ids)
                    self.assertTrue('cc2.8xlarge' in ids)
                    self.assertTrue('cr1.8xlarge' in ids)
                elif region_name == 'us-west-1':
                    self.assertEqual(len(sizes), 29)
                # Fix: this was a bare `if`, breaking the elif chain.
                elif region_name == 'us-west-2':
                    self.assertEqual(len(sizes), 29)
                elif region_name == 'ap-southeast-1':
                    self.assertEqual(len(sizes), 24)
                elif region_name == 'ap-southeast-2':
                    self.assertEqual(len(sizes), 29)
                elif region_name == 'eu-west-1':
                    self.assertEqual(len(sizes), 31)
        finally:
            self.driver.region_name = region_old

    def test_ex_create_node_with_ex_iam_profile(self):
        iamProfile = {
            'id': 'AIDGPMS9RO4H3FEXAMPLE',
            'name': 'Foo',
            'arn': 'arn:aws:iam:...'
        }

        image = NodeImage(id='ami-be3adfd7',
                          name=self.image_name,
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)

        EC2MockHttp.type = None
        node1 = self.driver.create_node(name='foo', image=image, size=size)
        EC2MockHttp.type = 'ex_iam_profile'
        node2 = self.driver.create_node(name='bar', image=image, size=size,
                                        ex_iam_profile=iamProfile['name'])
        node3 = self.driver.create_node(name='bar', image=image, size=size,
                                        ex_iam_profile=iamProfile['arn'])

        self.assertFalse(node1.extra['iam_profile'])
        self.assertEqual(node2.extra['iam_profile'], iamProfile['id'])
        self.assertEqual(node3.extra['iam_profile'], iamProfile['id'])

    def test_list_images(self):
        images = self.driver.list_images()

        self.assertEqual(len(images), 2)
        location = '123456788908/Test Image'
        self.assertEqual(images[0].id, 'ami-57ba933a')
        self.assertEqual(images[0].name, 'Test Image')
        self.assertEqual(images[0].extra['image_location'], location)
        self.assertEqual(images[0].extra['architecture'], 'x86_64')
        self.assertEqual(len(images[0].extra['block_device_mapping']), 2)
        ephemeral = images[0].extra['block_device_mapping'][1]['virtual_name']
        self.assertEqual(ephemeral, 'ephemeral0')

        location = '123456788908/Test Image 2'
        self.assertEqual(images[1].id, 'ami-85b2a8ae')
        self.assertEqual(images[1].name, 'Test Image 2')
        self.assertEqual(images[1].extra['image_location'], location)
        self.assertEqual(images[1].extra['architecture'], 'x86_64')
        size = images[1].extra['block_device_mapping'][0]['ebs']['volume_size']
        self.assertEqual(size, 20)

    def test_list_images_with_image_ids(self):
        EC2MockHttp.type = 'ex_imageids'
        images = self.driver.list_images(ex_image_ids=['ami-57ba933a'])

        self.assertEqual(len(images), 1)
        self.assertEqual(images[0].name, 'Test Image')

    def test_list_images_with_executable_by(self):
        images = self.driver.list_images(ex_executableby='self')

        self.assertEqual(len(images), 2)

    def test_get_image(self):
        image = self.driver.get_image('ami-57ba933a')
        self.assertEqual(image.id, 'ami-57ba933a')
        self.assertEqual(image.name, 'Test Image')
        self.assertEqual(image.extra['architecture'], 'x86_64')
        self.assertEqual(len(image.extra['block_device_mapping']), 2)

    def test_copy_image(self):
        image = self.driver.list_images()[0]
        resp = self.driver.copy_image(image, 'us-east-1',
                                      name='Faux Image',
                                      description='Test Image Copy')
        self.assertEqual(resp.id, 'ami-4db38224')

    def test_create_image(self):
        node = self.driver.list_nodes()[0]

        mapping = [{'VirtualName': None,
                    'Ebs': {'VolumeSize': 10,
                            'VolumeType': 'standard',
                            'DeleteOnTermination': 'true'},
                    'DeviceName': '/dev/sda1'}]

        resp = self.driver.create_image(node,
                                        'New Image',
                                        description='New EBS Image',
                                        block_device_mapping=mapping)
        self.assertEqual(resp.id, 'ami-e9b38280')

    def test_create_image_no_mapping(self):
        node = self.driver.list_nodes()[0]

        resp = self.driver.create_image(node,
                                        'New Image',
                                        description='New EBS Image')
        self.assertEqual(resp.id, 'ami-e9b38280')

    def test_delete_image(self):
        # Fix: method was named `delete_image` (no `test_` prefix), so
        # unittest discovery silently never ran it.
        images = self.driver.list_images()
        image = images[0]

        resp = self.driver.delete_image(image)
        self.assertTrue(resp)

    def test_ex_register_image(self):
        # Fix: method was named `ex_register_image` (no `test_` prefix), so
        # unittest discovery silently never ran it.
        mapping = [{'DeviceName': '/dev/sda1',
                    'Ebs': {'SnapshotId': 'snap-5ade3e4e'}}]
        image = self.driver.ex_register_image(name='Test Image',
                                              root_device_name='/dev/sda1',
                                              description='My Image',
                                              architecture='x86_64',
                                              block_device_mapping=mapping)
        self.assertEqual(image.id, 'ami-57c2fb3e')

    def test_ex_list_availability_zones(self):
        availability_zones = self.driver.ex_list_availability_zones()
        availability_zone = availability_zones[0]
        self.assertTrue(len(availability_zones) > 0)
        self.assertEqual(availability_zone.name, 'eu-west-1a')
        self.assertEqual(availability_zone.zone_state, 'available')
        self.assertEqual(availability_zone.region_name, 'eu-west-1')

    def test_list_keypairs(self):
        keypairs = self.driver.list_key_pairs()

        self.assertEqual(len(keypairs), 1)
        self.assertEqual(keypairs[0].name, 'gsg-keypair')
        self.assertEqual(keypairs[0].fingerprint, null_fingerprint)

        # Test old deprecated method
        keypairs = self.driver.ex_list_keypairs()

        self.assertEqual(len(keypairs), 1)
        self.assertEqual(keypairs[0]['keyName'], 'gsg-keypair')
        self.assertEqual(keypairs[0]['keyFingerprint'], null_fingerprint)

    def test_get_key_pair(self):
        EC2MockHttp.type = 'get_one'

        key_pair = self.driver.get_key_pair(name='gsg-keypair')
        self.assertEqual(key_pair.name, 'gsg-keypair')

    def test_get_key_pair_does_not_exist(self):
        EC2MockHttp.type = 'doesnt_exist'

        self.assertRaises(KeyPairDoesNotExistError, self.driver.get_key_pair,
                          name='test-key-pair')

    def test_create_key_pair(self):
        key_pair = self.driver.create_key_pair(name='test-keypair')

        fingerprint = ('1f:51:ae:28:bf:89:e9:d8:1f:25:5d'
                       ':37:2d:7d:b8:ca:9f:f5:f1:6f')

        self.assertEqual(key_pair.name, 'my-key-pair')
        self.assertEqual(key_pair.fingerprint, fingerprint)
        self.assertTrue(key_pair.private_key is not None)

        # Test old and deprecated method
        key_pair = self.driver.ex_create_keypair(name='test-keypair')
        self.assertEqual(key_pair['keyFingerprint'], fingerprint)
        self.assertTrue(key_pair['keyMaterial'] is not None)

    def test_ex_describe_all_keypairs(self):
        keys = self.driver.ex_describe_all_keypairs()
        self.assertEqual(keys, ['gsg-keypair'])

    def test_list_key_pairs(self):
        keypair1 = self.driver.list_key_pairs()[0]

        self.assertEqual(keypair1.name, 'gsg-keypair')
        self.assertEqual(keypair1.fingerprint, null_fingerprint)

        # Test backward compatibility
        keypair2 = self.driver.ex_describe_keypairs('gsg-keypair')

        self.assertEqual(keypair2['keyName'], 'gsg-keypair')
        self.assertEqual(keypair2['keyFingerprint'], null_fingerprint)

    def test_delete_key_pair(self):
        keypair = self.driver.list_key_pairs()[0]
        success = self.driver.delete_key_pair(keypair)

        self.assertTrue(success)

        # Test old and deprecated method
        resp = self.driver.ex_delete_keypair('gsg-keypair')
        self.assertTrue(resp)

    def test_ex_describe_tags(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        tags = self.driver.ex_describe_tags(resource=node)

        self.assertEqual(len(tags), 3)
        self.assertTrue('tag' in tags)
        self.assertTrue('owner' in tags)
        self.assertTrue('stack' in tags)

    def test_import_key_pair_from_string(self):
        path = os.path.join(os.path.dirname(__file__), 'fixtures', 'misc',
                            'dummy_rsa.pub')

        with open(path, 'r') as fp:
            key_material = fp.read()

        key = self.driver.import_key_pair_from_string(
            name='keypair', key_material=key_material)
        self.assertEqual(key.name, 'keypair')
        self.assertEqual(key.fingerprint, null_fingerprint)

        # Test old and deprecated method
        key = self.driver.ex_import_keypair_from_string('keypair',
                                                        key_material)
        self.assertEqual(key['keyName'], 'keypair')
        self.assertEqual(key['keyFingerprint'], null_fingerprint)

    def test_import_key_pair_from_file(self):
        path = os.path.join(os.path.dirname(__file__), 'fixtures', 'misc',
                            'dummy_rsa.pub')

        key = self.driver.import_key_pair_from_file('keypair', path)
        self.assertEqual(key.name, 'keypair')
        self.assertEqual(key.fingerprint, null_fingerprint)

        # Test old and deprecated method
        key = self.driver.ex_import_keypair('keypair', path)
        self.assertEqual(key['keyName'], 'keypair')
        self.assertEqual(key['keyFingerprint'], null_fingerprint)

    def test_ex_create_tags(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        self.driver.ex_create_tags(node, {'sample': 'tag'})

    def test_ex_delete_tags(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        self.driver.ex_delete_tags(node, {'sample': 'tag'})

    def test_ex_describe_addresses_for_node(self):
        node1 = Node('i-4382922a', None, None, None, None, self.driver)
        ip_addresses1 = self.driver.ex_describe_addresses_for_node(node1)
        node2 = Node('i-4382922b', None, None, None, None, self.driver)
        ip_addresses2 = sorted(
            self.driver.ex_describe_addresses_for_node(node2))
        node3 = Node('i-4382922g', None, None, None, None, self.driver)
        ip_addresses3 = sorted(
            self.driver.ex_describe_addresses_for_node(node3))

        self.assertEqual(len(ip_addresses1), 1)
        self.assertEqual(ip_addresses1[0], '1.2.3.4')

        self.assertEqual(len(ip_addresses2), 2)
        self.assertEqual(ip_addresses2[0], '1.2.3.5')
        self.assertEqual(ip_addresses2[1], '1.2.3.6')

        self.assertEqual(len(ip_addresses3), 0)

    def test_ex_describe_addresses(self):
        node1 = Node('i-4382922a', None, None, None, None, self.driver)
        node2 = Node('i-4382922g', None, None, None, None, self.driver)
        nodes_elastic_ips1 = self.driver.ex_describe_addresses([node1])
        nodes_elastic_ips2 = self.driver.ex_describe_addresses([node2])

        self.assertEqual(len(nodes_elastic_ips1), 1)
        self.assertTrue(node1.id in nodes_elastic_ips1)
        self.assertEqual(nodes_elastic_ips1[node1.id], ['1.2.3.4'])

        self.assertEqual(len(nodes_elastic_ips2), 1)
        self.assertTrue(node2.id in nodes_elastic_ips2)
        self.assertEqual(nodes_elastic_ips2[node2.id], [])

    def test_ex_describe_all_addresses(self):
        EC2MockHttp.type = 'all_addresses'
        elastic_ips1 = self.driver.ex_describe_all_addresses()
        elastic_ips2 = self.driver.ex_describe_all_addresses(
            only_associated=True)
        self.assertEqual('1.2.3.7', elastic_ips1[3].ip)
        self.assertEqual('vpc', elastic_ips1[3].domain)
        self.assertEqual('eipalloc-992a5cf8',
                         elastic_ips1[3].extra['allocation_id'])

        self.assertEqual(len(elastic_ips2), 2)
        self.assertEqual('1.2.3.5', elastic_ips2[1].ip)
        self.assertEqual('vpc', elastic_ips2[1].domain)

    def test_ex_allocate_address(self):
        elastic_ip = self.driver.ex_allocate_address()
        self.assertEqual('192.0.2.1', elastic_ip.ip)
        self.assertEqual('standard', elastic_ip.domain)
        EC2MockHttp.type = 'vpc'
        elastic_ip = self.driver.ex_allocate_address(domain='vpc')
        self.assertEqual('192.0.2.2', elastic_ip.ip)
        self.assertEqual('vpc', elastic_ip.domain)
        self.assertEqual('eipalloc-666d7f04',
                         elastic_ip.extra['allocation_id'])

    def test_ex_release_address(self):
        EC2MockHttp.type = 'all_addresses'
        elastic_ips = self.driver.ex_describe_all_addresses()
        EC2MockHttp.type = ''
        ret = self.driver.ex_release_address(elastic_ips[2])
        self.assertTrue(ret)
        ret = self.driver.ex_release_address(elastic_ips[0], domain='vpc')
        self.assertTrue(ret)
        self.assertRaises(AttributeError,
                          self.driver.ex_release_address,
                          elastic_ips[0],
                          domain='bogus')

    def test_ex_associate_address_with_node(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        EC2MockHttp.type = 'all_addresses'
        elastic_ips = self.driver.ex_describe_all_addresses()
        EC2MockHttp.type = ''
        ret1 = self.driver.ex_associate_address_with_node(
            node, elastic_ips[2])
        ret2 = self.driver.ex_associate_addresses(
            node, elastic_ips[2])
        self.assertEqual(None, ret1)
        self.assertEqual(None, ret2)
        EC2MockHttp.type = 'vpc'
        ret3 = self.driver.ex_associate_address_with_node(
            node, elastic_ips[3], domain='vpc')
        ret4 = self.driver.ex_associate_addresses(
            node, elastic_ips[3], domain='vpc')
        self.assertEqual('eipassoc-167a8073', ret3)
        self.assertEqual('eipassoc-167a8073', ret4)
        self.assertRaises(AttributeError,
                          self.driver.ex_associate_address_with_node,
                          node,
                          elastic_ips[1],
                          domain='bogus')

    def test_ex_disassociate_address(self):
        EC2MockHttp.type = 'all_addresses'
        elastic_ips = self.driver.ex_describe_all_addresses()
        EC2MockHttp.type = ''
        ret = self.driver.ex_disassociate_address(elastic_ips[2])
        self.assertTrue(ret)
        # Test a VPC disassociation
        ret = self.driver.ex_disassociate_address(elastic_ips[1],
                                                  domain='vpc')
        self.assertTrue(ret)
        self.assertRaises(AttributeError,
                          self.driver.ex_disassociate_address,
                          elastic_ips[1],
                          domain='bogus')

    def test_ex_change_node_size_same_size(self):
        size = NodeSize('m1.small', 'Small Instance',
                        None, None, None, None, driver=self.driver)
        node = Node('i-4382922a', None, None, None, None, self.driver,
                    extra={'instancetype': 'm1.small'})

        try:
            self.driver.ex_change_node_size(node=node, new_size=size)
        except ValueError:
            pass
        else:
            self.fail('Same size was passed, but an exception was not thrown')

    def test_ex_change_node_size(self):
        size = NodeSize('m1.large', 'Small Instance',
                        None, None, None, None, driver=self.driver)
        node = Node('i-4382922a', None, None, None, None, self.driver,
                    extra={'instancetype': 'm1.small'})

        result = self.driver.ex_change_node_size(node=node, new_size=size)
        self.assertTrue(result)

    def test_list_volumes(self):
        volumes = self.driver.list_volumes()

        self.assertEqual(len(volumes), 3)

        self.assertEqual('vol-10ae5e2b', volumes[0].id)
        self.assertEqual(1, volumes[0].size)
        self.assertEqual('available', volumes[0].extra['state'])

        self.assertEqual('vol-v24bfh75', volumes[1].id)
        self.assertEqual(11, volumes[1].size)
        self.assertEqual('available', volumes[1].extra['state'])

        self.assertEqual('vol-b6c851ec', volumes[2].id)
        self.assertEqual(8, volumes[2].size)
        self.assertEqual('in-use', volumes[2].extra['state'])
        self.assertEqual('i-d334b4b3', volumes[2].extra['instance_id'])
        self.assertEqual('/dev/sda1', volumes[2].extra['device'])

    def test_create_volume(self):
        location = self.driver.list_locations()[0]
        vol = self.driver.create_volume(10, 'vol', location)

        self.assertEqual(10, vol.size)
        self.assertEqual('vol', vol.name)
        self.assertEqual('creating', vol.extra['state'])
        self.assertTrue(isinstance(vol.extra['create_time'], datetime))

    def test_destroy_volume(self):
        vol = StorageVolume(id='vol-4282672b', name='test',
                            size=10, driver=self.driver)

        retValue = self.driver.destroy_volume(vol)
        self.assertTrue(retValue)

    def test_attach(self):
        vol = StorageVolume(id='vol-4282672b', name='test',
                            size=10, driver=self.driver)

        node = Node('i-4382922a', None, None, None, None, self.driver)
        retValue = self.driver.attach_volume(node, vol, '/dev/sdh')

        self.assertTrue(retValue)

    def test_detach(self):
        vol = StorageVolume(id='vol-4282672b', name='test',
                            size=10, driver=self.driver)

        retValue = self.driver.detach_volume(vol)
        self.assertTrue(retValue)

    def test_create_volume_snapshot(self):
        vol = StorageVolume(id='vol-4282672b', name='test',
                            size=10, driver=self.driver)
        snap = self.driver.create_volume_snapshot(
            vol, 'Test snapshot')
        self.assertEqual('snap-a7cb2hd9', snap.id)
        self.assertEqual(vol.size, snap.size)
        self.assertEqual('Test snapshot', snap.extra['name'])
        self.assertEqual(vol.id, snap.extra['volume_id'])
        self.assertEqual('pending', snap.extra['state'])

    def test_list_snapshots(self):
        snaps = self.driver.list_snapshots()

        self.assertEqual(len(snaps), 2)

        self.assertEqual('snap-428abd35', snaps[0].id)
        self.assertEqual('vol-e020df80', snaps[0].extra['volume_id'])
        self.assertEqual(30, snaps[0].size)
        self.assertEqual('Daily Backup', snaps[0].extra['description'])

        self.assertEqual('snap-18349159', snaps[1].id)
        self.assertEqual('vol-b5a2c1v9', snaps[1].extra['volume_id'])
        self.assertEqual(15, snaps[1].size)
        self.assertEqual('Weekly backup', snaps[1].extra['description'])
        self.assertEqual('DB Backup 1', snaps[1].extra['name'])

    def test_destroy_snapshot(self):
        snap = VolumeSnapshot(id='snap-428abd35', size=10, driver=self.driver)
        resp = snap.destroy()
        self.assertTrue(resp)

    def test_ex_modify_image_attribute(self):
        images = self.driver.list_images()
        image = images[0]

        data = {'LaunchPermission.Add.1.Group': 'all'}
        resp = self.driver.ex_modify_image_attribute(image, data)
        self.assertTrue(resp)

    def test_create_node_ex_security_groups(self):
        EC2MockHttp.type = 'ex_security_groups'

        image = NodeImage(id='ami-be3adfd7',
                          name=self.image_name,
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)

        security_groups = ['group1', 'group2']

        # Old, deprecated argument name
        self.driver.create_node(name='foo', image=image, size=size,
                                ex_securitygroup=security_groups)

        # New argument name
        self.driver.create_node(name='foo', image=image, size=size,
                                ex_security_groups=security_groups)

        # Test old and new arguments are mutually exclusive
        self.assertRaises(ValueError, self.driver.create_node,
                          name='foo', image=image, size=size,
                          ex_securitygroup=security_groups,
                          ex_security_groups=security_groups)

    def test_ex_get_metadata_for_node(self):
        image = NodeImage(id='ami-be3adfd7',
                          name=self.image_name,
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)
        node = self.driver.create_node(name='foo',
                                       image=image,
                                       size=size,
                                       ex_metadata={'Bar': 'baz',
                                                    'Num': '42'})

        metadata = self.driver.ex_get_metadata_for_node(node)
        self.assertEqual(metadata['Name'], 'foo')
        self.assertEqual(metadata['Bar'], 'baz')
        self.assertEqual(metadata['Num'], '42')
        self.assertEqual(len(metadata), 3)

    def test_ex_get_limits(self):
        limits = self.driver.ex_get_limits()

        expected = {'max-instances': 20, 'vpc-max-elastic-ips': 5,
                    'max-elastic-ips': 5}
        self.assertEqual(limits['resource'], expected)

    def test_ex_create_security_group(self):
        group = self.driver.ex_create_security_group(
            "WebServers",
            "Rules to protect web nodes",
            "vpc-143cab4")

        self.assertEqual(group["group_id"], "sg-52e2f530")

    def test_ex_list_networks(self):
        vpcs = self.driver.ex_list_networks()

        self.assertEqual(len(vpcs), 2)

        self.assertEqual('vpc-532335e1', vpcs[0].id)
        self.assertEqual('vpc-532335e1', vpcs[0].name)
        self.assertEqual('192.168.51.0/24', vpcs[0].cidr_block)
        self.assertEqual('available', vpcs[0].extra['state'])
        self.assertEqual('dopt-7eded312', vpcs[0].extra['dhcp_options_id'])

        self.assertEqual('vpc-62ded30e', vpcs[1].id)
        self.assertEqual('Test VPC', vpcs[1].name)
        self.assertEqual('192.168.52.0/24', vpcs[1].cidr_block)
        self.assertEqual('available', vpcs[1].extra['state'])
        self.assertEqual('dopt-7eded312', vpcs[1].extra['dhcp_options_id'])

    def test_ex_list_networks_network_ids(self):
        EC2MockHttp.type = 'network_ids'
        network_ids = ['vpc-532335e1']

        # We assert in the mock http method
        self.driver.ex_list_networks(network_ids=network_ids)

    def test_ex_list_networks_filters(self):
        EC2MockHttp.type = 'filters'
        filters = {'dhcp-options-id': 'dopt-7eded312',  # matches two networks
                   'cidr': '192.168.51.0/24'}  # matches one network

        # We assert in the mock http method
        self.driver.ex_list_networks(filters=filters)

    def test_ex_create_network(self):
        vpc = self.driver.ex_create_network('192.168.55.0/24',
                                            name='Test VPC',
                                            instance_tenancy='default')

        self.assertEqual('vpc-ad3527cf', vpc.id)
        self.assertEqual('192.168.55.0/24', vpc.cidr_block)
        self.assertEqual('pending', vpc.extra['state'])

    def test_ex_delete_network(self):
        vpcs = self.driver.ex_list_networks()
        vpc = vpcs[0]

        resp = self.driver.ex_delete_network(vpc)
        self.assertTrue(resp)

    def test_ex_list_subnets(self):
        subnets = self.driver.ex_list_subnets()

        self.assertEqual(len(subnets), 2)

        self.assertEqual('subnet-ce0e7ce5', subnets[0].id)
        self.assertEqual('available', subnets[0].state)
        self.assertEqual(123, subnets[0].extra['available_ips'])

        self.assertEqual('subnet-ce0e7ce6', subnets[1].id)
        self.assertEqual('available', subnets[1].state)
        self.assertEqual(59, subnets[1].extra['available_ips'])

    def test_ex_create_subnet(self):
        subnet = self.driver.ex_create_subnet('vpc-532135d1',
                                              '192.168.51.128/26',
                                              'us-east-1b',
                                              name='Test Subnet')

        self.assertEqual('subnet-ce0e7ce6', subnet.id)
        self.assertEqual('pending', subnet.state)
        self.assertEqual('vpc-532135d1', subnet.extra['vpc_id'])

    def test_ex_delete_subnet(self):
        subnet = self.driver.ex_list_subnets()[0]
        resp = self.driver.ex_delete_subnet(subnet=subnet)
        self.assertTrue(resp)

    def test_ex_get_console_output(self):
        node = self.driver.list_nodes()[0]
        resp = self.driver.ex_get_console_output(node)
        self.assertEqual('Test String', resp['output'])

    def test_ex_list_network_interfaces(self):
        interfaces = self.driver.ex_list_network_interfaces()

        self.assertEqual(len(interfaces), 2)

        self.assertEqual('eni-18e6c05e', interfaces[0].id)
        self.assertEqual('in-use', interfaces[0].state)
        self.assertEqual('0e:6e:df:72:78:af',
                         interfaces[0].extra['mac_address'])

        self.assertEqual('eni-83e3c5c5', interfaces[1].id)
        self.assertEqual('in-use', interfaces[1].state)
        self.assertEqual('0e:93:0b:e9:e9:c4',
                         interfaces[1].extra['mac_address'])

    def test_ex_create_network_interface(self):
        subnet = self.driver.ex_list_subnets()[0]
        interface = self.driver.ex_create_network_interface(
            subnet,
            name='Test Interface',
            description='My Test')

        self.assertEqual('eni-2b36086d', interface.id)
        self.assertEqual('pending', interface.state)
        self.assertEqual('0e:bd:49:3e:11:74', interface.extra['mac_address'])

    def test_ex_delete_network_interface(self):
        interface = self.driver.ex_list_network_interfaces()[0]
        resp = self.driver.ex_delete_network_interface(interface)
        self.assertTrue(resp)

    def test_ex_attach_network_interface_to_node(self):
        node = self.driver.list_nodes()[0]
        interface = self.driver.ex_list_network_interfaces()[0]
        resp = self.driver.ex_attach_network_interface_to_node(interface,
                                                               node, 1)
        self.assertTrue(resp)

    def test_ex_detach_network_interface(self):
        resp = self.driver.ex_detach_network_interface('eni-attach-2b588b47')
        self.assertTrue(resp)

    def test_ex_list_internet_gateways(self):
        gateways = self.driver.ex_list_internet_gateways()

        self.assertEqual(len(gateways), 2)

        self.assertEqual('igw-84dd3ae1', gateways[0].id)
        self.assertEqual('igw-7fdae215', gateways[1].id)
        self.assertEqual('available', gateways[1].state)
        self.assertEqual('vpc-62cad41e', gateways[1].vpc_id)

    def test_ex_create_internet_gateway(self):
        gateway = self.driver.ex_create_internet_gateway()

        self.assertEqual('igw-13ac2b36', gateway.id)

    def test_ex_delete_internet_gateway(self):
        gateway = self.driver.ex_list_internet_gateways()[0]
        resp = self.driver.ex_delete_internet_gateway(gateway)
        self.assertTrue(resp)

    def test_ex_attach_internet_gateway(self):
        gateway = self.driver.ex_list_internet_gateways()[0]
        network = self.driver.ex_list_networks()[0]
        resp = self.driver.ex_attach_internet_gateway(gateway, network)
        self.assertTrue(resp)

    def test_ex_detach_internet_gateway(self):
        gateway = self.driver.ex_list_internet_gateways()[0]
        network = self.driver.ex_list_networks()[0]
        resp = self.driver.ex_detach_internet_gateway(gateway, network)
        self.assertTrue(resp)


# Regional variants: re-run the full EC2Tests suite against each region's
# fixtures by overriding the ``region`` class attribute.
class EC2USWest1Tests(EC2Tests):
    region = 'us-west-1'


class EC2USWest2Tests(EC2Tests):
    region = 'us-west-2'


class EC2EUWestTests(EC2Tests):
    region = 'eu-west-1'


class EC2APSE1Tests(EC2Tests):
    region = 'ap-southeast-1'


class EC2APNETests(EC2Tests):
    region = 'ap-northeast-1'


class EC2APSE2Tests(EC2Tests):
    region = 'ap-southeast-2'


class EC2SAEastTests(EC2Tests):
    region = 'sa-east-1'


# Tests for the old, deprecated way of instantiating a driver.
+class EC2OldStyleModelTests(EC2Tests): + driver_klass = EC2USWestNodeDriver + + def setUp(self): + EC2MockHttp.test = self + EC2NodeDriver.connectionCls.conn_classes = (None, EC2MockHttp) + EC2MockHttp.use_param = 'Action' + EC2MockHttp.type = None + + self.driver = self.driver_klass(*EC2_PARAMS) + + +class EC2USWest1OldStyleModelTests(EC2OldStyleModelTests): + driver_klass = EC2USWestNodeDriver + + +class EC2USWest2OldStyleModelTests(EC2OldStyleModelTests): + driver_klass = EC2USWestOregonNodeDriver + + +class EC2EUWestOldStyleModelTests(EC2OldStyleModelTests): + driver_klass = EC2EUNodeDriver + + +class EC2APSE1OldStyleModelTests(EC2OldStyleModelTests): + driver_klass = EC2APSENodeDriver + + +class EC2APNEOldStyleModelTests(EC2OldStyleModelTests): + driver_klass = EC2APNENodeDriver + + +class EC2APSE2OldStyleModelTests(EC2OldStyleModelTests): + driver_klass = EC2APSESydneyNodeDriver + + +class EC2SAEastOldStyleModelTests(EC2OldStyleModelTests): + driver_klass = EC2SAEastNodeDriver + + +class EC2MockHttp(MockHttpTestCase): + fixtures = ComputeFileFixtures('ec2') + + def _DescribeInstances(self, method, url, body, headers): + body = self.fixtures.load('describe_instances.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DescribeReservedInstances(self, method, url, body, headers): + body = self.fixtures.load('describe_reserved_instances.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DescribeAvailabilityZones(self, method, url, body, headers): + body = self.fixtures.load('describe_availability_zones.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _RebootInstances(self, method, url, body, headers): + body = self.fixtures.load('reboot_instances.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _StartInstances(self, method, url, body, headers): + body = self.fixtures.load('start_instances.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) 
+ + def _StopInstances(self, method, url, body, headers): + body = self.fixtures.load('stop_instances.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DescribeSecurityGroups(self, method, url, body, headers): + body = self.fixtures.load('describe_security_groups.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DeleteSecurityGroup(self, method, url, body, headers): + body = self.fixtures.load('delete_security_group.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _AuthorizeSecurityGroupIngress(self, method, url, body, headers): + body = self.fixtures.load('authorize_security_group_ingress.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DescribeImages(self, method, url, body, headers): + body = self.fixtures.load('describe_images.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _RegisterImages(self, method, url, body, headers): + body = self.fixtures.load('register_image.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _ex_imageids_DescribeImages(self, method, url, body, headers): + body = self.fixtures.load('describe_images_ex_imageids.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _RunInstances(self, method, url, body, headers): + body = self.fixtures.load('run_instances.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _ex_security_groups_RunInstances(self, method, url, body, headers): + self.assertUrlContainsQueryParams(url, {'SecurityGroup.1': 'group1'}) + self.assertUrlContainsQueryParams(url, {'SecurityGroup.2': 'group2'}) + + body = self.fixtures.load('run_instances.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _create_ex_blockdevicemappings_RunInstances(self, method, url, body, headers): + expected_params = { + 'BlockDeviceMapping.1.DeviceName': '/dev/sda1', + 'BlockDeviceMapping.1.Ebs.VolumeSize': 
'10', + 'BlockDeviceMapping.2.DeviceName': '/dev/sdb', + 'BlockDeviceMapping.2.VirtualName': 'ephemeral0', + 'BlockDeviceMapping.3.DeviceName': '/dev/sdc', + 'BlockDeviceMapping.3.VirtualName': 'ephemeral1' + } + self.assertUrlContainsQueryParams(url, expected_params) + + body = self.fixtures.load('run_instances.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _idempotent_RunInstances(self, method, url, body, headers): + body = self.fixtures.load('run_instances_idem.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _idempotent_mismatch_RunInstances(self, method, url, body, headers): + body = self.fixtures.load('run_instances_idem_mismatch.xml') + return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.BAD_REQUEST]) + + def _ex_iam_profile_RunInstances(self, method, url, body, headers): + body = self.fixtures.load('run_instances_iam_profile.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _TerminateInstances(self, method, url, body, headers): + body = self.fixtures.load('terminate_instances.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DescribeKeyPairs(self, method, url, body, headers): + body = self.fixtures.load('describe_key_pairs.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _get_one_DescribeKeyPairs(self, method, url, body, headers): + self.assertUrlContainsQueryParams(url, {'KeyName': 'gsg-keypair'}) + + body = self.fixtures.load('describe_key_pairs.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _doesnt_exist_DescribeKeyPairs(self, method, url, body, headers): + body = self.fixtures.load('describe_key_pairs_doesnt_exist.xml') + return (httplib.BAD_REQUEST, body, {}, + httplib.responses[httplib.BAD_REQUEST]) + + def _CreateKeyPair(self, method, url, body, headers): + body = self.fixtures.load('create_key_pair.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + 
+ def _ImportKeyPair(self, method, url, body, headers): + body = self.fixtures.load('import_key_pair.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DescribeTags(self, method, url, body, headers): + body = self.fixtures.load('describe_tags.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _CreateTags(self, method, url, body, headers): + body = self.fixtures.load('create_tags.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DeleteTags(self, method, url, body, headers): + body = self.fixtures.load('delete_tags.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DescribeAddresses(self, method, url, body, headers): + body = self.fixtures.load('describe_addresses_multi.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _AllocateAddress(self, method, url, body, headers): + body = self.fixtures.load('allocate_address.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _vpc_AllocateAddress(self, method, url, body, headers): + body = self.fixtures.load('allocate_vpc_address.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _AssociateAddress(self, method, url, body, headers): + body = self.fixtures.load('associate_address.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _vpc_AssociateAddress(self, method, url, body, headers): + body = self.fixtures.load('associate_vpc_address.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DisassociateAddress(self, method, url, body, headers): + body = self.fixtures.load('disassociate_address.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _ReleaseAddress(self, method, url, body, headers): + body = self.fixtures.load('release_address.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _all_addresses_DescribeAddresses(self, method, url, body, 
headers): + body = self.fixtures.load('describe_addresses_all.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _WITH_TAGS_DescribeAddresses(self, method, url, body, headers): + body = self.fixtures.load('describe_addresses_multi.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _ModifyInstanceAttribute(self, method, url, body, headers): + body = self.fixtures.load('modify_instance_attribute.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _idempotent_CreateTags(self, method, url, body, headers): + body = self.fixtures.load('create_tags.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _CreateVolume(self, method, url, body, headers): + body = self.fixtures.load('create_volume.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DeleteVolume(self, method, url, body, headers): + body = self.fixtures.load('delete_volume.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _AttachVolume(self, method, url, body, headers): + body = self.fixtures.load('attach_volume.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DetachVolume(self, method, url, body, headers): + body = self.fixtures.load('detach_volume.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DescribeVolumes(self, method, url, body, headers): + body = self.fixtures.load('describe_volumes.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _CreateSnapshot(self, method, url, body, headers): + body = self.fixtures.load('create_snapshot.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DescribeSnapshots(self, method, url, body, headers): + body = self.fixtures.load('describe_snapshots.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DeleteSnapshot(self, method, url, body, headers): + body = 
self.fixtures.load('delete_snapshot.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _CopyImage(self, method, url, body, headers): + body = self.fixtures.load('copy_image.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _CreateImage(self, method, url, body, headers): + body = self.fixtures.load('create_image.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DeregisterImage(self, method, url, body, headers): + body = self.fixtures.load('deregister_image.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DeleteKeyPair(self, method, url, body, headers): + self.assertUrlContainsQueryParams(url, {'KeyName': 'gsg-keypair'}) + + body = self.fixtures.load('delete_key_pair.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _ModifyImageAttribute(self, method, url, body, headers): + body = self.fixtures.load('modify_image_attribute.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DescribeAccountAttributes(self, method, url, body, headers): + body = self.fixtures.load('describe_account_attributes.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _CreateSecurityGroup(self, method, url, body, headers): + body = self.fixtures.load('create_security_group.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DescribeVpcs(self, method, url, body, headers): + body = self.fixtures.load('describe_vpcs.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _network_ids_DescribeVpcs(self, method, url, body, headers): + expected_params = { + 'VpcId.1': 'vpc-532335e1' + } + self.assertUrlContainsQueryParams(url, expected_params) + + body = self.fixtures.load('describe_vpcs.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _filters_DescribeVpcs(self, method, url, body, headers): + expected_params_1 = { + 'Filter.1.Name': 
'dhcp-options-id', + 'Filter.1.Value.1': 'dopt-7eded312', + 'Filter.2.Name': 'cidr', + 'Filter.2.Value.1': '192.168.51.0/24' + } + + expected_params_2 = { + 'Filter.1.Name': 'cidr', + 'Filter.1.Value.1': '192.168.51.0/24', + 'Filter.2.Name': 'dhcp-options-id', + 'Filter.2.Value.1': 'dopt-7eded312' + } + + try: + self.assertUrlContainsQueryParams(url, expected_params_1) + except AssertionError: + # dict ordering is not guaranteed + self.assertUrlContainsQueryParams(url, expected_params_2) + + body = self.fixtures.load('describe_vpcs.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _CreateVpc(self, method, url, body, headers): + body = self.fixtures.load('create_vpc.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DeleteVpc(self, method, url, body, headers): + body = self.fixtures.load('delete_vpc.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DescribeSubnets(self, method, url, body, headers): + body = self.fixtures.load('describe_subnets.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _CreateSubnet(self, method, url, body, headers): + body = self.fixtures.load('create_subnet.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DeleteSubnet(self, method, url, body, headers): + body = self.fixtures.load('delete_subnet.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _GetConsoleOutput(self, method, url, body, headers): + body = self.fixtures.load('get_console_output.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DescribeNetworkInterfaces(self, method, url, body, headers): + body = self.fixtures.load('describe_network_interfaces.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _CreateNetworkInterface(self, method, url, body, headers): + body = self.fixtures.load('create_network_interface.xml') + return (httplib.OK, body, {}, 
httplib.responses[httplib.OK]) + + def _DeleteNetworkInterface(self, method, url, body, headers): + body = self.fixtures.load('delete_network_interface.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _AttachNetworkInterface(self, method, url, body, headers): + body = self.fixtures.load('attach_network_interface.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DetachNetworkInterface(self, method, url, body, headers): + body = self.fixtures.load('detach_network_interface.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DescribeInternetGateways(self, method, url, body, headers): + body = self.fixtures.load('describe_internet_gateways.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _CreateInternetGateway(self, method, url, body, headers): + body = self.fixtures.load('create_internet_gateway.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DeleteInternetGateway(self, method, url, body, headers): + body = self.fixtures.load('delete_internet_gateway.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _AttachInternetGateway(self, method, url, body, headers): + body = self.fixtures.load('attach_internet_gateway.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DetachInternetGateway(self, method, url, body, headers): + body = self.fixtures.load('detach_internet_gateway.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +class EucMockHttp(EC2MockHttp): + fixtures = ComputeFileFixtures('ec2') + + def _services_Eucalyptus_DescribeInstances(self, method, url, body, + headers): + return self._DescribeInstances(method, url, body, headers) + + def _services_Eucalyptus_DescribeImages(self, method, url, body, + headers): + return self._DescribeImages(method, url, body, headers) + + def _services_Eucalyptus_DescribeAddresses(self, method, url, body, + headers): + 
return self._DescribeAddresses(method, url, body, headers) + + def _services_Eucalyptus_RebootInstances(self, method, url, body, + headers): + return self._RebootInstances(method, url, body, headers) + + def _services_Eucalyptus_TerminateInstances(self, method, url, body, + headers): + return self._TerminateInstances(method, url, body, headers) + + def _services_Eucalyptus_RunInstances(self, method, url, body, + headers): + return self._RunInstances(method, url, body, headers) + + def _services_Eucalyptus_CreateTags(self, method, url, body, + headers): + return self._CreateTags(method, url, body, headers) + + def _services_Eucalyptus_DescribeInstanceTypes(self, method, url, body, + headers): + body = self.fixtures.load('describe_instance_types.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +class NimbusTests(EC2Tests): + + def setUp(self): + NimbusNodeDriver.connectionCls.conn_classes = (None, EC2MockHttp) + EC2MockHttp.use_param = 'Action' + EC2MockHttp.type = None + self.driver = NimbusNodeDriver(key=EC2_PARAMS[0], secret=EC2_PARAMS[1], + host='some.nimbuscloud.com') + + def test_ex_describe_addresses_for_node(self): + # overridden from EC2Tests -- Nimbus doesn't support elastic IPs. + node = Node('i-4382922a', None, None, None, None, self.driver) + ip_addresses = self.driver.ex_describe_addresses_for_node(node) + self.assertEqual(len(ip_addresses), 0) + + def test_ex_describe_addresses(self): + # overridden from EC2Tests -- Nimbus doesn't support elastic IPs. 
+ node = Node('i-4382922a', None, None, None, None, self.driver) + nodes_elastic_ips = self.driver.ex_describe_addresses([node]) + + self.assertEqual(len(nodes_elastic_ips), 1) + self.assertEqual(len(nodes_elastic_ips[node.id]), 0) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + + ids = [s.id for s in sizes] + self.assertTrue('m1.small' in ids) + self.assertTrue('m1.large' in ids) + self.assertTrue('m1.xlarge' in ids) + + def test_list_nodes(self): + # overridden from EC2Tests -- Nimbus doesn't support elastic IPs. + node = self.driver.list_nodes()[0] + self.assertExecutedMethodCount(0) + public_ips = node.public_ips + self.assertEqual(node.id, 'i-4382922a') + self.assertEqual(len(node.public_ips), 1) + self.assertEqual(public_ips[0], '1.2.3.4') + self.assertEqual(node.extra['tags'], {}) + + node = self.driver.list_nodes()[1] + self.assertExecutedMethodCount(0) + public_ips = node.public_ips + self.assertEqual(node.id, 'i-8474834a') + self.assertEqual(len(node.public_ips), 1) + self.assertEqual(public_ips[0], '1.2.3.5') + self.assertEqual(node.extra['tags'], + {'Name': 'Test Server 2', 'Group': 'VPC Test'}) + + def test_ex_create_tags(self): + # Nimbus doesn't support creating tags so this one should be a + # passthrough + node = self.driver.list_nodes()[0] + self.driver.ex_create_tags(resource=node, tags={'foo': 'bar'}) + self.assertExecutedMethodCount(0) + + +class EucTests(LibcloudTestCase, TestCaseMixin): + + def setUp(self): + EucNodeDriver.connectionCls.conn_classes = (None, EucMockHttp) + EC2MockHttp.use_param = 'Action' + EC2MockHttp.type = None + self.driver = EucNodeDriver(key=EC2_PARAMS[0], secret=EC2_PARAMS[1], + host='some.eucalyptus.com', api_version='3.4.1') + + def test_list_locations_response(self): + try: + self.driver.list_locations() + except Exception: + pass + else: + self.fail('Exception was not thrown') + + def test_list_location(self): + pass + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + ids = [s.id 
for s in sizes] + self.assertEqual(len(ids), 18) + self.assertTrue('t1.micro' in ids) + self.assertTrue('m1.medium' in ids) + self.assertTrue('m3.xlarge' in ids) + + +class OutscaleTests(EC2Tests): + + def setUp(self): + OutscaleSASNodeDriver.connectionCls.conn_classes = (None, EC2MockHttp) + EC2MockHttp.use_param = 'Action' + EC2MockHttp.type = None + self.driver = OutscaleSASNodeDriver(key=EC2_PARAMS[0], + secret=EC2_PARAMS[1], + host='some.outscalecloud.com') + + def test_ex_create_network(self): + # overridden from EC2Tests -- Outscale don't support instance_tenancy + vpc = self.driver.ex_create_network('192.168.55.0/24', + name='Test VPC') + + self.assertEqual('vpc-ad3527cf', vpc.id) + self.assertEqual('192.168.55.0/24', vpc.cidr_block) + self.assertEqual('pending', vpc.extra['state']) + + def test_ex_copy_image(self): + # overridden from EC2Tests -- Outscale does not support copying images + image = self.driver.list_images()[0] + try: + self.driver.ex_copy_image('us-east-1', image, + name='Faux Image', + description='Test Image Copy') + except NotImplementedError: + pass + else: + self.fail('Exception was not thrown') + + def test_ex_get_limits(self): + # overridden from EC2Tests -- Outscale does not support getting limits + try: + self.driver.ex_get_limits() + except NotImplementedError: + pass + else: + self.fail('Exception was not thrown') + + def test_ex_create_network_interface(self): + # overridden from EC2Tests -- Outscale don't allow creating interfaces + subnet = self.driver.ex_list_subnets()[0] + try: + self.driver.ex_create_network_interface( + subnet, + name='Test Interface', + description='My Test') + except NotImplementedError: + pass + else: + self.fail('Exception was not thrown') + + def test_ex_delete_network_interface(self): + # overridden from EC2Tests -- Outscale don't allow deleting interfaces + interface = self.driver.ex_list_network_interfaces()[0] + try: + self.driver.ex_delete_network_interface(interface) + except NotImplementedError: 
+ pass + else: + self.fail('Exception was not thrown') + + def test_ex_attach_network_interface_to_node(self): + # overridden from EC2Tests -- Outscale don't allow attaching interfaces + node = self.driver.list_nodes()[0] + interface = self.driver.ex_list_network_interfaces()[0] + try: + self.driver.ex_attach_network_interface_to_node(interface, node, 1) + except NotImplementedError: + pass + else: + self.fail('Exception was not thrown') + + def test_ex_detach_network_interface(self): + # overridden from EC2Tests -- Outscale don't allow detaching interfaces + try: + self.driver.ex_detach_network_interface('eni-attach-2b588b47') + except NotImplementedError: + pass + else: + self.fail('Exception was not thrown') + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + + ids = [s.id for s in sizes] + self.assertTrue('m1.small' in ids) + self.assertTrue('m1.large' in ids) + self.assertTrue('m1.xlarge' in ids) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_ecp.py libcloud-0.15.1/libcloud/test/compute/test_ecp.py --- libcloud-0.5.0/libcloud/test/compute/test_ecp.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_ecp.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,131 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import unittest +from libcloud.utils.py3 import httplib + +from libcloud.compute.drivers.ecp import ECPNodeDriver +from libcloud.compute.types import NodeState + +from libcloud.test import MockHttp +from libcloud.test.compute import TestCaseMixin +from libcloud.test.file_fixtures import ComputeFileFixtures + +from libcloud.test.secrets import ECP_PARAMS + + +class ECPTests(unittest.TestCase, TestCaseMixin): + + def setUp(self): + ECPNodeDriver.connectionCls.conn_classes = (None, + ECPMockHttp) + self.driver = ECPNodeDriver(*ECP_PARAMS) + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + self.assertEqual(len(nodes), 2) + node = nodes[0] + self.assertEqual(node.id, '1') + self.assertEqual(node.name, 'dummy-1') + self.assertEqual(node.public_ips[0], "42.78.124.75") + self.assertEqual(node.state, NodeState.RUNNING) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 3) + size = sizes[0] + self.assertEqual(size.id, '1') + self.assertEqual(size.ram, 512) + self.assertEqual(size.disk, 0) + self.assertEqual(size.bandwidth, 0) + self.assertEqual(size.price, 0) + + def test_list_images(self): + images = self.driver.list_images() + self.assertEqual(len(images), 2) + self.assertEqual( + images[0].name, "centos54: AUTO import from /opt/enomalism2/repo/5d407a68-c76c-11de-86e5-000475cb7577.xvm2") + self.assertEqual(images[0].id, "1") + + name = "centos54 two: AUTO import from /opt/enomalism2/repo/5d407a68-c76c-11de-86e5-000475cb7577.xvm2" + self.assertEqual(images[1].name, name) + self.assertEqual(images[1].id, "2") + + def test_reboot_node(self): + # Raises exception on failure + node = self.driver.list_nodes()[0] + self.driver.reboot_node(node) + + def test_destroy_node(self): + # Raises exception on failure + node = self.driver.list_nodes()[0] + self.driver.destroy_node(node) + + def 
test_create_node(self): + # Raises exception on failure + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + node = self.driver.create_node( + name="api.ivan.net.nz", image=image, size=size) + self.assertEqual(node.name, "api.ivan.net.nz") + self.assertEqual(node.id, "1234") + + +class ECPMockHttp(MockHttp): + + fixtures = ComputeFileFixtures('ecp') + + def _modules_hosting(self, method, url, body, headers): + headers = {} + headers['set-cookie'] = 'vcloud-token=testtoken' + body = 'Anything' + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _rest_hosting_vm_1(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('vm_1_get.json') + if method == 'POST': + if body.find('delete', 0): + body = self.fixtures.load('vm_1_action_delete.json') + if body.find('stop', 0): + body = self.fixtures.load('vm_1_action_stop.json') + if body.find('start', 0): + body = self.fixtures.load('vm_1_action_start.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _rest_hosting_vm(self, method, url, body, headers): + if method == 'PUT': + body = self.fixtures.load('vm_put.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _rest_hosting_vm_list(self, method, url, body, headers): + body = self.fixtures.load('vm_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _rest_hosting_htemplate_list(self, method, url, body, headers): + body = self.fixtures.load('htemplate_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _rest_hosting_network_list(self, method, url, body, headers): + body = self.fixtures.load('network_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _rest_hosting_ptemplate_list(self, method, url, body, headers): + body = self.fixtures.load('ptemplate_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + 
sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_elasticstack.py libcloud-0.15.1/libcloud/test/compute/test_elasticstack.py --- libcloud-0.5.0/libcloud/test/compute/test_elasticstack.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_elasticstack.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,264 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +from libcloud.utils.py3 import httplib + +from libcloud.compute.base import Node +from libcloud.compute.drivers.elasticstack import ElasticStackException +from libcloud.compute.drivers.elastichosts import \ + ElasticHostsNodeDriver as ElasticHosts +from libcloud.compute.drivers.skalicloud import \ + SkaliCloudNodeDriver as SkaliCloud +from libcloud.compute.drivers.serverlove import \ + ServerLoveNodeDriver as ServerLove +from libcloud.common.types import InvalidCredsError, MalformedResponseError + +from libcloud.test import MockHttp, unittest +from libcloud.test.file_fixtures import ComputeFileFixtures + + +class ElasticStackTestCase(object): + + def setUp(self): + # Re-use ElasticHosts fixtures for the base ElasticStack platform tests + """ElasticStack.type = Provider.ELASTICHOSTS + ElasticStack.api_name = 'elastichosts' + + ElasticStackBaseConnection.host = 'test.com' + ElasticStack.connectionCls.conn_classes = (None, + ElasticStackMockHttp) + ElasticStack._standard_drives = ElasticHosts._standard_drives + + self.driver = ElasticStack('foo', 'bar') + """ + self.mockHttp = ElasticStackMockHttp + self.mockHttp.type = None + + self.node = Node(id=72258, name=None, state=None, public_ips=None, + private_ips=None, driver=self.driver) + + def test_invalid_creds(self): + self.mockHttp.type = 'UNAUTHORIZED' + try: + self.driver.list_nodes() + except InvalidCredsError: + e = sys.exc_info()[1] + self.assertEqual(True, isinstance(e, InvalidCredsError)) + else: + self.fail('test should have thrown') + + def test_malformed_response(self): + self.mockHttp.type = 'MALFORMED' + try: + self.driver.list_nodes() + except MalformedResponseError: + pass + else: + self.fail('test should have thrown') + + def test_parse_error(self): + self.mockHttp.type = 'PARSE_ERROR' + try: + self.driver.list_nodes() + except Exception: + e = sys.exc_info()[1] + self.assertTrue(str(e).find('X-Elastic-Error') != -1) + else: + self.fail('test should have thrown') + + def 
test_ex_set_node_configuration(self): + success = self.driver.ex_set_node_configuration(node=self.node, + name='name', + cpu='2') + self.assertTrue(success) + + def test_ex_set_node_configuration_invalid_keys(self): + try: + self.driver.ex_set_node_configuration(node=self.node, foo='bar') + except ElasticStackException: + pass + else: + self.fail( + 'Invalid option specified, but an exception was not thrown') + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + self.assertTrue(isinstance(nodes, list)) + self.assertEqual(len(nodes), 1) + + node = nodes[0] + self.assertEqual(node.public_ips[0], "1.2.3.4") + self.assertEqual(node.public_ips[1], "1.2.3.5") + self.assertEqual(node.extra['smp'], 1) + self.assertEqual( + node.extra['ide:0:0'], "b6049e7a-aa1b-47f9-b21d-cdf2354e28d3") + + def test_list_sizes(self): + images = self.driver.list_sizes() + self.assertEqual(len(images), 6) + image = [i for i in images if i.id == 'small'][0] + self.assertEqual(image.id, 'small') + self.assertEqual(image.name, 'Small instance') + self.assertEqual(image.cpu, 2000) + self.assertEqual(image.ram, 1700) + self.assertEqual(image.disk, 160) + self.assertTrue(isinstance(image.price, float)) + + def test_list_images(self): + images = self.driver.list_images() + self.assertEqual(len(images), len(self.driver._standard_drives)) + + for uuid, values in list(self.driver._standard_drives.items()): + self.assertEqual( + len([image for image in images if image.id == uuid]), 1) + + def test_reboot_node(self): + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver.reboot_node(node)) + + def test_destroy_node(self): + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver.destroy_node(node)) + + def test_create_node(self): + sizes = self.driver.list_sizes() + size = [s for s in sizes if s.id == 'large'][0] + image = self.image + + self.assertTrue(self.driver.create_node(name="api.ivan.net.nz", + image=image, size=size)) + + +class 
ElasticHostsTestCase(ElasticStackTestCase, unittest.TestCase): + + def setUp(self): + ElasticHosts.connectionCls.conn_classes = (None, + ElasticStackMockHttp) + + self.driver = ElasticHosts('foo', 'bar') + images = self.driver.list_images() + self.image = [i for i in images if + i.id == '38df0986-4d85-4b76-b502-3878ffc80161'][0] + super(ElasticHostsTestCase, self).setUp() + + def test_multiple_drivers_with_different_regions(self): + driver1 = ElasticHosts('foo', 'bar', region='lon-p') + driver2 = ElasticHosts('foo', 'bar', region='sat-p') + + self.assertTrue(driver1.connection.host.startswith('api-lon-p')) + self.assertTrue(driver2.connection.host.startswith('api-sat-p')) + + driver1.list_nodes() + driver2.list_nodes() + driver1.list_nodes() + + self.assertTrue(driver1.connection.host.startswith('api-lon-p')) + self.assertTrue(driver2.connection.host.startswith('api-sat-p')) + + def test_invalid_region(self): + expected_msg = r'Invalid region.+' + self.assertRaisesRegexp(ValueError, expected_msg, ElasticHosts, + 'foo', 'bar', region='invalid') + + +class SkaliCloudTestCase(ElasticStackTestCase, unittest.TestCase): + + def setUp(self): + SkaliCloud.connectionCls.conn_classes = (None, + ElasticStackMockHttp) + + self.driver = SkaliCloud('foo', 'bar') + + images = self.driver.list_images() + self.image = [i for i in images if + i.id == '90aa51f2-15c0-4cff-81ee-e93aa20b9468'][0] + super(SkaliCloudTestCase, self).setUp() + + +class ServerLoveTestCase(ElasticStackTestCase, unittest.TestCase): + + def setUp(self): + ServerLove.connectionCls.conn_classes = (None, + ElasticStackMockHttp) + + self.driver = ServerLove('foo', 'bar') + + images = self.driver.list_images() + self.image = [i for i in images if + i.id == '679f5f44-0be7-4745-a658-cccd4334c1aa'][0] + super(ServerLoveTestCase, self).setUp() + + +class ElasticStackMockHttp(MockHttp): + + fixtures = ComputeFileFixtures('elastichosts') + + def _servers_info_UNAUTHORIZED(self, method, url, body, headers): + return 
(httplib.UNAUTHORIZED, body, {}, httplib.responses[httplib.NO_CONTENT]) + + def _servers_info_MALFORMED(self, method, url, body, headers): + body = "{malformed: '" + return (httplib.OK, body, {}, httplib.responses[httplib.NO_CONTENT]) + + def _servers_info_PARSE_ERROR(self, method, url, body, headers): + return (505, body, {}, httplib.responses[httplib.NO_CONTENT]) + + def _servers_b605ca90_c3e6_4cee_85f8_a8ebdf8f9903_reset(self, method, url, body, headers): + return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) + + def _servers_b605ca90_c3e6_4cee_85f8_a8ebdf8f9903_destroy(self, method, url, body, headers): + return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) + + def _drives_create(self, method, url, body, headers): + body = self.fixtures.load('drives_create.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _drives_0012e24a_6eae_4279_9912_3432f698cec8_image_38df0986_4d85_4b76_b502_3878ffc80161_gunzip(self, method, + url, body, + headers): + # ElasticHosts image + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _drives_0012e24a_6eae_4279_9912_3432f698cec8_image_90aa51f2_15c0_4cff_81ee_e93aa20b9468_gunzip(self, method, + url, body, + headers): + # Skalikloud image + return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) + + def _drives_0012e24a_6eae_4279_9912_3432f698cec8_image_679f5f44_0be7_4745_a658_cccd4334c1aa_gunzip(self, method, + url, body, + headers): + # ServerLove image + return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) + + def _drives_0012e24a_6eae_4279_9912_3432f698cec8_info(self, method, url, body, headers): + body = self.fixtures.load('drives_info.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _servers_create(self, method, url, body, headers): + body = self.fixtures.load('servers_create.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def 
_servers_info(self, method, url, body, headers): + body = self.fixtures.load('servers_info.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _servers_72258_set(self, method, url, body, headers): + body = '{}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_exoscale.py libcloud-0.15.1/libcloud/test/compute/test_exoscale.py --- libcloud-0.5.0/libcloud/test/compute/test_exoscale.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_exoscale.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys + +from libcloud.compute.drivers.exoscale import ExoscaleNodeDriver +from libcloud.test.compute.test_cloudstack import CloudStackCommonTestCase + +from libcloud.test import unittest + + +class ExoscaleNodeDriverTestCase(CloudStackCommonTestCase, unittest.TestCase): + driver_klass = ExoscaleNodeDriver + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_gandi.py libcloud-0.15.1/libcloud/test/compute/test_gandi.py --- libcloud-0.5.0/libcloud/test/compute/test_gandi.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_gandi.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,322 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +import sys +import random +import string + +from libcloud.utils.py3 import httplib + +from libcloud.compute.drivers.gandi import GandiNodeDriver +from libcloud.common.gandi import GandiException +from libcloud.compute.types import NodeState + +from libcloud.test.file_fixtures import ComputeFileFixtures +from libcloud.test.secrets import GANDI_PARAMS +from libcloud.test.common.test_gandi import BaseGandiMockHttp + + +class GandiTests(unittest.TestCase): + + node_name = 'test2' + + def setUp(self): + GandiNodeDriver.connectionCls.conn_classes = ( + GandiMockHttp, GandiMockHttp) + GandiMockHttp.type = None + self.driver = GandiNodeDriver(*GANDI_PARAMS) + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + self.assertTrue(len(nodes) > 0) + self.assertTrue(len(nodes[0].public_ips) > 1) + + def test_list_locations(self): + loc = list(filter(lambda x: 'france' in x.country.lower(), + self.driver.list_locations()))[0] + self.assertEqual(loc.country, 'France') + + def test_list_images(self): + loc = list(filter(lambda x: 'france' in x.country.lower(), + self.driver.list_locations()))[0] + images = self.driver.list_images(loc) + self.assertTrue(len(images) > 2) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertTrue(len(sizes) >= 1) + + def test_destroy_node_running(self): + nodes = self.driver.list_nodes() + test_node = list(filter(lambda x: x.state == NodeState.RUNNING, + nodes))[0] + self.assertTrue(self.driver.destroy_node(test_node)) + + def test_destroy_node_halted(self): + nodes = self.driver.list_nodes() + test_node = list(filter(lambda x: x.state == NodeState.TERMINATED, + nodes))[0] + self.assertTrue(self.driver.destroy_node(test_node)) + + def test_reboot_node(self): + nodes = self.driver.list_nodes() + test_node = list(filter(lambda x: x.state == NodeState.RUNNING, + nodes))[0] + self.assertTrue(self.driver.reboot_node(test_node)) + + def test_create_node(self): + login = 'libcloud' + passwd = 
''.join(random.choice(string.ascii_letters) + for i in range(10)) + + # Get france datacenter + loc = list(filter(lambda x: 'france' in x.country.lower(), + self.driver.list_locations()))[0] + + # Get a debian image + images = self.driver.list_images(loc) + images = [x for x in images if x.name.lower().startswith('debian')] + img = list(filter(lambda x: '5' in x.name, images))[0] + + # Get a configuration size + size = self.driver.list_sizes()[0] + node = self.driver.create_node(name=self.node_name, login=login, + password=passwd, image=img, + location=loc, size=size) + self.assertEqual(node.name, self.node_name) + + def test_create_volume(self): + loc = list(filter(lambda x: 'france' in x.country.lower(), + self.driver.list_locations()))[0] + volume = self.driver.create_volume( + size=1024, name='libcloud', location=loc) + self.assertEqual(volume.name, 'libcloud') + self.assertEqual(volume.size, 1024) + + def test_list_volumes(self): + disks = self.driver.list_volumes() + self.assertTrue(len(disks) > 0) + + def test_destroy_volume(self): + volumes = self.driver.list_volumes() + test_vol = list(filter(lambda x: x.name == 'test_disk', + volumes))[0] + self.assertTrue(self.driver.destroy_volume(test_vol)) + + def test_attach_volume(self): + disks = self.driver.list_volumes() + nodes = self.driver.list_nodes() + res = self.driver.attach_volume(nodes[0], disks[0]) + self.assertTrue(res) + + def test_detach_volume(self): + disks = self.driver.list_volumes() + nodes = self.driver.list_nodes() + res = self.driver.detach_volume(nodes[0], disks[0]) + self.assertTrue(res) + + def test_ex_list_interfaces(self): + ifaces = self.driver.ex_list_interfaces() + self.assertTrue(len(ifaces) > 0) + + def test_ex_attach_interface(self): + ifaces = self.driver.ex_list_interfaces() + nodes = self.driver.list_nodes() + res = self.driver.ex_node_attach_interface(nodes[0], ifaces[0]) + self.assertTrue(res) + + def test_ex_detach_interface(self): + ifaces = self.driver.ex_list_interfaces() 
+ nodes = self.driver.list_nodes() + res = self.driver.ex_node_detach_interface(nodes[0], ifaces[0]) + self.assertTrue(res) + + def test_ex_snapshot_disk(self): + disks = self.driver.list_volumes() + self.assertTrue(self.driver.ex_snapshot_disk(disks[2])) + self.assertRaises(GandiException, + self.driver.ex_snapshot_disk, disks[0]) + + def test_ex_update_disk(self): + disks = self.driver.list_volumes() + self.assertTrue(self.driver.ex_update_disk(disks[0], new_size=4096)) + + +class GandiRatingTests(unittest.TestCase): + + """Tests where rating model is involved""" + + node_name = 'test2' + + def setUp(self): + GandiNodeDriver.connectionCls.conn_classes = ( + GandiMockRatingHttp, GandiMockRatingHttp) + GandiMockRatingHttp.type = None + self.driver = GandiNodeDriver(*GANDI_PARAMS) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 4) + + def test_create_node(self): + login = 'libcloud' + passwd = ''.join(random.choice(string.ascii_letters) + for i in range(10)) + + # Get france datacenter + loc = list(filter(lambda x: 'france' in x.country.lower(), + self.driver.list_locations()))[0] + + # Get a debian image + images = self.driver.list_images(loc) + images = [x for x in images if x.name.lower().startswith('debian')] + img = list(filter(lambda x: '5' in x.name, images))[0] + + # Get a configuration size + size = self.driver.list_sizes()[0] + node = self.driver.create_node(name=self.node_name, login=login, + password=passwd, image=img, + location=loc, size=size) + self.assertEqual(node.name, self.node_name) + + +class GandiMockHttp(BaseGandiMockHttp): + + fixtures = ComputeFileFixtures('gandi') + + def _xmlrpc__hosting_datacenter_list(self, method, url, body, headers): + body = self.fixtures.load('datacenter_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_image_list(self, method, url, body, headers): + body = self.fixtures.load('image_list_dc0.xml') + return (httplib.OK, 
body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_vm_list(self, method, url, body, headers): + body = self.fixtures.load('vm_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_ip_list(self, method, url, body, headers): + body = self.fixtures.load('ip_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_account_info(self, method, url, body, headers): + body = self.fixtures.load('account_info.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_vm_info(self, method, url, body, headers): + body = self.fixtures.load('vm_info.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_vm_delete(self, method, url, body, headers): + body = self.fixtures.load('vm_delete.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__operation_info(self, method, url, body, headers): + body = self.fixtures.load('operation_info.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_vm_create_from(self, method, url, body, headers): + body = self.fixtures.load('vm_create_from.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_vm_reboot(self, method, url, body, headers): + body = self.fixtures.load('vm_reboot.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_vm_stop(self, method, url, body, headers): + body = self.fixtures.load('vm_stop.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_iface_list(self, method, url, body, headers): + body = self.fixtures.load('iface_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_disk_list(self, method, url, body, headers): + body = self.fixtures.load('disk_list.xml') + return (httplib.OK, body, {}, 
httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_vm_iface_attach(self, method, url, body, headers): + body = self.fixtures.load('iface_attach.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_vm_iface_detach(self, method, url, body, headers): + body = self.fixtures.load('iface_detach.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_vm_disk_attach(self, method, url, body, headers): + body = self.fixtures.load('disk_attach.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_vm_disk_detach(self, method, url, body, headers): + body = self.fixtures.load('disk_detach.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_disk_create(self, method, url, body, headers): + body = self.fixtures.load('disk_create.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_disk_create_from(self, method, url, body, headers): + body = self.fixtures.load('disk_create_from.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_disk_info(self, method, url, body, headers): + body = self.fixtures.load('disk_info.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_disk_update(self, method, url, body, headers): + body = self.fixtures.load('disk_update.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_disk_delete(self, method, url, body, headers): + body = self.fixtures.load('disk_delete.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +class GandiMockRatingHttp(BaseGandiMockHttp): + + """Fixtures needed for tests related to rating model""" + + fixtures = ComputeFileFixtures('gandi') + + def _xmlrpc__hosting_datacenter_list(self, method, url, body, headers): + body = self.fixtures.load('datacenter_list.xml') + return (httplib.OK, 
body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_image_list(self, method, url, body, headers): + body = self.fixtures.load('image_list_dc0.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_vm_create_from(self, method, url, body, headers): + body = self.fixtures.load('vm_create_from.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__operation_info(self, method, url, body, headers): + body = self.fixtures.load('operation_info.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__hosting_vm_info(self, method, url, body, headers): + body = self.fixtures.load('vm_info.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + # Specific to rating tests + def _xmlrpc__hosting_account_info(self, method, url, body, headers): + body = self.fixtures.load('account_info_rating.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_gce.py libcloud-0.15.1/libcloud/test/compute/test_gce.py --- libcloud-0.5.0/libcloud/test/compute/test_gce.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_gce.py 2014-06-11 14:28:05.000000000 +0000 @@ -0,0 +1,1278 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Tests for Google Compute Engine Driver +""" +import sys +import unittest +import datetime + +from libcloud.utils.py3 import httplib +from libcloud.compute.drivers.gce import (GCENodeDriver, API_VERSION, + timestamp_to_datetime, + GCEAddress, GCEHealthCheck, + GCEFirewall, GCEForwardingRule, + GCENetwork, + GCEZone) +from libcloud.common.google import (GoogleBaseAuthConnection, + GoogleInstalledAppAuthConnection, + GoogleBaseConnection, + ResourceNotFoundError, ResourceExistsError) +from libcloud.test.common.test_google import GoogleAuthMockHttp +from libcloud.compute.base import Node, StorageVolume + +from libcloud.test import MockHttpTestCase, LibcloudTestCase +from libcloud.test.compute import TestCaseMixin +from libcloud.test.file_fixtures import ComputeFileFixtures + +from libcloud.test.secrets import GCE_PARAMS, GCE_KEYWORD_PARAMS + + +class GCENodeDriverTest(LibcloudTestCase, TestCaseMixin): + + """ + Google Compute Engine Test Class. + """ + # Mock out a few specific calls that interact with the user, system or + # environment. 
+ GoogleBaseConnection._get_token_info_from_file = lambda x: None + GoogleBaseConnection._write_token_info_to_file = lambda x: None + GoogleInstalledAppAuthConnection.get_code = lambda x: '1234' + GCEZone._now = lambda x: datetime.datetime(2013, 6, 26, 19, 0, 0) + datacenter = 'us-central1-a' + + def setUp(self): + GCEMockHttp.test = self + GCENodeDriver.connectionCls.conn_classes = (GCEMockHttp, GCEMockHttp) + GoogleBaseAuthConnection.conn_classes = (GoogleAuthMockHttp, + GoogleAuthMockHttp) + GCEMockHttp.type = None + kwargs = GCE_KEYWORD_PARAMS.copy() + kwargs['auth_type'] = 'IA' + kwargs['datacenter'] = self.datacenter + self.driver = GCENodeDriver(*GCE_PARAMS, **kwargs) + + def test_default_scopes(self): + self.assertEqual(self.driver.scopes, None) + + def test_timestamp_to_datetime(self): + timestamp1 = '2013-06-26T10:05:19.340-07:00' + datetime1 = datetime.datetime(2013, 6, 26, 17, 5, 19) + self.assertEqual(timestamp_to_datetime(timestamp1), datetime1) + timestamp2 = '2013-06-26T17:43:15.000-00:00' + datetime2 = datetime.datetime(2013, 6, 26, 17, 43, 15) + self.assertEqual(timestamp_to_datetime(timestamp2), datetime2) + + def test_get_region_from_zone(self): + zone1 = self.driver.ex_get_zone('us-central1-a') + expected_region1 = 'us-central1' + region1 = self.driver._get_region_from_zone(zone1) + self.assertEqual(region1.name, expected_region1) + zone2 = self.driver.ex_get_zone('europe-west1-b') + expected_region2 = 'europe-west1' + region2 = self.driver._get_region_from_zone(zone2) + self.assertEqual(region2.name, expected_region2) + + def test_find_zone_or_region(self): + zone1 = self.driver._find_zone_or_region('libcloud-demo-np-node', + 'instances') + self.assertEqual(zone1.name, 'us-central2-a') + zone2 = self.driver._find_zone_or_region( + 'libcloud-demo-europe-np-node', 'instances') + self.assertEqual(zone2.name, 'europe-west1-a') + region = self.driver._find_zone_or_region('libcloud-demo-address', + 'addresses', region=True) + 
self.assertEqual(region.name, 'us-central1') + + def test_match_images(self): + project = 'debian-cloud' + image = self.driver._match_images(project, 'debian-7') + self.assertEqual(image.name, 'debian-7-wheezy-v20131120') + image = self.driver._match_images(project, 'debian-6') + self.assertEqual(image.name, 'debian-6-squeeze-v20130926') + + def test_ex_list_addresses(self): + address_list = self.driver.ex_list_addresses() + address_list_all = self.driver.ex_list_addresses('all') + address_list_uc1 = self.driver.ex_list_addresses('us-central1') + self.assertEqual(len(address_list), 2) + self.assertEqual(len(address_list_all), 4) + self.assertEqual(address_list[0].name, 'libcloud-demo-address') + self.assertEqual(address_list_uc1[0].name, 'libcloud-demo-address') + names = [a.name for a in address_list_all] + self.assertTrue('libcloud-demo-address' in names) + + def test_ex_list_healthchecks(self): + healthchecks = self.driver.ex_list_healthchecks() + self.assertEqual(len(healthchecks), 3) + self.assertEqual(healthchecks[0].name, 'basic-check') + + def test_ex_list_firewalls(self): + firewalls = self.driver.ex_list_firewalls() + self.assertEqual(len(firewalls), 5) + self.assertEqual(firewalls[0].name, 'default-allow-internal') + + def test_ex_list_forwarding_rules(self): + forwarding_rules = self.driver.ex_list_forwarding_rules() + forwarding_rules_all = self.driver.ex_list_forwarding_rules('all') + forwarding_rules_uc1 = self.driver.ex_list_forwarding_rules( + 'us-central1') + self.assertEqual(len(forwarding_rules), 2) + self.assertEqual(len(forwarding_rules_all), 2) + self.assertEqual(forwarding_rules[0].name, 'lcforwardingrule') + self.assertEqual(forwarding_rules_uc1[0].name, 'lcforwardingrule') + names = [f.name for f in forwarding_rules_all] + self.assertTrue('lcforwardingrule' in names) + + def test_list_images(self): + local_images = self.driver.list_images() + debian_images = self.driver.list_images(ex_project='debian-cloud') + 
self.assertEqual(len(local_images), 3) + self.assertEqual(len(debian_images), 19) + self.assertEqual(local_images[0].name, 'debian-7-wheezy-v20130617') + self.assertEqual(local_images[1].name, 'centos-6-v20131118') + + def test_list_locations(self): + locations = self.driver.list_locations() + self.assertEqual(len(locations), 5) + self.assertEqual(locations[0].name, 'europe-west1-a') + + def test_ex_list_networks(self): + networks = self.driver.ex_list_networks() + self.assertEqual(len(networks), 3) + self.assertEqual(networks[0].name, 'default') + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + nodes_all = self.driver.list_nodes(ex_zone='all') + nodes_uc1a = self.driver.list_nodes(ex_zone='us-central1-a') + self.assertEqual(len(nodes), 1) + self.assertEqual(len(nodes_all), 8) + self.assertEqual(len(nodes_uc1a), 1) + self.assertEqual(nodes[0].name, 'node-name') + self.assertEqual(nodes_uc1a[0].name, 'node-name') + names = [n.name for n in nodes_all] + self.assertTrue('node-name' in names) + + def test_ex_list_regions(self): + regions = self.driver.ex_list_regions() + self.assertEqual(len(regions), 3) + self.assertEqual(regions[0].name, 'europe-west1') + + def test_ex_list_snapshots(self): + snapshots = self.driver.ex_list_snapshots() + self.assertEqual(len(snapshots), 2) + self.assertEqual(snapshots[0].name, 'lcsnapshot') + + def test_ex_list_targetpools(self): + target_pools = self.driver.ex_list_targetpools() + target_pools_all = self.driver.ex_list_targetpools('all') + target_pools_uc1 = self.driver.ex_list_targetpools('us-central1') + self.assertEqual(len(target_pools), 2) + self.assertEqual(len(target_pools_all), 3) + self.assertEqual(len(target_pools_uc1), 2) + self.assertEqual(target_pools[0].name, 'lctargetpool') + self.assertEqual(target_pools_uc1[0].name, 'lctargetpool') + names = [t.name for t in target_pools_all] + self.assertTrue('www-pool' in names) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + sizes_all = 
self.driver.list_sizes('all') + self.assertEqual(len(sizes), 22) + self.assertEqual(len(sizes_all), 100) + self.assertEqual(sizes[0].name, 'f1-micro') + self.assertEqual(sizes[0].extra['zone'].name, 'us-central1-a') + names = [s.name for s in sizes_all] + self.assertEqual(names.count('n1-standard-1'), 5) + + def test_list_volumes(self): + volumes = self.driver.list_volumes() + volumes_all = self.driver.list_volumes('all') + volumes_uc1a = self.driver.list_volumes('us-central1-a') + self.assertEqual(len(volumes), 2) + self.assertEqual(len(volumes_all), 10) + self.assertEqual(len(volumes_uc1a), 2) + self.assertEqual(volumes[0].name, 'lcdisk') + self.assertEqual(volumes_uc1a[0].name, 'lcdisk') + names = [v.name for v in volumes_all] + self.assertTrue('libcloud-demo-europe-boot-disk' in names) + + def test_ex_list_zones(self): + zones = self.driver.ex_list_zones() + self.assertEqual(len(zones), 5) + self.assertEqual(zones[0].name, 'europe-west1-a') + + def test_ex_create_address(self): + address_name = 'lcaddress' + address = self.driver.ex_create_address(address_name) + self.assertTrue(isinstance(address, GCEAddress)) + self.assertEqual(address.name, address_name) + + def test_ex_create_healthcheck(self): + healthcheck_name = 'lchealthcheck' + kwargs = {'host': 'lchost', + 'path': '/lc', + 'port': 8000, + 'interval': 10, + 'timeout': 10, + 'unhealthy_threshold': 4, + 'healthy_threshold': 3} + hc = self.driver.ex_create_healthcheck(healthcheck_name, **kwargs) + self.assertTrue(isinstance(hc, GCEHealthCheck)) + self.assertEqual(hc.name, healthcheck_name) + self.assertEqual(hc.path, '/lc') + self.assertEqual(hc.port, 8000) + self.assertEqual(hc.interval, 10) + + def test_ex_create_firewall(self): + firewall_name = 'lcfirewall' + allowed = [{'IPProtocol': 'tcp', 'ports': ['4567']}] + source_tags = ['libcloud'] + firewall = self.driver.ex_create_firewall(firewall_name, allowed, + source_tags=source_tags) + self.assertTrue(isinstance(firewall, GCEFirewall)) + 
self.assertEqual(firewall.name, firewall_name) + + def test_ex_create_forwarding_rule(self): + fwr_name = 'lcforwardingrule' + targetpool = 'lctargetpool' + region = 'us-central1' + fwr = self.driver.ex_create_forwarding_rule(fwr_name, targetpool, + region=region, + port_range='8000-8500') + self.assertTrue(isinstance(fwr, GCEForwardingRule)) + self.assertEqual(fwr.name, fwr_name) + + def test_ex_create_network(self): + network_name = 'lcnetwork' + cidr = '10.11.0.0/16' + network = self.driver.ex_create_network(network_name, cidr) + self.assertTrue(isinstance(network, GCENetwork)) + self.assertEqual(network.name, network_name) + self.assertEqual(network.cidr, cidr) + + def test_create_node_req(self): + image = self.driver.ex_get_image('debian-7') + size = self.driver.ex_get_size('n1-standard-1') + location = self.driver.zone + network = self.driver.ex_get_network('default') + tags = ['libcloud'] + metadata = [{'key': 'test_key', 'value': 'test_value'}] + boot_disk = self.driver.ex_get_volume('lcdisk') + node_request, node_data = self.driver._create_node_req('lcnode', size, + image, location, + network, tags, + metadata, + boot_disk) + self.assertEqual(node_request, '/zones/%s/instances' % location.name) + self.assertEqual(node_data['metadata'][0]['key'], 'test_key') + self.assertEqual(node_data['tags']['items'][0], 'libcloud') + self.assertEqual(node_data['name'], 'lcnode') + self.assertTrue(node_data['disks'][0]['boot']) + + def test_create_node(self): + node_name = 'node-name' + image = self.driver.ex_get_image('debian-7') + size = self.driver.ex_get_size('n1-standard-1') + node = self.driver.create_node(node_name, size, image) + self.assertTrue(isinstance(node, Node)) + self.assertEqual(node.name, node_name) + + def test_create_node_existing(self): + node_name = 'libcloud-demo-europe-np-node' + image = self.driver.ex_get_image('debian-7') + size = self.driver.ex_get_size('n1-standard-1', zone='europe-west1-a') + self.assertRaises(ResourceExistsError, 
self.driver.create_node, + node_name, size, image, location='europe-west1-a') + + def test_ex_create_multiple_nodes(self): + base_name = 'lcnode' + image = self.driver.ex_get_image('debian-7') + size = self.driver.ex_get_size('n1-standard-1') + number = 2 + nodes = self.driver.ex_create_multiple_nodes(base_name, size, image, + number) + self.assertEqual(len(nodes), 2) + self.assertTrue(isinstance(nodes[0], Node)) + self.assertTrue(isinstance(nodes[1], Node)) + self.assertEqual(nodes[0].name, '%s-000' % base_name) + self.assertEqual(nodes[1].name, '%s-001' % base_name) + + def test_ex_create_targetpool(self): + targetpool_name = 'lctargetpool' + region = 'us-central1' + healthchecks = ['libcloud-lb-demo-healthcheck'] + node1 = self.driver.ex_get_node('libcloud-lb-demo-www-000', + 'us-central1-b') + node2 = self.driver.ex_get_node('libcloud-lb-demo-www-001', + 'us-central1-b') + nodes = [node1, node2] + targetpool = self.driver.ex_create_targetpool( + targetpool_name, region=region, healthchecks=healthchecks, + nodes=nodes) + self.assertEqual(targetpool.name, targetpool_name) + self.assertEqual(len(targetpool.nodes), len(nodes)) + self.assertEqual(targetpool.region.name, region) + + def test_ex_create_volume_snapshot(self): + snapshot_name = 'lcsnapshot' + volume = self.driver.ex_get_volume('lcdisk') + snapshot = volume.snapshot(snapshot_name) + self.assertEqual(snapshot.name, snapshot_name) + self.assertEqual(snapshot.size, '1') + + def test_create_volume(self): + volume_name = 'lcdisk' + size = 1 + volume = self.driver.create_volume(size, volume_name) + self.assertTrue(isinstance(volume, StorageVolume)) + self.assertEqual(volume.name, volume_name) + + def test_ex_update_healthcheck(self): + healthcheck_name = 'lchealthcheck' + healthcheck = self.driver.ex_get_healthcheck(healthcheck_name) + healthcheck.port = 9000 + healthcheck2 = self.driver.ex_update_healthcheck(healthcheck) + self.assertTrue(isinstance(healthcheck2, GCEHealthCheck)) + + def 
test_ex_update_firewall(self): + firewall_name = 'lcfirewall' + firewall = self.driver.ex_get_firewall(firewall_name) + firewall.source_ranges = ['10.0.0.0/16'] + firewall.source_tags = ['libcloud', 'test'] + firewall2 = self.driver.ex_update_firewall(firewall) + self.assertTrue(isinstance(firewall2, GCEFirewall)) + + def test_ex_targetpool_remove_add_node(self): + targetpool = self.driver.ex_get_targetpool('lctargetpool') + node = self.driver.ex_get_node('libcloud-lb-demo-www-001', + 'us-central1-b') + remove_node = self.driver.ex_targetpool_remove_node(targetpool, node) + self.assertTrue(remove_node) + self.assertEqual(len(targetpool.nodes), 1) + + add_node = self.driver.ex_targetpool_add_node(targetpool, node) + self.assertTrue(add_node) + self.assertEqual(len(targetpool.nodes), 2) + + def test_ex_targetpool_remove_add_healthcheck(self): + targetpool = self.driver.ex_get_targetpool('lctargetpool') + healthcheck = self.driver.ex_get_healthcheck( + 'libcloud-lb-demo-healthcheck') + remove_healthcheck = self.driver.ex_targetpool_remove_healthcheck( + targetpool, healthcheck) + self.assertTrue(remove_healthcheck) + self.assertEqual(len(targetpool.healthchecks), 0) + + add_healthcheck = self.driver.ex_targetpool_add_healthcheck( + targetpool, healthcheck) + self.assertTrue(add_healthcheck) + self.assertEqual(len(targetpool.healthchecks), 1) + + def test_reboot_node(self): + node = self.driver.ex_get_node('node-name') + reboot = self.driver.reboot_node(node) + self.assertTrue(reboot) + + def test_ex_set_node_tags(self): + new_tags = ['libcloud'] + node = self.driver.ex_get_node('node-name') + set_tags = self.driver.ex_set_node_tags(node, new_tags) + self.assertTrue(set_tags) + + def test_attach_volume(self): + volume = self.driver.ex_get_volume('lcdisk') + node = self.driver.ex_get_node('node-name') + attach = volume.attach(node) + self.assertTrue(attach) + + def test_detach_volume(self): + volume = self.driver.ex_get_volume('lcdisk') + node = 
self.driver.ex_get_node('node-name') + # This fails since the node is required + detach = volume.detach() + self.assertFalse(detach) + # This should pass + detach = self.driver.detach_volume(volume, node) + self.assertTrue(detach) + + def test_ex_destroy_address(self): + address = self.driver.ex_get_address('lcaddress') + destroyed = address.destroy() + self.assertTrue(destroyed) + + def test_ex_destroy_healthcheck(self): + hc = self.driver.ex_get_healthcheck('lchealthcheck') + destroyed = hc.destroy() + self.assertTrue(destroyed) + + def test_ex_delete_image(self): + image = self.driver.ex_get_image('debian-7') + deleted = self.driver.ex_delete_image(image) + self.assertTrue(deleted) + + def test_ex_deprecate_image(self): + image = self.driver.ex_get_image('debian-6') + deprecated = image.deprecate('debian-7', 'DEPRECATED') + self.assertTrue(deprecated) + + def test_ex_destroy_firewall(self): + firewall = self.driver.ex_get_firewall('lcfirewall') + destroyed = firewall.destroy() + self.assertTrue(destroyed) + + def test_ex_destroy_forwarding_rule(self): + fwr = self.driver.ex_get_forwarding_rule('lcforwardingrule') + destroyed = fwr.destroy() + self.assertTrue(destroyed) + + def test_ex_destroy_network(self): + network = self.driver.ex_get_network('lcnetwork') + destroyed = network.destroy() + self.assertTrue(destroyed) + + def test_destroy_node(self): + node = self.driver.ex_get_node('node-name') + destroyed = node.destroy() + self.assertTrue(destroyed) + + def test_ex_destroy_multiple_nodes(self): + nodes = [] + nodes.append(self.driver.ex_get_node('lcnode-000')) + nodes.append(self.driver.ex_get_node('lcnode-001')) + destroyed = self.driver.ex_destroy_multiple_nodes(nodes) + for d in destroyed: + self.assertTrue(d) + + def test_destroy_targetpool(self): + targetpool = self.driver.ex_get_targetpool('lctargetpool') + destroyed = targetpool.destroy() + self.assertTrue(destroyed) + + def test_destroy_volume(self): + disk = self.driver.ex_get_volume('lcdisk') + 
destroyed = disk.destroy() + self.assertTrue(destroyed) + + def test_ex_set_volume_auto_delete(self): + node = self.driver.ex_get_node('node-name') + volume = node.extra['boot_disk'] + auto_delete = self.driver.ex_set_volume_auto_delete( + volume, node) + self.assertTrue(auto_delete) + + def test_destroy_volume_snapshot(self): + snapshot = self.driver.ex_get_snapshot('lcsnapshot') + destroyed = snapshot.destroy() + self.assertTrue(destroyed) + + def test_ex_get_address(self): + address_name = 'lcaddress' + address = self.driver.ex_get_address(address_name) + self.assertEqual(address.name, address_name) + self.assertEqual(address.address, '173.255.113.20') + self.assertEqual(address.region.name, 'us-central1') + self.assertEqual(address.extra['status'], 'RESERVED') + + def test_ex_get_healthcheck(self): + healthcheck_name = 'lchealthcheck' + healthcheck = self.driver.ex_get_healthcheck(healthcheck_name) + self.assertEqual(healthcheck.name, healthcheck_name) + self.assertEqual(healthcheck.port, 8000) + self.assertEqual(healthcheck.path, '/lc') + + def test_ex_get_firewall(self): + firewall_name = 'lcfirewall' + firewall = self.driver.ex_get_firewall(firewall_name) + self.assertEqual(firewall.name, firewall_name) + self.assertEqual(firewall.network.name, 'default') + self.assertEqual(firewall.source_tags, ['libcloud']) + + def test_ex_get_forwarding_rule(self): + fwr_name = 'lcforwardingrule' + fwr = self.driver.ex_get_forwarding_rule(fwr_name) + self.assertEqual(fwr.name, fwr_name) + self.assertEqual(fwr.extra['portRange'], '8000-8500') + self.assertEqual(fwr.targetpool.name, 'lctargetpool') + self.assertEqual(fwr.protocol, 'TCP') + + def test_ex_get_image(self): + partial_name = 'debian-7' + image = self.driver.ex_get_image(partial_name) + self.assertEqual(image.name, 'debian-7-wheezy-v20130617') + # A 'debian-7' image exists in the local project + self.assertTrue(image.extra['description'].startswith('Local')) + + partial_name = 'debian-6' + image = 
self.driver.ex_get_image(partial_name) + self.assertEqual(image.name, 'debian-6-squeeze-v20130926') + self.assertTrue(image.extra['description'].startswith('Debian')) + + def test_ex_copy_image(self): + name = 'coreos' + url = 'gs://storage.core-os.net/coreos/amd64-generic/247.0.0/coreos_production_gce.tar.gz' + description = 'CoreOS test image' + image = self.driver.ex_copy_image(name, url, description) + self.assertEqual(image.name, name) + self.assertEqual(image.extra['description'], description) + + def test_ex_get_network(self): + network_name = 'lcnetwork' + network = self.driver.ex_get_network(network_name) + self.assertEqual(network.name, network_name) + self.assertEqual(network.cidr, '10.11.0.0/16') + self.assertEqual(network.extra['gatewayIPv4'], '10.11.0.1') + + def test_ex_get_node(self): + node_name = 'node-name' + zone = 'us-central1-a' + node = self.driver.ex_get_node(node_name, zone) + self.assertEqual(node.name, node_name) + self.assertEqual(node.size, 'n1-standard-1') + removed_node = 'libcloud-lb-demo-www-002' + self.assertRaises(ResourceNotFoundError, self.driver.ex_get_node, + removed_node, 'us-central1-b') + missing_node = 'dummy-node' + self.assertRaises(ResourceNotFoundError, self.driver.ex_get_node, + missing_node, 'all') + + def test_ex_get_project(self): + project = self.driver.ex_get_project() + self.assertEqual(project.name, 'project_name') + networks_quota = project.quotas[1] + self.assertEqual(networks_quota['usage'], 3.0) + self.assertEqual(networks_quota['limit'], 5.0) + self.assertEqual(networks_quota['metric'], 'NETWORKS') + + def test_ex_get_region(self): + region_name = 'us-central1' + region = self.driver.ex_get_region(region_name) + self.assertEqual(region.name, region_name) + self.assertEqual(region.status, 'UP') + self.assertEqual(region.zones[0].name, 'us-central1-a') + + def test_ex_get_size(self): + size_name = 'n1-standard-1' + size = self.driver.ex_get_size(size_name) + self.assertEqual(size.name, size_name) + 
self.assertEqual(size.extra['zone'].name, 'us-central1-a') + self.assertEqual(size.disk, 10) + self.assertEqual(size.ram, 3840) + self.assertEqual(size.extra['guestCpus'], 1) + + def test_ex_get_targetpool(self): + targetpool_name = 'lctargetpool' + targetpool = self.driver.ex_get_targetpool(targetpool_name) + self.assertEqual(targetpool.name, targetpool_name) + self.assertEqual(len(targetpool.nodes), 2) + self.assertEqual(targetpool.region.name, 'us-central1') + + def test_ex_get_snapshot(self): + snapshot_name = 'lcsnapshot' + snapshot = self.driver.ex_get_snapshot(snapshot_name) + self.assertEqual(snapshot.name, snapshot_name) + self.assertEqual(snapshot.size, '1') + self.assertEqual(snapshot.status, 'READY') + + def test_ex_get_volume(self): + volume_name = 'lcdisk' + volume = self.driver.ex_get_volume(volume_name) + self.assertEqual(volume.name, volume_name) + self.assertEqual(volume.size, '1') + self.assertEqual(volume.extra['status'], 'READY') + + def test_ex_get_zone(self): + zone_name = 'us-central1-b' + zone = self.driver.ex_get_zone(zone_name) + self.assertEqual(zone.name, zone_name) + self.assertFalse(zone.time_until_mw) + self.assertFalse(zone.next_mw_duration) + + zone_no_mw = self.driver.ex_get_zone('us-central1-a') + self.assertEqual(zone_no_mw.time_until_mw, None) + + +class GCEMockHttp(MockHttpTestCase): + fixtures = ComputeFileFixtures('gce') + json_hdr = {'content-type': 'application/json; charset=UTF-8'} + + def _get_method_name(self, type, use_param, qs, path): + api_path = '/compute/%s' % API_VERSION + project_path = '/projects/%s' % GCE_KEYWORD_PARAMS['project'] + path = path.replace(api_path, '') + # This replace is separate, since there is a call with a different + # project name + path = path.replace(project_path, '') + # The path to get project information is the base path, so use a fake + # '/project' path instead + if not path: + path = '/project' + method_name = super(GCEMockHttp, self)._get_method_name(type, + use_param, + qs, path) 
+ return method_name + + def _aggregated_addresses(self, method, url, body, headers): + body = self.fixtures.load('aggregated_addresses.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _aggregated_disks(self, method, url, body, headers): + body = self.fixtures.load('aggregated_disks.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _aggregated_forwardingRules(self, method, url, body, headers): + body = self.fixtures.load('aggregated_forwardingRules.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _aggregated_instances(self, method, url, body, headers): + body = self.fixtures.load('aggregated_instances.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _aggregated_machineTypes(self, method, url, body, headers): + body = self.fixtures.load('aggregated_machineTypes.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _aggregated_targetPools(self, method, url, body, headers): + body = self.fixtures.load('aggregated_targetPools.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_httpHealthChecks(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load('global_httpHealthChecks_post.json') + else: + body = self.fixtures.load('global_httpHealthChecks.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_httpHealthChecks_basic_check(self, method, url, body, headers): + body = self.fixtures.load('global_httpHealthChecks_basic-check.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_httpHealthChecks_libcloud_lb_demo_healthcheck( + self, method, url, body, headers): + body = self.fixtures.load( + 'global_httpHealthChecks_libcloud-lb-demo-healthcheck.json') + return (httplib.OK, body, self.json_hdr, 
httplib.responses[httplib.OK]) + + def _global_httpHealthChecks_lchealthcheck(self, method, url, body, + headers): + if method == 'DELETE': + body = self.fixtures.load( + 'global_httpHealthChecks_lchealthcheck_delete.json') + elif method == 'PUT': + body = self.fixtures.load( + 'global_httpHealthChecks_lchealthcheck_put.json') + else: + body = self.fixtures.load( + 'global_httpHealthChecks_lchealthcheck.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_firewalls(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load('global_firewalls_post.json') + else: + body = self.fixtures.load('global_firewalls.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_firewalls_lcfirewall(self, method, url, body, headers): + if method == 'DELETE': + body = self.fixtures.load( + 'global_firewalls_lcfirewall_delete.json') + elif method == 'PUT': + body = self.fixtures.load('global_firewalls_lcfirewall_put.json') + else: + body = self.fixtures.load('global_firewalls_lcfirewall.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_images(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load('global_images_post.json') + else: + body = self.fixtures.load('global_images.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_images_debian_7_wheezy_v20130617( + self, method, url, body, headers): + body = self.fixtures.load('global_images_debian_7_wheezy_v20130617_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_images_debian_6_squeeze_v20130926_deprecate( + self, method, url, body, headers): + body = self.fixtures.load('global_images_debian_6_squeeze_v20130926_deprecate.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_networks(self, method, url, body, 
headers): + if method == 'POST': + body = self.fixtures.load('global_networks_post.json') + else: + body = self.fixtures.load('global_networks.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_networks_default(self, method, url, body, headers): + body = self.fixtures.load('global_networks_default.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_networks_libcloud_demo_network(self, method, url, body, + headers): + body = self.fixtures.load('global_networks_libcloud-demo-network.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_networks_libcloud_demo_europe_network(self, method, url, body, + headers): + body = self.fixtures.load( + 'global_networks_libcloud-demo-europe-network.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_networks_lcnetwork(self, method, url, body, headers): + if method == 'DELETE': + body = self.fixtures.load('global_networks_lcnetwork_delete.json') + else: + body = self.fixtures.load('global_networks_lcnetwork.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_snapshots(self, method, url, body, headers): + body = self.fixtures.load('global_snapshots.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_snapshots_lcsnapshot(self, method, url, body, headers): + if method == 'DELETE': + body = self.fixtures.load( + 'global_snapshots_lcsnapshot_delete.json') + else: + body = self.fixtures.load('global_snapshots_lcsnapshot.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_operations_operation_global_httpHealthChecks_lchealthcheck_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_global_httpHealthChecks_lchealthcheck_delete.json') + return (httplib.OK, body, self.json_hdr, 
httplib.responses[httplib.OK]) + + def _global_operations_operation_global_images_debian7_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_global_images_debian7_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_operations_operation_global_httpHealthChecks_lchealthcheck_put( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_global_httpHealthChecks_lchealthcheck_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_operations_operation_global_httpHealthChecks_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_global_httpHealthChecks_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_operations_operation_global_firewalls_lcfirewall_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_global_firewalls_lcfirewall_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_operations_operation_global_firewalls_lcfirewall_put( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_global_firewalls_lcfirewall_put.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_operations_operation_global_firewalls_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_global_firewalls_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_operations_operation_global_networks_lcnetwork_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_global_networks_lcnetwork_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_operations_operation_global_networks_post( 
+ self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_global_networks_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_operations_operation_global_snapshots_lcsnapshot_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_global_snapshots_lcsnapshot_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _global_operations_operation_global_image_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_global_image_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_operations_operation_regions_us_central1_addresses_lcaddress_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_regions_us-central1_addresses_lcaddress_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_operations_operation_regions_us_central1_addresses_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_regions_us-central1_addresses_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_operations_operation_regions_us_central1_forwardingRules_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_regions_us-central1_forwardingRules_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_operations_operation_regions_us_central1_forwardingRules_lcforwardingrule_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_regions_us-central1_forwardingRules_lcforwardingrule_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def 
_regions_us_central1_operations_operation_regions_us_central1_targetPools_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_regions_us-central1_targetPools_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_operations_operation_regions_us_central1_targetPools_lctargetpool_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_regions_us-central1_targetPools_lctargetpool_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_operations_operation_regions_us_central1_targetPools_lctargetpool_removeHealthCheck_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_operations_operation_regions_us_central1_targetPools_lctargetpool_addHealthCheck_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_regions_us-central1_targetPools_lctargetpool_addHealthCheck_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_operations_operation_regions_us_central1_targetPools_lctargetpool_removeInstance_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_regions_us-central1_targetPools_lctargetpool_removeInstance_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_operations_operation_regions_us_central1_targetPools_lctargetpool_addInstance_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_regions_us-central1_targetPools_lctargetpool_addInstance_post.json') + return (httplib.OK, body, self.json_hdr, 
httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_disks_lcdisk_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_disks_lcdisk_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_instances_node_name_setDiskAutoDelete( + self, method, url, body, headers): + body = self.fixtures.load( + 'zones_us_central1_a_instances_node_name_setDiskAutoDelete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_volume_auto_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'zones_us_central1_a_operations_operation_volume_auto_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_disks_lcdisk_createSnapshot_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_disks_lcdisk_createSnapshot_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_disks_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_disks_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_lcnode_000_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_instances_lcnode-000_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_lcnode_001_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 
'operations_operation_zones_us-central1-a_instances_lcnode-001_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_node_name_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_instances_node-name_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_node_name_attachDisk_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_instances_node-name_attachDisk_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_node_name_detachDisk_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_instances_node-name_detachDisk_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_node_name_setTags_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_instances_node-name_setTags_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_node_name_reset_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_instances_node-name_reset_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_europe_west1_a_operations_operation_zones_europe_west1_a_instances_post( + self, method, url, body, headers): + body = self.fixtures.load( + 
'operations_operation_zones_europe-west1-a_instances_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_zones_us_central1_a_instances_post( + self, method, url, body, headers): + body = self.fixtures.load( + 'operations_operation_zones_us-central1-a_instances_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _project(self, method, url, body, headers): + body = self.fixtures.load('project.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _projects_debian_cloud_global_images(self, method, url, body, headers): + body = self.fixtures.load('projects_debian-cloud_global_images.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions(self, method, url, body, headers): + body = self.fixtures.load( + 'regions.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_addresses(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load( + 'regions_us-central1_addresses_post.json') + else: + body = self.fixtures.load('regions_us-central1_addresses.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_addresses_lcaddress(self, method, url, body, + headers): + if method == 'DELETE': + body = self.fixtures.load( + 'regions_us-central1_addresses_lcaddress_delete.json') + else: + body = self.fixtures.load( + 'regions_us-central1_addresses_lcaddress.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_forwardingRules(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load( + 'regions_us-central1_forwardingRules_post.json') + else: + body = self.fixtures.load( + 'regions_us-central1_forwardingRules.json') + return (httplib.OK, body, self.json_hdr, 
httplib.responses[httplib.OK]) + + def _regions_us_central1_forwardingRules_libcloud_lb_demo_lb( + self, method, url, body, headers): + body = self.fixtures.load( + 'regions_us-central1_forwardingRules_libcloud-lb-demo-lb.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_forwardingRules_lcforwardingrule( + self, method, url, body, headers): + if method == 'DELETE': + body = self.fixtures.load( + 'regions_us-central1_forwardingRules_lcforwardingrule_delete.json') + else: + body = self.fixtures.load( + 'regions_us-central1_forwardingRules_lcforwardingrule.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_targetPools(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load( + 'regions_us-central1_targetPools_post.json') + else: + body = self.fixtures.load('regions_us-central1_targetPools.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_targetPools_lctargetpool(self, method, url, + body, headers): + if method == 'DELETE': + body = self.fixtures.load( + 'regions_us-central1_targetPools_lctargetpool_delete.json') + else: + body = self.fixtures.load( + 'regions_us-central1_targetPools_lctargetpool.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_targetPools_libcloud_lb_demo_lb_tp( + self, method, url, body, headers): + body = self.fixtures.load( + 'regions_us-central1_targetPools_libcloud-lb-demo-lb-tp.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_targetPools_lctargetpool_removeHealthCheck( + self, method, url, body, headers): + body = self.fixtures.load( + 'regions_us-central1_targetPools_lctargetpool_removeHealthCheck_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def 
_regions_us_central1_targetPools_lctargetpool_addHealthCheck( + self, method, url, body, headers): + body = self.fixtures.load( + 'regions_us-central1_targetPools_lctargetpool_addHealthCheck_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_targetPools_lctargetpool_removeInstance( + self, method, url, body, headers): + body = self.fixtures.load( + 'regions_us-central1_targetPools_lctargetpool_removeInstance_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _regions_us_central1_targetPools_lctargetpool_addInstance( + self, method, url, body, headers): + body = self.fixtures.load( + 'regions_us-central1_targetPools_lctargetpool_addInstance_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones(self, method, url, body, headers): + body = self.fixtures.load('zones.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_disks(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load('zones_us-central1-a_disks_post.json') + else: + body = self.fixtures.load('zones_us-central1-a_disks.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_disks_lcdisk(self, method, url, body, headers): + if method == 'DELETE': + body = self.fixtures.load( + 'zones_us-central1-a_disks_lcdisk_delete.json') + else: + body = self.fixtures.load('zones_us-central1-a_disks_lcdisk.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_disks_lcdisk_createSnapshot(self, method, url, + body, headers): + body = self.fixtures.load( + 'zones_us-central1-a_disks_lcdisk_createSnapshot_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_disks_node_name(self, method, url, body, headers): + body = 
self.fixtures.load('generic_disk.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_disks_lcnode_000( + self, method, url, body, headers): + body = self.fixtures.load('generic_disk.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_disks_lcnode_001( + self, method, url, body, headers): + body = self.fixtures.load('generic_disk.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_b_disks_libcloud_lb_demo_www_000( + self, method, url, body, headers): + body = self.fixtures.load('generic_disk.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_b_disks_libcloud_lb_demo_www_001( + self, method, url, body, headers): + body = self.fixtures.load('generic_disk.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_b_disks_libcloud_lb_demo_www_002( + self, method, url, body, headers): + body = self.fixtures.load('generic_disk.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central2_a_disks_libcloud_demo_boot_disk( + self, method, url, body, headers): + body = self.fixtures.load('generic_disk.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central2_a_disks_libcloud_demo_np_node( + self, method, url, body, headers): + body = self.fixtures.load('generic_disk.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central2_a_disks_libcloud_demo_multiple_nodes_000( + self, method, url, body, headers): + body = self.fixtures.load('generic_disk.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central2_a_disks_libcloud_demo_multiple_nodes_001( + self, method, url, body, headers): + body = 
self.fixtures.load('generic_disk.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_europe_west1_a_disks(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load('zones_us-central1-a_disks_post.json') + else: + body = self.fixtures.load('zones_us-central1-a_disks.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_europe_west1_a_disks_libcloud_demo_europe_np_node( + self, method, url, body, headers): + body = self.fixtures.load('generic_disk.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_europe_west1_a_disks_libcloud_demo_europe_boot_disk( + self, method, url, body, headers): + body = self.fixtures.load('generic_disk.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_europe_west1_a_disks_libcloud_demo_europe_multiple_nodes_000( + self, method, url, body, headers): + body = self.fixtures.load('generic_disk.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_europe_west1_a_instances(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load( + 'zones_europe-west1-a_instances_post.json') + else: + body = self.fixtures.load('zones_europe-west1-a_instances.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_instances(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load( + 'zones_us-central1-a_instances_post.json') + else: + body = self.fixtures.load('zones_us-central1-a_instances.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_instances_node_name(self, method, url, body, + headers): + if method == 'DELETE': + body = self.fixtures.load( + 'zones_us-central1-a_instances_node-name_delete.json') + else: + body = self.fixtures.load( + 
'zones_us-central1-a_instances_node-name.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_instances_node_name_attachDisk( + self, method, url, body, headers): + body = self.fixtures.load( + 'zones_us-central1-a_instances_node-name_attachDisk_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_instances_node_name_detachDisk( + self, method, url, body, headers): + body = self.fixtures.load( + 'zones_us-central1-a_instances_node-name_detachDisk_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_instances_node_name_setTags( + self, method, url, body, headers): + body = self.fixtures.load( + 'zones_us-central1-a_instances_node-name_setTags_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_instances_node_name_reset( + self, method, url, body, headers): + body = self.fixtures.load( + 'zones_us-central1-a_instances_node-name_reset_post.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_instances_lcnode_000(self, method, url, body, + headers): + if method == 'DELETE': + body = self.fixtures.load( + 'zones_us-central1-a_instances_lcnode-000_delete.json') + else: + body = self.fixtures.load( + 'zones_us-central1-a_instances_lcnode-000.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_instances_lcnode_001(self, method, url, body, + headers): + if method == 'DELETE': + body = self.fixtures.load( + 'zones_us-central1-a_instances_lcnode-001_delete.json') + else: + body = self.fixtures.load( + 'zones_us-central1-a_instances_lcnode-001.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_b_instances_libcloud_lb_demo_www_000( + self, method, url, body, headers): + 
body = self.fixtures.load( + 'zones_us-central1-b_instances_libcloud-lb-demo-www-000.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_b_instances_libcloud_lb_demo_www_001( + self, method, url, body, headers): + body = self.fixtures.load( + 'zones_us-central1-b_instances_libcloud-lb-demo-www-001.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_b_instances_libcloud_lb_demo_www_002( + self, method, url, body, headers): + body = self.fixtures.load( + 'zones_us-central1-b_instances_libcloud-lb-demo-www-002.json') + return (httplib.NOT_FOUND, body, self.json_hdr, + httplib.responses[httplib.NOT_FOUND]) + + def _zones_us_central1_a(self, method, url, body, headers): + body = self.fixtures.load('zones_us-central1-a.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_machineTypes(self, method, url, body, headers): + body = self.fixtures.load('zones_us-central1-a_machineTypes.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_europe_west1_a_machineTypes_n1_standard_1(self, method, url, + body, headers): + body = self.fixtures.load( + 'zones_europe-west1-a_machineTypes_n1-standard-1.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_machineTypes_n1_standard_1(self, method, url, + body, headers): + body = self.fixtures.load( + 'zones_us-central1-a_machineTypes_n1-standard-1.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_gogrid.py libcloud-0.15.1/libcloud/test/compute/test_gogrid.py --- libcloud-0.5.0/libcloud/test/compute/test_gogrid.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_gogrid.py 2013-11-29 12:35:05.000000000 
class GoGridTests(unittest.TestCase, TestCaseMixin):
    """Tests for the GoGrid compute driver, served by GoGridMockHttp.

    ``GoGridMockHttp.type`` selects the mock scenario: ``None`` for the
    happy path, ``'FAIL'`` for malformed/forbidden responses and
    ``'NOPUBIPS'`` for the no-free-public-IPs case.
    """

    def setUp(self):
        # Route every driver request through the mock transport and
        # reset the scenario selector between tests.
        GoGridNodeDriver.connectionCls.conn_classes = (None, GoGridMockHttp)
        GoGridMockHttp.type = None
        self.driver = GoGridNodeDriver("foo", "bar")

    def _get_test_512Mb_node_size(self):
        # Minimal NodeSize stand-in: only the id matters to the API calls.
        return NodeSize(id='512Mb',
                        name=None,
                        ram=None,
                        disk=None,
                        bandwidth=None,
                        price=None,
                        driver=self.driver)

    def test_create_node(self):
        image = NodeImage(1531, None, self.driver)
        node = self.driver.create_node(
            name='test1',
            image=image,
            size=self._get_test_512Mb_node_size())
        self.assertEqual(node.name, 'test1')
        self.assertTrue(node.id is not None)
        # Password comes from the support/password fixture.
        self.assertEqual(node.extra['password'], 'bebebe')

    def test_list_nodes(self):
        node = self.driver.list_nodes()[0]

        self.assertEqual(node.id, '90967')
        self.assertEqual(node.extra['password'], 'bebebe')
        self.assertEqual(node.extra['description'], 'test server')

    def test_reboot_node(self):
        node = Node(90967, None, None, None, None, self.driver)
        ret = self.driver.reboot_node(node)
        self.assertTrue(ret)

    def test_reboot_node_not_successful(self):
        # The FAIL scenario returns a 404 power response.
        GoGridMockHttp.type = 'FAIL'
        node = Node(90967, None, None, None, None, self.driver)

        try:
            self.driver.reboot_node(node)
        except Exception:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_destroy_node(self):
        node = Node(90967, None, None, None, None, self.driver)
        ret = self.driver.destroy_node(node)
        self.assertTrue(ret)

    def test_list_images(self):
        images = self.driver.list_images()
        image = images[0]
        self.assertEqual(len(images), 4)
        self.assertEqual(image.name, 'CentOS 5.3 (32-bit) w/ None')
        self.assertEqual(image.id, '1531')

        # Passing a location must not change the fixture-backed result.
        location = NodeLocation(
            id='gogrid/GSI-939ef909-84b8-4a2f-ad56-02ccd7da05ff.img',
            name='test location', country='Slovenia',
            driver=self.driver)
        images = self.driver.list_images(location=location)
        image = images[0]
        self.assertEqual(len(images), 4)
        self.assertEqual(image.name, 'CentOS 5.3 (32-bit) w/ None')
        self.assertEqual(image.id, '1531')

    def test_malformed_reply(self):
        # FAIL serves a non-JSON body; the driver must surface it as
        # a LibcloudError rather than a raw parse error.
        GoGridMockHttp.type = 'FAIL'
        try:
            self.driver.list_images()
        except LibcloudError:
            e = sys.exc_info()[1]
            self.assertTrue(isinstance(e, LibcloudError))
        else:
            self.fail("test should have thrown")

    def test_invalid_creds(self):
        GoGridMockHttp.type = 'FAIL'
        try:
            self.driver.list_nodes()
        except InvalidCredsError:
            e = sys.exc_info()[1]
            # The raised error must carry the originating driver.
            self.assertTrue(e.driver is not None)
            self.assertEqual(e.driver.name, self.driver.name)
        else:
            self.fail("test should have thrown")

    def test_node_creation_without_free_public_ips(self):
        # NOPUBIPS serves an empty IP list, so create_node cannot
        # allocate a public address and must raise.
        GoGridMockHttp.type = 'NOPUBIPS'
        try:
            image = NodeImage(1531, None, self.driver)
            self.driver.create_node(
                name='test1',
                image=image,
                size=self._get_test_512Mb_node_size())
        except LibcloudError:
            e = sys.exc_info()[1]
            self.assertTrue(isinstance(e, LibcloudError))
            self.assertTrue(e.driver is not None)
            self.assertEqual(e.driver.name, self.driver.name)
        else:
            self.fail("test should have thrown")

    def test_list_locations(self):
        locations = self.driver.list_locations()
        location_names = [location.name for location in locations]

        self.assertEqual(len(locations), 2)
        for i in 0, 1:
            self.assertTrue(isinstance(locations[i], NodeLocation))
        self.assertTrue("US-West-1" in location_names)
        self.assertTrue("US-East-1" in location_names)

    def test_ex_save_image(self):
        node = self.driver.list_nodes()[0]
        image = self.driver.ex_save_image(node, "testimage")
        self.assertEqual(image.name, "testimage")

    def test_ex_edit_image(self):
        image = self.driver.list_images()[0]
        ret = self.driver.ex_edit_image(image=image, public=False,
                                        ex_description="test",
                                        name="testname")

        self.assertTrue(isinstance(ret, NodeImage))

    def test_ex_edit_node(self):
        node = Node(id=90967, name=None, state=None,
                    public_ips=None, private_ips=None, driver=self.driver)
        ret = self.driver.ex_edit_node(node=node,
                                       size=self._get_test_512Mb_node_size())

        self.assertTrue(isinstance(ret, Node))

    def test_ex_list_ips(self):
        ips = self.driver.ex_list_ips()

        # Expected entries mirror the ip_list.json fixture; each found IP
        # is removed from the dict so leftovers mean missing results.
        expected_ips = {
            "192.168.75.66": GoGridIpAddress(
                id="5348099", ip="192.168.75.66", public=True,
                state="Unassigned",
                subnet="192.168.75.64/255.255.255.240"),
            "192.168.75.67": GoGridIpAddress(
                id="5348100", ip="192.168.75.67", public=True,
                state="Assigned",
                subnet="192.168.75.64/255.255.255.240"),
            "192.168.75.68": GoGridIpAddress(
                id="5348101", ip="192.168.75.68", public=False,
                state="Unassigned",
                subnet="192.168.75.64/255.255.255.240")}

        self.assertEqual(len(expected_ips), 3)

        for ip in ips:
            self.assertTrue(ip.ip in expected_ips)
            self.assertEqual(ip.public, expected_ips[ip.ip].public)
            self.assertEqual(ip.state, expected_ips[ip.ip].state)
            self.assertEqual(ip.subnet, expected_ips[ip.ip].subnet)

            del expected_ips[ip.ip]

        self.assertEqual(len(expected_ips), 0)

    def test_get_state_invalid(self):
        # Unknown provider states must map to NodeState.UNKNOWN.
        state = self.driver._get_state('invalid')
        self.assertEqual(state, NodeState.UNKNOWN)
class GoGridMockHttp(MockHttp):
    """Canned GoGrid API responses, dispatched by request path.

    ``MockHttp`` mangles a request path such as ``/api/grid/image/list``
    into the method name ``_api_grid_image_list`` (with the scenario
    suffix from ``GoGridMockHttp.type``, e.g. ``_FAIL``, appended) and
    calls it.  Each handler returns the standard 4-tuple
    ``(status, body, headers, reason)``.
    """

    fixtures = ComputeFileFixtures('gogrid')

    def _api_grid_image_list(self, method, url, body, headers):
        body = self.fixtures.load('image_list.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_grid_image_list_FAIL(self, method, url, body, headers):
        # Deliberately non-JSON payload so the driver's parser fails.
        # NOTE(review): string reconstructed — the markup tags were
        # stripped from this chunk by the paste; confirm against upstream.
        body = "<h3>some non valid json here</h3>"
        return (httplib.SERVICE_UNAVAILABLE, body, {},
                httplib.responses[httplib.SERVICE_UNAVAILABLE])

    def _api_grid_server_list(self, method, url, body, headers):
        body = self.fixtures.load('server_list.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    # The NOPUBIPS scenario serves the same server list.
    _api_grid_server_list_NOPUBIPS = _api_grid_server_list

    def _api_grid_server_list_FAIL(self, method, url, body, headers):
        return (httplib.FORBIDDEN,
                "123", {}, httplib.responses[httplib.FORBIDDEN])

    def _api_grid_ip_list(self, method, url, body, headers):
        body = self.fixtures.load('ip_list.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_grid_ip_list_NOPUBIPS(self, method, url, body, headers):
        # Empty IP list: no free public IPs available in this scenario.
        body = self.fixtures.load('ip_list_empty.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_grid_server_power(self, method, url, body, headers):
        body = self.fixtures.load('server_power.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_grid_server_power_FAIL(self, method, url, body, headers):
        body = self.fixtures.load('server_power_fail.json')
        # FIX: the reason phrase previously was responses[httplib.OK]
        # while the status code was 404; make the phrase match the
        # status, consistent with the other *_FAIL handlers.
        return (httplib.NOT_FOUND, body, {},
                httplib.responses[httplib.NOT_FOUND])

    def _api_grid_server_add(self, method, url, body, headers):
        body = self.fixtures.load('server_add.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    _api_grid_server_add_NOPUBIPS = _api_grid_server_add

    def _api_grid_server_delete(self, method, url, body, headers):
        body = self.fixtures.load('server_delete.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_grid_server_edit(self, method, url, body, headers):
        body = self.fixtures.load('server_edit.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_support_password_list(self, method, url, body, headers):
        body = self.fixtures.load('password_list.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    _api_support_password_list_NOPUBIPS = _api_support_password_list

    def _api_grid_image_save(self, method, url, body, headers):
        body = self.fixtures.load('image_save.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_grid_image_edit(self, method, url, body, headers):
        # edit method is quite similar to save method from the response
        # perspective
        body = self.fixtures.load('image_save.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_common_lookup_list(self, method, url, body, headers):
        # Serve lookup fixtures only for the lookups we know about;
        # anything else is a programming error in the test.
        _valid_lookups = ("ip.datacenter",)

        lookup = parse_qs(urlparse.urlparse(url).query)["lookup"][0]
        if lookup in _valid_lookups:
            fixture_path = "lookup_list_%s.json" % \
                (lookup.replace(".", "_"))
        else:
            raise NotImplementedError
        body = self.fixtures.load(fixture_path)
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])


if __name__ == '__main__':
    sys.exit(unittest.main())
class GridspotTest(unittest.TestCase, TestCaseMixin):
    """Tests for the Gridspot driver.

    Gridspot's API only supports listing and stopping instances, so most
    TestCaseMixin hooks are satisfied with no-op tests below.
    """

    def setUp(self):
        # Route all driver traffic through the mock transport and reset
        # the scenario selector between tests.
        GridspotNodeDriver.connectionCls.conn_classes = (
            None,
            GridspotMockHttp
        )
        GridspotMockHttp.type = None
        self.driver = GridspotNodeDriver(*GRIDSPOT_PARAMS)

    def test_invalid_creds(self):
        """
        Tests the error-handling for passing a bad API Key to the Gridspot API
        """
        GridspotMockHttp.type = 'BAD_AUTH'
        try:
            self.driver.list_nodes()
            # Above command should have thrown an InvalidCredsException
            self.assertTrue(False)
        except InvalidCredsError:
            self.assertTrue(True)

    def test_list_nodes(self):
        # Two fixture instances: one "Running", one "Starting"; verify
        # that every field of the mock payload is mapped into the Node.
        nodes = self.driver.list_nodes()
        self.assertEqual(len(nodes), 2)

        running_node = nodes[0]
        starting_node = nodes[1]

        self.assertEqual(running_node.id, 'inst_CP2WrQi2WIS4iheyAVkQYw')
        self.assertEqual(running_node.state, NodeState.RUNNING)
        self.assertTrue('69.4.239.74' in running_node.public_ips)
        self.assertEqual(running_node.extra['port'], 62394)
        self.assertEqual(running_node.extra['vm_ram'], 1429436743)
        self.assertEqual(running_node.extra['start_state_time'], 1342108905)
        self.assertEqual(running_node.extra['vm_num_logical_cores'], 8)
        self.assertEqual(running_node.extra['vm_num_physical_cores'], 4)
        self.assertEqual(running_node.extra['winning_bid_id'],
                         'bid_X5xhotGYiGUk7_RmIqVafA')
        # "null" ended_state_time in the payload must not be copied over.
        self.assertFalse('ended_state_time' in running_node.extra)
        self.assertEqual(running_node.extra['running_state_time'], 1342108989)

        self.assertEqual(starting_node.id, 'inst_CP2WrQi2WIS4iheyAVkQYw2')
        # "Starting" maps to PENDING.
        self.assertEqual(starting_node.state, NodeState.PENDING)
        self.assertTrue('69.4.239.74' in starting_node.public_ips)
        self.assertEqual(starting_node.extra['port'], 62395)
        self.assertEqual(starting_node.extra['vm_ram'], 1429436744)
        self.assertEqual(starting_node.extra['start_state_time'], 1342108906)
        self.assertEqual(starting_node.extra['vm_num_logical_cores'], 7)
        self.assertEqual(starting_node.extra['vm_num_physical_cores'], 5)
        self.assertEqual(starting_node.extra['winning_bid_id'],
                         'bid_X5xhotGYiGUk7_RmIqVafA1')
        self.assertFalse('ended_state_time' in starting_node.extra)
        self.assertEqual(starting_node.extra['running_state_time'], 1342108990)

    def test_create_node(self):
        """
        Gridspot does not implement this functionality
        """
        self.assertTrue(True)

    def test_destroy_node(self):
        """
        Test destroy_node for Gridspot driver
        """
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.destroy_node(node))

    def test_destroy_node_failure(self):
        """
        Gridspot does not fail a destroy node unless the parameters are bad, in
        which case it 404s
        """
        self.assertTrue(True)

    def test_reboot_node(self):
        """
        Gridspot does not implement this functionality
        """
        self.assertTrue(True)

    def test_reboot_node_failure(self):
        """
        Gridspot does not implement this functionality
        """
        self.assertTrue(True)

    def test_resize_node(self):
        """
        Gridspot does not implement this functionality
        """
        self.assertTrue(True)

    def test_reboot_node_response(self):
        """
        Gridspot does not implement this functionality
        """
        self.assertTrue(True)

    def test_list_images_response(self):
        """
        Gridspot does not implement this functionality
        """
        self.assertTrue(True)

    def test_create_node_response(self):
        """
        Gridspot does not implement this functionality
        """
        self.assertTrue(True)

    def test_destroy_node_response(self):
        """
        Gridspot does not implement this functionality
        """
        self.assertTrue(True)

    def test_list_sizes_response(self):
        """
        Gridspot does not implement this functionality
        """
        self.assertTrue(True)

    def test_resize_node_failure(self):
        """
        Gridspot does not implement this functionality
        """
        self.assertTrue(True)

    def test_list_images(self):
        """
        Gridspot does not implement this functionality
        """
        self.assertTrue(True)

    def test_list_sizes(self):
        """
        Gridspot does not implement this functionality
        """
        self.assertTrue(True)

    def test_list_locations(self):
        """
        Gridspot does not implement this functionality
        """
        self.assertTrue(True)

    def test_list_locations_response(self):
        """
        Gridspot does not implement this functionality
        """
        self.assertTrue(True)
class GridspotMockHttp(MockHttp):
    """Mock transport serving canned Gridspot v1 API JSON responses."""

    def _compute_api_v1_list_instances_BAD_AUTH(self, method, url, body,
                                                headers):
        # Gridspot answers an invalid API key with a plain 404.
        return (httplib.NOT_FOUND, "", {},
                httplib.responses[httplib.NOT_FOUND])

    def _compute_api_v1_list_instances(self, method, url, body, headers):
        # One running and one starting instance; field values are
        # asserted one-by-one in GridspotTest.test_list_nodes.
        running_instance = {
            "instance_id": "inst_CP2WrQi2WIS4iheyAVkQYw",
            "vm_num_logical_cores": 8,
            "vm_num_physical_cores": 4,
            "winning_bid_id": "bid_X5xhotGYiGUk7_RmIqVafA",
            "vm_ram": 1429436743,
            "start_state_time": 1342108905,
            "vm_ssh_wan_ip_endpoint": "69.4.239.74:62394",
            "current_state": "Running",
            "ended_state_time": "null",
            "running_state_time": 1342108989
        }
        starting_instance = {
            "instance_id": "inst_CP2WrQi2WIS4iheyAVkQYw2",
            "vm_num_logical_cores": 7,
            "vm_num_physical_cores": 5,
            "winning_bid_id": "bid_X5xhotGYiGUk7_RmIqVafA1",
            "vm_ram": 1429436744,
            "start_state_time": 1342108906,
            "vm_ssh_wan_ip_endpoint": "69.4.239.74:62395",
            "current_state": "Starting",
            "ended_state_time": "null",
            "running_state_time": 1342108990
        }
        payload = json.dumps({
            "instances": [running_instance, starting_instance],
            "exception_name": ""
        })

        return (httplib.OK, payload, {}, httplib.responses[httplib.OK])

    def _compute_api_v1_stop_instance(self, method, url, body, headers):
        # An empty exception_name signals success to the driver.
        payload = json.dumps({"exception_name": ""})

        return (httplib.OK, payload, {}, httplib.responses[httplib.OK])


if __name__ == '__main__':
    sys.exit(unittest.main())
class HostVirtualTest(unittest.TestCase):
    """Tests for the HostVirtual driver against HostVirtualMockHttp
    fixtures."""

    def setUp(self):
        # Route all driver traffic through the mock transport.
        HostVirtualNodeDriver.connectionCls.conn_classes = (
            None, HostVirtualMockHttp)
        self.driver = HostVirtualNodeDriver(*HOSTVIRTUAL_PARAMS)

    def test_list_nodes(self):
        nodes = self.driver.list_nodes()
        self.assertEqual(len(nodes), 4)
        self.assertEqual(len(nodes[0].public_ips), 1)
        self.assertEqual(len(nodes[1].public_ips), 1)
        self.assertEqual(len(nodes[0].private_ips), 0)
        self.assertEqual(len(nodes[1].private_ips), 0)
        self.assertTrue('208.111.39.118' in nodes[1].public_ips)
        self.assertTrue('208.111.45.250' in nodes[0].public_ips)
        self.assertEqual(nodes[3].state, NodeState.RUNNING)
        self.assertEqual(nodes[1].state, NodeState.TERMINATED)

    def test_list_sizes(self):
        # NOTE(review): the fixture exposes ram/disk/bandwidth/price as
        # strings ('512MB', '15.00'), and the assertions pin that.
        sizes = self.driver.list_sizes()
        self.assertEqual(len(sizes), 14)
        self.assertEqual(sizes[0].id, '31')
        self.assertEqual(sizes[4].id, '71')
        self.assertEqual(sizes[2].ram, '512MB')
        self.assertEqual(sizes[2].disk, '20GB')
        self.assertEqual(sizes[3].bandwidth, '600GB')
        self.assertEqual(sizes[1].price, '15.00')

    def test_list_images(self):
        images = self.driver.list_images()
        self.assertEqual(len(images), 8)
        self.assertEqual(images[0].id, '1739')
        self.assertEqual(images[0].name, 'Gentoo 2012 (0619) i386')

    def test_list_locations(self):
        locations = self.driver.list_locations()
        self.assertEqual(locations[0].id, '3')
        self.assertEqual(locations[0].name, 'SJC - San Jose, CA')
        self.assertEqual(locations[1].id, '13')
        self.assertEqual(locations[1].name, 'IAD2- Reston, VA')

    def test_reboot_node(self):
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.reboot_node(node))

    def test_ex_get_node(self):
        node = self.driver.ex_get_node(node_id='62291')
        self.assertEqual(node.id, '62291')
        self.assertEqual(node.name, 'server1.vr-cluster.org')
        self.assertEqual(node.state, NodeState.TERMINATED)
        self.assertTrue('208.111.45.250' in node.public_ips)

    def test_ex_stop_node(self):
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.ex_stop_node(node))

    def test_ex_start_node(self):
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.ex_start_node(node))

    def test_destroy_node(self):
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.destroy_node(node))

    def test_ex_delete_node(self):
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.ex_delete_node(node))

    def test_create_node(self):
        auth = NodeAuthPassword('vr!@#hosted#@!')
        size = self.driver.list_sizes()[0]
        image = self.driver.list_images()[0]
        node = self.driver.create_node(
            name='test.com',
            image=image,
            size=size,
            auth=auth
        )
        # The create_node fixture always answers with this node.
        self.assertEqual('62291', node.id)
        self.assertEqual('server1.vr-cluster.org', node.name)

    def test_ex_provision_node(self):
        node = self.driver.list_nodes()[0]
        auth = NodeAuthPassword('vr!@#hosted#@!')
        self.assertTrue(self.driver.ex_provision_node(
            node=node,
            auth=auth
        ))

    def test_create_node_in_location(self):
        # Same as test_create_node but with an explicit location.
        auth = NodeAuthPassword('vr!@#hosted#@!')
        size = self.driver.list_sizes()[0]
        image = self.driver.list_images()[0]
        location = self.driver.list_locations()[1]
        node = self.driver.create_node(
            name='test.com',
            image=image,
            size=size,
            auth=auth,
            location=location
        )
        self.assertEqual('62291', node.id)
        self.assertEqual('server1.vr-cluster.org', node.name)
self.fixtures.load('list_nodes.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _cloud_server(self, method, url, body, headers): + body = self.fixtures.load('get_node.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _cloud_sizes(self, method, url, body, headers): + body = self.fixtures.load('list_sizes.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _cloud_images(self, method, url, body, headers): + body = self.fixtures.load('list_images.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _cloud_locations(self, method, url, body, headers): + body = self.fixtures.load('list_locations.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _cloud_cancel(self, method, url, body, headers): + body = self.fixtures.load('node_destroy.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _cloud_server_reboot(self, method, url, body, headers): + body = self.fixtures.load('node_reboot.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _cloud_server_shutdown(self, method, url, body, headers): + body = self.fixtures.load('node_stop.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _cloud_server_start(self, method, url, body, headers): + body = self.fixtures.load('node_start.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _cloud_buy(self, method, url, body, headers): + body = self.fixtures.load('create_node.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _cloud_server_build(self, method, url, body, headers): + body = self.fixtures.load('create_node.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _cloud_server_delete(self, method, url, body, headers): + body = self.fixtures.load('node_destroy.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == 
class IBMTests(unittest.TestCase, TestCaseMixin):

    """
    Tests the IBM SmartCloud Enterprise driver.

    IBMMockHttp.type selects the scenario (e.g. 'UNAUTHORIZED', 'CREATE',
    'DELETE', 'REBOOT'); None serves the default happy-path fixtures.
    """

    def setUp(self):
        # Route all driver traffic through the mock transport and reset
        # the scenario selector between tests.
        IBM.connectionCls.conn_classes = (None, IBMMockHttp)
        IBMMockHttp.type = None
        self.driver = IBM(*IBM_PARAMS)

    def test_auth(self):
        IBMMockHttp.type = 'UNAUTHORIZED'

        try:
            self.driver.list_nodes()
        except InvalidCredsError:
            e = sys.exc_info()[1]
            self.assertTrue(isinstance(e, InvalidCredsError))
            self.assertEqual(e.value, '401: Unauthorized')
        else:
            self.fail('test should have thrown')

    def test_list_nodes(self):
        ret = self.driver.list_nodes()
        self.assertEqual(len(ret), 3)
        self.assertEqual(ret[0].id, '26557')
        self.assertEqual(ret[0].name, 'Insight Instance')
        self.assertEqual(ret[0].public_ips, ['129.33.196.128'])
        self.assertEqual(ret[0].private_ips, [])  # Private IPs not supported
        self.assertEqual(ret[1].public_ips, [])  # Node is non-active (no IP)
        self.assertEqual(ret[1].private_ips, [])
        self.assertEqual(ret[1].id, '28193')

    def test_list_sizes(self):
        ret = self.driver.list_sizes()
        self.assertEqual(len(ret), 9)  # 9 instance configurations supported
        self.assertEqual(ret[0].id, 'BRZ32.1/2048/60*175')
        self.assertEqual(ret[1].id, 'BRZ64.2/4096/60*500*350')
        self.assertEqual(ret[2].id, 'COP32.1/2048/60')
        self.assertEqual(ret[0].name, 'Bronze 32 bit')
        self.assertEqual(ret[0].disk, None)

    def test_list_images(self):
        ret = self.driver.list_images()
        self.assertEqual(len(ret), 21)
        self.assertEqual(ret[10].name, "Rational Asset Manager 7.2.0.1")
        self.assertEqual(ret[9].id, '10002573')

    def test_list_locations(self):
        ret = self.driver.list_locations()
        self.assertEqual(len(ret), 6)
        self.assertEqual(ret[0].id, '41')
        self.assertEqual(ret[0].name, 'Raleigh')
        self.assertEqual(ret[0].country, 'U.S.A')

    def test_create_node(self):
        # Test creation of node
        IBMMockHttp.type = 'CREATE'
        image = NodeImage(id=11, name='Rational Insight', driver=self.driver)
        size = NodeSize('LARGE', 'LARGE', None, None, None, None, self.driver)
        location = NodeLocation('1', 'POK', 'US', driver=self.driver)
        ret = self.driver.create_node(name='RationalInsight4',
                                      image=image,
                                      size=size,
                                      location=location,
                                      publicKey='MyPublicKey',
                                      configurationData={
                                          'insight_admin_password': 'myPassword1',
                                          'db2_admin_password': 'myPassword2',
                                          'report_user_password': 'myPassword3'})
        self.assertTrue(isinstance(ret, Node))
        self.assertEqual(ret.name, 'RationalInsight4')

        # Test creation attempt with invalid location
        IBMMockHttp.type = 'CREATE_INVALID'
        location = NodeLocation('3', 'DOESNOTEXIST', 'US', driver=self.driver)
        try:
            ret = self.driver.create_node(name='RationalInsight5',
                                          image=image,
                                          size=size,
                                          location=location,
                                          publicKey='MyPublicKey',
                                          configurationData={
                                              'insight_admin_password': 'myPassword1',
                                              'db2_admin_password': 'myPassword2',
                                              'report_user_password': 'myPassword3'})
        except Exception:
            e = sys.exc_info()[1]
            self.assertEqual(e.args[0], 'Error 412: No DataCenter with id: 3')
        else:
            self.fail('test should have thrown')

    def test_destroy_node(self):
        # Delete existent node
        nodes = self.driver.list_nodes()  # retrieves 3 nodes
        self.assertEqual(len(nodes), 3)
        IBMMockHttp.type = 'DELETE'
        toDelete = nodes[1]
        ret = self.driver.destroy_node(toDelete)
        self.assertTrue(ret)

        # Delete non-existent node
        IBMMockHttp.type = 'DELETED'
        nodes = self.driver.list_nodes()  # retrieves 2 nodes
        self.assertEqual(len(nodes), 2)
        try:
            self.driver.destroy_node(toDelete)  # delete non-existent node
        except Exception:
            e = sys.exc_info()[1]
            self.assertEqual(e.args[0], 'Error 404: Invalid Instance ID 28193')
        else:
            self.fail('test should have thrown')

    def test_reboot_node(self):
        nodes = self.driver.list_nodes()
        IBMMockHttp.type = 'REBOOT'

        # Reboot active node
        self.assertEqual(len(nodes), 3)
        ret = self.driver.reboot_node(nodes[0])
        self.assertTrue(ret)

        # Reboot inactive node
        try:
            ret = self.driver.reboot_node(nodes[1])
        except Exception:
            e = sys.exc_info()[1]
            self.assertEqual(
                e.args[0], 'Error 412: Instance must be in the Active state')
        else:
            self.fail('test should have thrown')

    def test_list_volumes(self):
        # NOTE(review): volume size is asserted as the string '2048',
        # i.e. the driver passes the API value through unconverted.
        ret = self.driver.list_volumes()
        self.assertEqual(len(ret), 1)
        self.assertEqual(ret[0].name, 'libcloudvol')
        self.assertEqual(ret[0].extra['location'], '141')
        self.assertEqual(ret[0].size, '2048')
        self.assertEqual(ret[0].id, '39281')

    def test_attach_volume(self):
        vols = self.driver.list_volumes()
        nodes = self.driver.list_nodes()
        IBMMockHttp.type = 'ATTACH'
        ret = self.driver.attach_volume(nodes[0], vols[0])
        self.assertTrue(ret)

    def test_create_volume(self):
        IBMMockHttp.type = 'CREATE'
        ret = self.driver.create_volume('256',
                                        'test-volume',
                                        location='141',
                                        format='RAW',
                                        offering_id='20001208')
        self.assertEqual(ret.id, '39293')
        self.assertEqual(ret.size, '256')
        self.assertEqual(ret.name, 'test-volume')
        self.assertEqual(ret.extra['location'], '141')

    def test_destroy_volume(self):
        vols = self.driver.list_volumes()
        IBMMockHttp.type = 'DESTROY'
        ret = self.driver.destroy_volume(vols[0])
        self.assertTrue(ret)

    def test_ex_destroy_image(self):
        image = self.driver.list_images()
        IBMMockHttp.type = 'DESTROY'
        ret = self.driver.ex_destroy_image(image[0])
        self.assertTrue(ret)

    def test_detach_volume(self):
        nodes = self.driver.list_nodes()
        vols = self.driver.list_volumes()
        IBMMockHttp.type = 'DETACH'
        ret = self.driver.detach_volume(nodes[0], vols[0])
        self.assertTrue(ret)

    def test_ex_allocate_address(self):
        IBMMockHttp.type = 'ALLOCATE'
        ret = self.driver.ex_allocate_address('141', '20001223')
        self.assertEqual(ret.id, '292795')
        self.assertEqual(ret.state, '0')
        self.assertEqual(ret.options['location'], '141')

    def test_ex_delete_address(self):
        IBMMockHttp.type = 'DELETE'
        ret = self.driver.ex_delete_address('292795')
        self.assertTrue(ret)

    def test_ex_list_addresses(self):
        ret = self.driver.ex_list_addresses()
        self.assertEqual(ret[0].ip, '170.225.160.218')
        self.assertEqual(ret[0].options['location'], '141')
        self.assertEqual(ret[0].id, '292795')
        self.assertEqual(ret[0].state, '2')

    def test_ex_list_storage_offerings(self):
        ret = self.driver.ex_list_storage_offerings()
        self.assertEqual(ret[0].name, 'Small')
        self.assertEqual(ret[0].location, '61')
        self.assertEqual(ret[0].id, '20001208')
self.assertEqual(ret[0].ip, '170.225.160.218') + self.assertEqual(ret[0].options['location'], '141') + self.assertEqual(ret[0].id, '292795') + self.assertEqual(ret[0].state, '2') + + def test_ex_list_storage_offerings(self): + ret = self.driver.ex_list_storage_offerings() + self.assertEqual(ret[0].name, 'Small') + self.assertEqual(ret[0].location, '61') + self.assertEqual(ret[0].id, '20001208') + + +class IBMMockHttp(MockHttp): + fixtures = ComputeFileFixtures('ibm_sce') + + def _computecloud_enterprise_api_rest_20100331_instances(self, method, url, body, headers): + body = self.fixtures.load('instances.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_instances_DELETED(self, method, url, body, headers): + body = self.fixtures.load('instances_deleted.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_instances_UNAUTHORIZED(self, method, url, body, headers): + return (httplib.UNAUTHORIZED, body, {}, httplib.responses[httplib.UNAUTHORIZED]) + + def _computecloud_enterprise_api_rest_20100331_offerings_image(self, method, url, body, headers): + body = self.fixtures.load('images.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_locations(self, method, url, body, headers): + body = self.fixtures.load('locations.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_instances_26557_REBOOT(self, method, url, body, headers): + body = self.fixtures.load('reboot_active.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_instances_28193_REBOOT(self, method, url, body, headers): + return (412, 'Error 412: Instance must be in the Active state', {}, 'Precondition Failed') + + def 
_computecloud_enterprise_api_rest_20100331_instances_28193_DELETE(self, method, url, body, headers): + body = self.fixtures.load('delete.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_instances_28193_DELETED(self, method, url, body, headers): + return (404, 'Error 404: Invalid Instance ID 28193', {}, 'Precondition Failed') + + def _computecloud_enterprise_api_rest_20100331_instances_CREATE(self, method, url, body, headers): + body = self.fixtures.load('create.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_instances_CREATE_INVALID(self, method, url, body, headers): + return (412, 'Error 412: No DataCenter with id: 3', {}, 'Precondition Failed') + + def _computecloud_enterprise_api_rest_20100331_storage(self, method, url, body, headers): + body = self.fixtures.load('list_volumes.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_instances_26557_ATTACH(self, method, url, body, headers): + body = self.fixtures.load('attach_volume.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_storage_CREATE(self, method, url, body, headers): + body = self.fixtures.load('create_volume.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_storage_39281_DESTROY(self, method, url, body, headers): + body = self.fixtures.load('destroy_volume.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_offerings_image_2_DESTROY(self, method, url, body, headers): + body = self.fixtures.load('destroy_image.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_instances_26557_DETACH(self, method, url, body, headers): + body = 
self.fixtures.load('detach_volume.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_addresses_ALLOCATE(self, method, url, body, headers): + body = self.fixtures.load('allocate_address.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_addresses_292795_DELETE(self, method, url, body, headers): + body = self.fixtures.load('delete_address.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_addresses(self, method, url, body, headers): + body = self.fixtures.load('list_addresses.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_offerings_storage(self, method, url, body, headers): + body = self.fixtures.load('list_storage_offerings.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + # This is only to accommodate the response tests built into test\__init__.py + def _computecloud_enterprise_api_rest_20100331_instances_26557(self, method, url, body, headers): + if method == 'DELETE': + body = self.fixtures.load('delete.xml') + else: + body = self.fixtures.load('reboot_active.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_ikoula.py libcloud-0.15.1/libcloud/test/compute/test_ikoula.py --- libcloud-0.5.0/libcloud/test/compute/test_ikoula.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_ikoula.py 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +from libcloud.compute.drivers.ikoula import IkoulaNodeDriver +from libcloud.test.compute.test_cloudstack import CloudStackCommonTestCase + +from libcloud.test import unittest + + +class IkoulaNodeDriverTestCase(CloudStackCommonTestCase, unittest.TestCase): + driver_klass = IkoulaNodeDriver + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_joyent.py libcloud-0.15.1/libcloud/test/compute/test_joyent.py --- libcloud-0.5.0/libcloud/test/compute/test_joyent.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_joyent.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,128 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +from libcloud.utils.py3 import httplib +from libcloud.common.types import LibcloudError +from libcloud.compute.base import NodeState +from libcloud.compute.drivers.joyent import JoyentNodeDriver + +from libcloud.test import MockHttp, unittest +from libcloud.test.file_fixtures import ComputeFileFixtures +from libcloud.test.secrets import JOYENT_PARAMS + + +class JoyentTestCase(unittest.TestCase): + + def setUp(self): + JoyentNodeDriver.connectionCls.conn_classes = (None, JoyentHttp) + self.driver = JoyentNodeDriver(*JOYENT_PARAMS) + + def test_instantiate_multiple_drivers_with_different_region(self): + kwargs1 = {'region': 'us-east-1'} + kwargs2 = {'region': 'us-west-1'} + driver1 = JoyentNodeDriver(*JOYENT_PARAMS, **kwargs1) + driver2 = JoyentNodeDriver(*JOYENT_PARAMS, **kwargs2) + + self.assertTrue(driver1.connection.host.startswith(kwargs1['region'])) + self.assertTrue(driver2.connection.host.startswith(kwargs2['region'])) + + driver1.list_nodes() + driver2.list_nodes() + driver1.list_nodes() + + self.assertTrue(driver1.connection.host.startswith(kwargs1['region'])) + self.assertTrue(driver2.connection.host.startswith(kwargs2['region'])) + + def test_location_backward_compatibility(self): + kwargs = {'location': 'us-west-1'} + driver = JoyentNodeDriver(*JOYENT_PARAMS, **kwargs) + self.assertTrue(driver.connection.host.startswith(kwargs['location'])) + + def test_instantiate_invalid_region(self): + expected_msg = 'Invalid region.+' + + self.assertRaisesRegexp(LibcloudError, expected_msg, JoyentNodeDriver, + 'user', 'key', location='invalid') + self.assertRaisesRegexp(LibcloudError, expected_msg, JoyentNodeDriver, + 'user', 'key', region='invalid') + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + + self.assertEqual(sizes[0].ram, 16384) + + def test_list_images(self): + images = self.driver.list_images() + + 
self.assertEqual(images[0].name, 'nodejs') + + def test_list_nodes_with_and_without_credentials(self): + nodes = self.driver.list_nodes() + self.assertEqual(len(nodes), 2) + + node = nodes[0] + self.assertEqual(node.public_ips[0], '165.225.129.129') + self.assertEqual(node.private_ips[0], '10.112.1.130') + self.assertEqual(node.state, NodeState.RUNNING) + + node = nodes[1] + self.assertEqual(node.public_ips[0], '165.225.129.128') + self.assertEqual(node.private_ips[0], '10.112.1.131') + self.assertEqual(node.state, NodeState.RUNNING) + self.assertEqual(node.extra['password'], 'abc') + + def test_create_node(self): + image = self.driver.list_images()[0] + size = self.driver.list_sizes()[0] + node = self.driver.create_node(image=image, size=size, name='testlc') + + self.assertEqual(node.name, 'testlc') + + def test_ex_stop_node(self): + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver.ex_stop_node(node)) + + def test_ex_start_node(self): + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver.ex_start_node(node)) + + +class JoyentHttp(MockHttp): + fixtures = ComputeFileFixtures('joyent') + + def _my_packages(self, method, url, body, headers): + body = self.fixtures.load('my_packages.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _my_datasets(self, method, url, body, headers): + body = self.fixtures.load('my_datasets.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _my_machines(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('my_machines.json') + elif method == 'POST': + body = self.fixtures.load('my_machines_create.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _my_machines_2fb67f5f_53f2_40ab_9d99_b9ff68cfb2ab(self, method, url, + body, headers): + return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru 
libcloud-0.5.0/libcloud/test/compute/test_ktucloud.py libcloud-0.15.1/libcloud/test/compute/test_ktucloud.py --- libcloud-0.5.0/libcloud/test/compute/test_ktucloud.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_ktucloud.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,133 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import unittest + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlparse +from libcloud.utils.py3 import parse_qsl + +try: + import simplejson as json +except ImportError: + import json + +from libcloud.compute.drivers.ktucloud import KTUCloudNodeDriver + +from libcloud.test import MockHttpTestCase +from libcloud.test.compute import TestCaseMixin +from libcloud.test.file_fixtures import ComputeFileFixtures + + +class KTUCloudNodeDriverTest(unittest.TestCase, TestCaseMixin): + + def setUp(self): + KTUCloudNodeDriver.connectionCls.conn_classes = \ + (None, KTUCloudStackMockHttp) + self.driver = KTUCloudNodeDriver('apikey', 'secret', + path='/test/path', + host='api.dummy.com') + self.driver.path = '/test/path' + self.driver.type = -1 + KTUCloudStackMockHttp.fixture_tag = 'default' + self.driver.connection.poll_interval = 0.0 + + def test_create_node_immediate_failure(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + KTUCloudStackMockHttp.fixture_tag = 'deployfail' + try: + self.driver.create_node(name='node-name', image=image, size=size) + except: + return + self.assertTrue(False) + + def test_create_node_delayed_failure(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + KTUCloudStackMockHttp.fixture_tag = 'deployfail2' + try: + self.driver.create_node(name='node-name', image=image, size=size) + except: + return + self.assertTrue(False) + + def test_list_images_no_images_available(self): + KTUCloudStackMockHttp.fixture_tag = 'notemplates' + + images = self.driver.list_images() + self.assertEqual(0, len(images)) + + def test_list_images_available(self): + images = self.driver.list_images() + self.assertEqual(112, len(images)) + + def test_list_sizes_available(self): + sizes = self.driver.list_sizes() + self.assertEqual(112, len(sizes)) + + def test_list_sizes_nodisk(self): + KTUCloudStackMockHttp.fixture_tag = 'nodisk' + + sizes = self.driver.list_sizes() + 
self.assertEqual(2, len(sizes)) + + check = False + size = sizes[1] + if size.id == KTUCloudNodeDriver.EMPTY_DISKOFFERINGID: + check = True + + self.assertTrue(check) + + +class KTUCloudStackMockHttp(MockHttpTestCase): + fixtures = ComputeFileFixtures('ktucloud') + fixture_tag = 'default' + + def _load_fixture(self, fixture): + body = self.fixtures.load(fixture) + return body, json.loads(body) + + def _test_path(self, method, url, body, headers): + url = urlparse.urlparse(url) + query = dict(parse_qsl(url.query)) + + self.assertTrue('apiKey' in query) + self.assertTrue('command' in query) + self.assertTrue('response' in query) + self.assertTrue('signature' in query) + + self.assertTrue(query['response'] == 'json') + + del query['apiKey'] + del query['response'] + del query['signature'] + command = query.pop('command') + + if hasattr(self, '_cmd_' + command): + return getattr(self, '_cmd_' + command)(**query) + else: + fixture = command + '_' + self.fixture_tag + '.json' + body, obj = self._load_fixture(fixture) + return (httplib.OK, body, obj, httplib.responses[httplib.OK]) + + def _cmd_queryAsyncJobResult(self, jobid): + fixture = 'queryAsyncJobResult' + '_' + str(jobid) + '.json' + body, obj = self._load_fixture(fixture) + return (httplib.OK, body, obj, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_linode.py libcloud-0.15.1/libcloud/test/compute/test_linode.py --- libcloud-0.5.0/libcloud/test/compute/test_linode.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_linode.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,164 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Maintainer: Jed Smith +# Based upon code written by Alex Polvi +# + +import sys +import unittest +from libcloud.utils.py3 import httplib + +from libcloud.compute.drivers.linode import LinodeNodeDriver +from libcloud.compute.base import Node, NodeAuthPassword, NodeAuthSSHKey + +from libcloud.test import MockHttp +from libcloud.test.compute import TestCaseMixin +from libcloud.test.file_fixtures import ComputeFileFixtures + + +class LinodeTest(unittest.TestCase, TestCaseMixin): + # The Linode test suite + + def setUp(self): + LinodeNodeDriver.connectionCls.conn_classes = (None, LinodeMockHttp) + LinodeMockHttp.use_param = 'api_action' + self.driver = LinodeNodeDriver('foo') + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + self.assertEqual(len(nodes), 1) + node = nodes[0] + self.assertEqual(node.id, "8098") + self.assertEqual(node.name, 'api-node3') + self.assertEqual(node.extra['PLANID'], '1') + self.assertTrue('75.127.96.245' in node.public_ips) + self.assertEqual(node.private_ips, []) + + def test_reboot_node(self): + # An exception would indicate failure + node = self.driver.list_nodes()[0] + self.driver.reboot_node(node) + + def test_destroy_node(self): + # An exception would indicate failure + node = self.driver.list_nodes()[0] + self.driver.destroy_node(node) + + def test_create_node_password_auth(self): + # Will exception on failure + self.driver.create_node(name="Test", 
+ location=self.driver.list_locations()[0], + size=self.driver.list_sizes()[0], + image=self.driver.list_images()[6], + auth=NodeAuthPassword("test123")) + + def test_create_node_ssh_key_auth(self): + # Will exception on failure + node = self.driver.create_node(name="Test", + location=self.driver.list_locations()[ + 0], + size=self.driver.list_sizes()[0], + image=self.driver.list_images()[6], + auth=NodeAuthSSHKey('foo')) + self.assertTrue(isinstance(node, Node)) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 8) + for size in sizes: + self.assertEqual(size.ram, int(size.name.split(" ")[1])) + + def test_list_images(self): + images = self.driver.list_images() + self.assertEqual(len(images), 30) + + def test_create_node_response(self): + # should return a node object + node = self.driver.create_node(name="node-name", + location=self.driver.list_locations()[ + 0], + size=self.driver.list_sizes()[0], + image=self.driver.list_images()[0], + auth=NodeAuthPassword("foobar")) + self.assertTrue(isinstance(node, Node)) + + +class LinodeMockHttp(MockHttp): + fixtures = ComputeFileFixtures('linode') + + def _avail_datacenters(self, method, url, body, headers): + body = self.fixtures.load('_avail_datacenters.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _avail_linodeplans(self, method, url, body, headers): + body = self.fixtures.load('_avail_linodeplans.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _avail_distributions(self, method, url, body, headers): + body = self.fixtures.load('_avail_distributions.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_create(self, method, url, body, headers): + body = '{"ERRORARRAY":[],"ACTION":"linode.create","DATA":{"LinodeID":8098}}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_disk_createfromdistribution(self, method, url, body, headers): + body = 
'{"ERRORARRAY":[],"ACTION":"linode.disk.createFromDistribution","DATA":{"JobID":1298,"DiskID":55647}}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_delete(self, method, url, body, headers): + body = '{"ERRORARRAY":[],"ACTION":"linode.delete","DATA":{"LinodeID":8098}}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_update(self, method, url, body, headers): + body = '{"ERRORARRAY":[],"ACTION":"linode.update","DATA":{"LinodeID":8098}}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_reboot(self, method, url, body, headers): + body = '{"ERRORARRAY":[],"ACTION":"linode.reboot","DATA":{"JobID":1305}}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _avail_kernels(self, method, url, body, headers): + body = self.fixtures.load('_avail_kernels.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_disk_create(self, method, url, body, headers): + body = '{"ERRORARRAY":[],"ACTION":"linode.disk.create","DATA":{"JobID":1299,"DiskID":55648}}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_boot(self, method, url, body, headers): + body = '{"ERRORARRAY":[],"ACTION":"linode.boot","DATA":{"JobID":1300}}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_config_create(self, method, url, body, headers): + body = '{"ERRORARRAY":[],"ACTION":"linode.config.create","DATA":{"ConfigID":31239}}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_list(self, method, url, body, headers): + body = self.fixtures.load('_linode_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_ip_list(self, method, url, body, headers): + body = self.fixtures.load('_linode_ip_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _batch(self, method, url, body, headers): + body = self.fixtures.load('_batch.json') 
+ return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_nephoscale.py libcloud-0.15.1/libcloud/test/compute/test_nephoscale.py --- libcloud-0.5.0/libcloud/test/compute/test_nephoscale.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_nephoscale.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,188 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Created by Markos Gogoulos (https://mist.io) +# + +import sys +import unittest +from libcloud.utils.py3 import httplib + +from libcloud.compute.drivers.nephoscale import NephoscaleNodeDriver + +from libcloud.test import MockHttp +from libcloud.test.compute import TestCaseMixin +from libcloud.test.file_fixtures import ComputeFileFixtures + + +class NephoScaleTest(unittest.TestCase, TestCaseMixin): + + def setUp(self): + NephoscaleNodeDriver.connectionCls.conn_classes = ( + NephoscaleMockHttp, NephoscaleMockHttp) + self.driver = NephoscaleNodeDriver('user', 'password') + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 13) + for size in sizes: + self.assertEqual(type(size.disk), int) + self.assertEqual(type(size.ram), int) + + def test_list_images(self): + images = self.driver.list_images() + self.assertEqual(len(images), 18) + for image in images: + arch = image.extra.get('architecture') + self.assertTrue(arch.startswith('x86')) + + def test_list_locations(self): + locations = self.driver.list_locations() + self.assertEqual(len(locations), 2) + self.assertEqual(locations[0].name, "SJC-1") + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + self.assertEqual(len(nodes), 2) + self.assertEqual(nodes[0].extra.get('zone'), 'RIC-1') + self.assertEqual(nodes[0].name, 'mongodb-staging') + self.assertEqual(nodes[0].extra.get('service_type'), + 'CS05 - 0.5GB, 1Core, 25GB') + + def test_list_keys(self): + keys = self.driver.ex_list_keypairs() + self.assertEqual(len(keys), 2) + self.assertEqual(keys[0].name, 'mistio-ssh') + + def test_list_ssh_keys(self): + ssh_keys = self.driver.ex_list_keypairs(ssh=True) + self.assertEqual(len(ssh_keys), 1) + self.assertTrue(ssh_keys[0].public_key.startswith('ssh-rsa')) + + def test_list_password_keys(self): + password_keys = self.driver.ex_list_keypairs(password=True) + self.assertEqual(len(password_keys), 1) + self.assertEquals(password_keys[0].password, '23d493j5') + + def 
test_reboot_node(self): + node = self.driver.list_nodes()[0] + result = self.driver.reboot_node(node) + self.assertTrue(result) + + def test_destroy_node(self): + node = self.driver.list_nodes()[0] + result = self.driver.destroy_node(node) + self.assertTrue(result) + + def test_stop_node(self): + node = self.driver.list_nodes()[0] + result = self.driver.ex_stop_node(node) + self.assertTrue(result) + + def test_start_node(self): + node = self.driver.list_nodes()[0] + result = self.driver.ex_start_node(node) + self.assertTrue(result) + + def test_rename_node(self): + node = self.driver.list_nodes()[0] + result = self.driver.rename_node(node, 'new-name') + self.assertTrue(result) + + def test_create_node(self): + name = 'mongodb-staging' + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[3] + node = self.driver.create_node(name=name, + size=size, + nowait=True, + image=image) + self.assertEqual(node.name, 'mongodb-staging') + + def test_create_node_no_name(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[3] + self.assertRaises(TypeError, self.driver.create_node, size=size, + image=image) + + def test_delete_ssh_keys(self): + self.assertTrue(self.driver.ex_delete_keypair(key_id=72209, ssh=True)) + + def test_delete_password_keys(self): + self.assertTrue(self.driver.ex_delete_keypair(key_id=72211)) + + +class NephoscaleMockHttp(MockHttp): + fixtures = ComputeFileFixtures('nephoscale') + + def _server_type_cloud(self, method, url, body, headers): + body = self.fixtures.load('list_sizes.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _server_cloud(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load('success_action.json') + else: + body = self.fixtures.load('list_nodes.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _image_server(self, method, url, body, headers): + body = self.fixtures.load('list_images.json') + return 
(httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _datacenter_zone(self, method, url, body, headers): + body = self.fixtures.load('list_locations.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _key(self, method, url, body, headers): + body = self.fixtures.load('list_keys.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _key_sshrsa(self, method, url, body, headers): + body = self.fixtures.load('list_ssh_keys.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _key_password(self, method, url, body, headers): + body = self.fixtures.load('list_password_keys.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _server_cloud_88241(self, method, url, body, headers): + body = self.fixtures.load('success_action.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _server_cloud_88241_initiator_restart(self, method, url, body, + headers): + body = self.fixtures.load('success_action.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _server_cloud_88241_initiator_start(self, method, url, body, headers): + body = self.fixtures.load('success_action.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _server_cloud_88241_initiator_stop(self, method, url, body, headers): + body = self.fixtures.load('success_action.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _key_password_72211(self, method, url, body, headers): + body = self.fixtures.load('success_action.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _key_sshrsa_72209(self, method, url, body, headers): + body = self.fixtures.load('success_action.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_opennebula.py 
libcloud-0.15.1/libcloud/test/compute/test_opennebula.py --- libcloud-0.5.0/libcloud/test/compute/test_opennebula.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_opennebula.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,1273 @@ +# Copyright 2002-2009, Distributed Systems Architecture Group, Universidad +# Complutense de Madrid (dsa-research.org) +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +OpenNebula.org test suite. 
+""" + +__docformat__ = 'epytext' + +import unittest +import sys + +from libcloud.utils.py3 import httplib + +from libcloud.compute.base import Node, NodeImage, NodeSize, NodeState +from libcloud.compute.drivers.opennebula import OpenNebulaNodeDriver +from libcloud.compute.drivers.opennebula import OpenNebulaNetwork +from libcloud.compute.drivers.opennebula import OpenNebulaResponse +from libcloud.compute.drivers.opennebula import OpenNebulaNodeSize +from libcloud.compute.drivers.opennebula import ACTION + +from libcloud.test.file_fixtures import ComputeFileFixtures +from libcloud.common.types import InvalidCredsError +from libcloud.test import MockResponse, MockHttp +from libcloud.test.compute import TestCaseMixin + +from libcloud.test.secrets import OPENNEBULA_PARAMS + + +class OpenNebulaCaseMixin(TestCaseMixin): + + def test_reboot_node_response(self): + pass + + +class OpenNebula_ResponseTests(unittest.TestCase): + XML = """""" + + def test_unauthorized_response(self): + http_response = MockResponse(httplib.UNAUTHORIZED, + OpenNebula_ResponseTests.XML, + headers={'content-type': + 'application/xml'}) + try: + OpenNebulaResponse(http_response, None).parse_body() + except InvalidCredsError: + exceptionType = sys.exc_info()[0] + self.assertEqual(exceptionType, type(InvalidCredsError())) + + +class OpenNebula_1_4_Tests(unittest.TestCase, OpenNebulaCaseMixin): + + """ + OpenNebula.org test suite for OpenNebula v1.4. + """ + + def setUp(self): + """ + Setup test environment. + """ + OpenNebulaNodeDriver.connectionCls.conn_classes = ( + OpenNebula_1_4_MockHttp, OpenNebula_1_4_MockHttp) + self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('1.4',)) + + def test_create_node(self): + """ + Test create_node functionality. 
+ """ + image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver) + size = NodeSize(id=1, name='small', ram=None, disk=None, + bandwidth=None, price=None, driver=self.driver) + networks = list() + networks.append(OpenNebulaNetwork(id=5, name='Network 5', + address='192.168.0.0', size=256, driver=self.driver)) + networks.append(OpenNebulaNetwork(id=15, name='Network 15', + address='192.168.1.0', size=256, driver=self.driver)) + + node = self.driver.create_node(name='Compute 5', image=image, + size=size, networks=networks) + + self.assertEqual(node.id, '5') + self.assertEqual(node.name, 'Compute 5') + self.assertEqual(node.state, + OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']) + self.assertEqual(node.public_ips[0].name, None) + self.assertEqual(node.public_ips[0].id, '5') + self.assertEqual(node.public_ips[0].address, '192.168.0.1') + self.assertEqual(node.public_ips[0].size, 1) + self.assertEqual(node.public_ips[1].name, None) + self.assertEqual(node.public_ips[1].id, '15') + self.assertEqual(node.public_ips[1].address, '192.168.1.1') + self.assertEqual(node.public_ips[1].size, 1) + self.assertEqual(node.private_ips, []) + self.assertEqual(node.image.id, '5') + self.assertEqual(node.image.extra['dev'], 'sda1') + + def test_destroy_node(self): + """ + Test destroy_node functionality. + """ + node = Node(5, None, None, None, None, self.driver) + ret = self.driver.destroy_node(node) + self.assertTrue(ret) + + def test_list_nodes(self): + """ + Test list_nodes functionality. 
+ """ + nodes = self.driver.list_nodes() + + self.assertEqual(len(nodes), 3) + node = nodes[0] + self.assertEqual(node.id, '5') + self.assertEqual(node.name, 'Compute 5') + self.assertEqual(node.state, + OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']) + self.assertEqual(node.public_ips[0].id, '5') + self.assertEqual(node.public_ips[0].name, None) + self.assertEqual(node.public_ips[0].address, '192.168.0.1') + self.assertEqual(node.public_ips[0].size, 1) + self.assertEqual(node.public_ips[1].id, '15') + self.assertEqual(node.public_ips[1].name, None) + self.assertEqual(node.public_ips[1].address, '192.168.1.1') + self.assertEqual(node.public_ips[1].size, 1) + self.assertEqual(node.private_ips, []) + self.assertEqual(node.image.id, '5') + self.assertEqual(node.image.extra['dev'], 'sda1') + node = nodes[1] + self.assertEqual(node.id, '15') + self.assertEqual(node.name, 'Compute 15') + self.assertEqual(node.state, + OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']) + self.assertEqual(node.public_ips[0].id, '5') + self.assertEqual(node.public_ips[0].name, None) + self.assertEqual(node.public_ips[0].address, '192.168.0.2') + self.assertEqual(node.public_ips[0].size, 1) + self.assertEqual(node.public_ips[1].id, '15') + self.assertEqual(node.public_ips[1].name, None) + self.assertEqual(node.public_ips[1].address, '192.168.1.2') + self.assertEqual(node.public_ips[1].size, 1) + self.assertEqual(node.private_ips, []) + self.assertEqual(node.image.id, '15') + self.assertEqual(node.image.extra['dev'], 'sda1') + node = nodes[2] + self.assertEqual(node.id, '25') + self.assertEqual(node.name, 'Compute 25') + self.assertEqual(node.state, + NodeState.UNKNOWN) + self.assertEqual(node.public_ips[0].id, '5') + self.assertEqual(node.public_ips[0].name, None) + self.assertEqual(node.public_ips[0].address, '192.168.0.3') + self.assertEqual(node.public_ips[0].size, 1) + self.assertEqual(node.public_ips[1].id, '15') + self.assertEqual(node.public_ips[1].name, None) + 
self.assertEqual(node.public_ips[1].address, '192.168.1.3') + self.assertEqual(node.public_ips[1].size, 1) + self.assertEqual(node.private_ips, []) + self.assertEqual(node.image, None) + + def test_list_images(self): + """ + Test list_images functionality. + """ + images = self.driver.list_images() + + self.assertEqual(len(images), 2) + image = images[0] + self.assertEqual(image.id, '5') + self.assertEqual(image.name, 'Ubuntu 9.04 LAMP') + self.assertEqual(image.extra['size'], '2048') + self.assertEqual(image.extra['url'], + 'file:///images/ubuntu/jaunty.img') + image = images[1] + self.assertEqual(image.id, '15') + self.assertEqual(image.name, 'Ubuntu 9.04 LAMP') + self.assertEqual(image.extra['size'], '2048') + self.assertEqual(image.extra['url'], + 'file:///images/ubuntu/jaunty.img') + + def test_list_sizes(self): + """ + Test list_sizes functionality. + """ + sizes = self.driver.list_sizes() + + self.assertEqual(len(sizes), 3) + size = sizes[0] + self.assertEqual(size.id, '1') + self.assertEqual(size.name, 'small') + self.assertEqual(size.ram, None) + self.assertEqual(size.disk, None) + self.assertEqual(size.bandwidth, None) + self.assertEqual(size.price, None) + size = sizes[1] + self.assertEqual(size.id, '2') + self.assertEqual(size.name, 'medium') + self.assertEqual(size.ram, None) + self.assertEqual(size.disk, None) + self.assertEqual(size.bandwidth, None) + self.assertEqual(size.price, None) + size = sizes[2] + self.assertEqual(size.id, '3') + self.assertEqual(size.name, 'large') + self.assertEqual(size.ram, None) + self.assertEqual(size.disk, None) + self.assertEqual(size.bandwidth, None) + self.assertEqual(size.price, None) + + def test_list_locations(self): + """ + Test list_locations functionality. 
+ """ + locations = self.driver.list_locations() + + self.assertEqual(len(locations), 1) + location = locations[0] + self.assertEqual(location.id, '0') + self.assertEqual(location.name, '') + self.assertEqual(location.country, '') + + def test_ex_list_networks(self): + """ + Test ex_list_networks functionality. + """ + networks = self.driver.ex_list_networks() + + self.assertEqual(len(networks), 2) + network = networks[0] + self.assertEqual(network.id, '5') + self.assertEqual(network.name, 'Network 5') + self.assertEqual(network.address, '192.168.0.0') + self.assertEqual(network.size, '256') + network = networks[1] + self.assertEqual(network.id, '15') + self.assertEqual(network.name, 'Network 15') + self.assertEqual(network.address, '192.168.1.0') + self.assertEqual(network.size, '256') + + def test_ex_node_action(self): + """ + Test ex_node_action functionality. + """ + node = Node(5, None, None, None, None, self.driver) + ret = self.driver.ex_node_action(node, ACTION.STOP) + self.assertTrue(ret) + + +class OpenNebula_2_0_Tests(unittest.TestCase, OpenNebulaCaseMixin): + + """ + OpenNebula.org test suite for OpenNebula v2.0 through v2.2. + """ + + def setUp(self): + """ + Setup test environment. + """ + OpenNebulaNodeDriver.connectionCls.conn_classes = ( + OpenNebula_2_0_MockHttp, OpenNebula_2_0_MockHttp) + self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('2.0',)) + + def test_create_node(self): + """ + Test create_node functionality. 
+ """ + image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver) + size = OpenNebulaNodeSize(id=1, name='small', ram=1024, cpu=1, + disk=None, bandwidth=None, price=None, + driver=self.driver) + networks = list() + networks.append(OpenNebulaNetwork(id=5, name='Network 5', + address='192.168.0.0', size=256, driver=self.driver)) + networks.append(OpenNebulaNetwork(id=15, name='Network 15', + address='192.168.1.0', size=256, driver=self.driver)) + context = {'hostname': 'compute-5'} + + node = self.driver.create_node(name='Compute 5', image=image, + size=size, networks=networks, + context=context) + + self.assertEqual(node.id, '5') + self.assertEqual(node.name, 'Compute 5') + self.assertEqual(node.state, + OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']) + self.assertEqual(node.public_ips[0].id, '5') + self.assertEqual(node.public_ips[0].name, 'Network 5') + self.assertEqual(node.public_ips[0].address, '192.168.0.1') + self.assertEqual(node.public_ips[0].size, 1) + self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:01') + self.assertEqual(node.public_ips[1].id, '15') + self.assertEqual(node.public_ips[1].name, 'Network 15') + self.assertEqual(node.public_ips[1].address, '192.168.1.1') + self.assertEqual(node.public_ips[1].size, 1) + self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:01') + self.assertEqual(node.private_ips, []) + self.assertTrue(len([s for s in self.driver.list_sizes() + if s.id == node.size.id]) == 1) + self.assertEqual(node.image.id, '5') + self.assertEqual(node.image.name, 'Ubuntu 9.04 LAMP') + self.assertEqual(node.image.extra['type'], 'DISK') + self.assertEqual(node.image.extra['target'], 'hda') + context = node.extra['context'] + self.assertEqual(context['hostname'], 'compute-5') + + def test_destroy_node(self): + """ + Test destroy_node functionality. 
+ """ + node = Node(5, None, None, None, None, self.driver) + ret = self.driver.destroy_node(node) + self.assertTrue(ret) + + def test_list_nodes(self): + """ + Test list_nodes functionality. + """ + nodes = self.driver.list_nodes() + + self.assertEqual(len(nodes), 3) + node = nodes[0] + self.assertEqual(node.id, '5') + self.assertEqual(node.name, 'Compute 5') + self.assertEqual(node.state, + OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']) + self.assertEqual(node.public_ips[0].id, '5') + self.assertEqual(node.public_ips[0].name, 'Network 5') + self.assertEqual(node.public_ips[0].address, '192.168.0.1') + self.assertEqual(node.public_ips[0].size, 1) + self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:01') + self.assertEqual(node.public_ips[1].id, '15') + self.assertEqual(node.public_ips[1].name, 'Network 15') + self.assertEqual(node.public_ips[1].address, '192.168.1.1') + self.assertEqual(node.public_ips[1].size, 1) + self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:01') + self.assertEqual(node.private_ips, []) + self.assertTrue(len([size for size in self.driver.list_sizes() + if size.id == node.size.id]) == 1) + self.assertEqual(node.size.id, '1') + self.assertEqual(node.size.name, 'small') + self.assertEqual(node.size.ram, 1024) + self.assertTrue(node.size.cpu is None or isinstance(node.size.cpu, + int)) + self.assertTrue(node.size.vcpu is None or isinstance(node.size.vcpu, + int)) + self.assertEqual(node.size.cpu, 1) + self.assertEqual(node.size.vcpu, None) + self.assertEqual(node.size.disk, None) + self.assertEqual(node.size.bandwidth, None) + self.assertEqual(node.size.price, None) + self.assertTrue(len([image for image in self.driver.list_images() + if image.id == node.image.id]) == 1) + self.assertEqual(node.image.id, '5') + self.assertEqual(node.image.name, 'Ubuntu 9.04 LAMP') + self.assertEqual(node.image.extra['type'], 'DISK') + self.assertEqual(node.image.extra['target'], 'hda') + context = node.extra['context'] + 
self.assertEqual(context['hostname'], 'compute-5') + node = nodes[1] + self.assertEqual(node.id, '15') + self.assertEqual(node.name, 'Compute 15') + self.assertEqual(node.state, + OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']) + self.assertEqual(node.public_ips[0].id, '5') + self.assertEqual(node.public_ips[0].name, 'Network 5') + self.assertEqual(node.public_ips[0].address, '192.168.0.2') + self.assertEqual(node.public_ips[0].size, 1) + self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:02') + self.assertEqual(node.public_ips[1].id, '15') + self.assertEqual(node.public_ips[1].name, 'Network 15') + self.assertEqual(node.public_ips[1].address, '192.168.1.2') + self.assertEqual(node.public_ips[1].size, 1) + self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:02') + self.assertEqual(node.private_ips, []) + self.assertTrue(len([size for size in self.driver.list_sizes() + if size.id == node.size.id]) == 1) + self.assertEqual(node.size.id, '1') + self.assertEqual(node.size.name, 'small') + self.assertEqual(node.size.ram, 1024) + self.assertTrue(node.size.cpu is None or isinstance(node.size.cpu, + int)) + self.assertTrue(node.size.vcpu is None or isinstance(node.size.vcpu, + int)) + self.assertEqual(node.size.cpu, 1) + self.assertEqual(node.size.vcpu, None) + self.assertEqual(node.size.disk, None) + self.assertEqual(node.size.bandwidth, None) + self.assertEqual(node.size.price, None) + self.assertTrue(len([image for image in self.driver.list_images() + if image.id == node.image.id]) == 1) + self.assertEqual(node.image.id, '15') + self.assertEqual(node.image.name, 'Ubuntu 9.04 LAMP') + self.assertEqual(node.image.extra['type'], 'DISK') + self.assertEqual(node.image.extra['target'], 'hda') + context = node.extra['context'] + self.assertEqual(context['hostname'], 'compute-15') + node = nodes[2] + self.assertEqual(node.id, '25') + self.assertEqual(node.name, 'Compute 25') + self.assertEqual(node.state, + NodeState.UNKNOWN) + 
self.assertEqual(node.public_ips[0].id, '5') + self.assertEqual(node.public_ips[0].name, 'Network 5') + self.assertEqual(node.public_ips[0].address, '192.168.0.3') + self.assertEqual(node.public_ips[0].size, 1) + self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:03') + self.assertEqual(node.public_ips[1].id, '15') + self.assertEqual(node.public_ips[1].name, 'Network 15') + self.assertEqual(node.public_ips[1].address, '192.168.1.3') + self.assertEqual(node.public_ips[1].size, 1) + self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:03') + self.assertEqual(node.private_ips, []) + self.assertEqual(node.size, None) + self.assertEqual(node.image, None) + context = node.extra['context'] + self.assertEqual(context, {}) + + def test_list_images(self): + """ + Test list_images functionality. + """ + images = self.driver.list_images() + + self.assertEqual(len(images), 2) + image = images[0] + self.assertEqual(image.id, '5') + self.assertEqual(image.name, 'Ubuntu 9.04 LAMP') + self.assertEqual(image.extra['description'], + 'Ubuntu 9.04 LAMP Description') + self.assertEqual(image.extra['type'], 'OS') + self.assertEqual(image.extra['size'], '2048') + image = images[1] + self.assertEqual(image.id, '15') + self.assertEqual(image.name, 'Ubuntu 9.04 LAMP') + self.assertEqual(image.extra['description'], + 'Ubuntu 9.04 LAMP Description') + self.assertEqual(image.extra['type'], 'OS') + self.assertEqual(image.extra['size'], '2048') + + def test_list_sizes(self): + """ + Test list_sizes functionality. 
+ """ + sizes = self.driver.list_sizes() + + self.assertEqual(len(sizes), 4) + size = sizes[0] + self.assertEqual(size.id, '1') + self.assertEqual(size.name, 'small') + self.assertEqual(size.ram, 1024) + self.assertTrue(size.cpu is None or isinstance(size.cpu, int)) + self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int)) + self.assertEqual(size.cpu, 1) + self.assertEqual(size.vcpu, None) + self.assertEqual(size.disk, None) + self.assertEqual(size.bandwidth, None) + self.assertEqual(size.price, None) + size = sizes[1] + self.assertEqual(size.id, '2') + self.assertEqual(size.name, 'medium') + self.assertEqual(size.ram, 4096) + self.assertTrue(size.cpu is None or isinstance(size.cpu, int)) + self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int)) + self.assertEqual(size.cpu, 4) + self.assertEqual(size.vcpu, None) + self.assertEqual(size.disk, None) + self.assertEqual(size.bandwidth, None) + self.assertEqual(size.price, None) + size = sizes[2] + self.assertEqual(size.id, '3') + self.assertEqual(size.name, 'large') + self.assertEqual(size.ram, 8192) + self.assertTrue(size.cpu is None or isinstance(size.cpu, int)) + self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int)) + self.assertEqual(size.cpu, 8) + self.assertEqual(size.vcpu, None) + self.assertEqual(size.disk, None) + self.assertEqual(size.bandwidth, None) + self.assertEqual(size.price, None) + size = sizes[3] + self.assertEqual(size.id, '4') + self.assertEqual(size.name, 'custom') + self.assertEqual(size.ram, 0) + self.assertEqual(size.cpu, 0) + self.assertEqual(size.vcpu, None) + self.assertEqual(size.disk, None) + self.assertEqual(size.bandwidth, None) + self.assertEqual(size.price, None) + + def test_list_locations(self): + """ + Test list_locations functionality. 
+ """ + locations = self.driver.list_locations() + + self.assertEqual(len(locations), 1) + location = locations[0] + self.assertEqual(location.id, '0') + self.assertEqual(location.name, '') + self.assertEqual(location.country, '') + + def test_ex_list_networks(self): + """ + Test ex_list_networks functionality. + """ + networks = self.driver.ex_list_networks() + + self.assertEqual(len(networks), 2) + network = networks[0] + self.assertEqual(network.id, '5') + self.assertEqual(network.name, 'Network 5') + self.assertEqual(network.address, '192.168.0.0') + self.assertEqual(network.size, '256') + network = networks[1] + self.assertEqual(network.id, '15') + self.assertEqual(network.name, 'Network 15') + self.assertEqual(network.address, '192.168.1.0') + self.assertEqual(network.size, '256') + + +class OpenNebula_3_0_Tests(unittest.TestCase, OpenNebulaCaseMixin): + + """ + OpenNebula.org test suite for OpenNebula v3.0. + """ + + def setUp(self): + """ + Setup test environment. + """ + OpenNebulaNodeDriver.connectionCls.conn_classes = ( + OpenNebula_3_0_MockHttp, OpenNebula_3_0_MockHttp) + self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.0',)) + + def test_ex_list_networks(self): + """ + Test ex_list_networks functionality. + """ + networks = self.driver.ex_list_networks() + + self.assertEqual(len(networks), 2) + network = networks[0] + self.assertEqual(network.id, '5') + self.assertEqual(network.name, 'Network 5') + self.assertEqual(network.address, '192.168.0.0') + self.assertEqual(network.size, '256') + self.assertEqual(network.extra['public'], 'YES') + network = networks[1] + self.assertEqual(network.id, '15') + self.assertEqual(network.name, 'Network 15') + self.assertEqual(network.address, '192.168.1.0') + self.assertEqual(network.size, '256') + self.assertEqual(network.extra['public'], 'NO') + + def test_ex_node_set_save_name(self): + """ + Test ex_node_action functionality. 
+ """ + image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver) + node = Node(5, None, None, None, None, self.driver, image=image) + ret = self.driver.ex_node_set_save_name(node, 'test') + self.assertTrue(ret) + + +class OpenNebula_3_2_Tests(unittest.TestCase, OpenNebulaCaseMixin): + + """ + OpenNebula.org test suite for OpenNebula v3.2. + """ + + def setUp(self): + """ + Setup test environment. + """ + OpenNebulaNodeDriver.connectionCls.conn_classes = ( + OpenNebula_3_2_MockHttp, OpenNebula_3_2_MockHttp) + self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.2',)) + + def test_reboot_node(self): + """ + Test reboot_node functionality. + """ + image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver) + node = Node(5, None, None, None, None, self.driver, image=image) + ret = self.driver.reboot_node(node) + self.assertTrue(ret) + + def test_list_sizes(self): + """ + Test ex_list_networks functionality. + """ + sizes = self.driver.list_sizes() + + self.assertEqual(len(sizes), 3) + size = sizes[0] + self.assertEqual(size.id, '1') + self.assertEqual(size.name, 'small') + self.assertEqual(size.ram, 1024) + self.assertTrue(size.cpu is None or isinstance(size.cpu, float)) + self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int)) + self.assertEqual(size.cpu, 1) + self.assertEqual(size.vcpu, None) + self.assertEqual(size.disk, None) + self.assertEqual(size.bandwidth, None) + self.assertEqual(size.price, None) + size = sizes[1] + self.assertEqual(size.id, '2') + self.assertEqual(size.name, 'medium') + self.assertEqual(size.ram, 4096) + self.assertTrue(size.cpu is None or isinstance(size.cpu, float)) + self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int)) + self.assertEqual(size.cpu, 4) + self.assertEqual(size.vcpu, None) + self.assertEqual(size.disk, None) + self.assertEqual(size.bandwidth, None) + self.assertEqual(size.price, None) + size = sizes[2] + self.assertEqual(size.id, '3') + self.assertEqual(size.name, 'large') + 
self.assertEqual(size.ram, 8192) + self.assertTrue(size.cpu is None or isinstance(size.cpu, float)) + self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int)) + self.assertEqual(size.cpu, 8) + self.assertEqual(size.vcpu, None) + self.assertEqual(size.disk, None) + self.assertEqual(size.bandwidth, None) + self.assertEqual(size.price, None) + + +class OpenNebula_3_6_Tests(unittest.TestCase, OpenNebulaCaseMixin): + + """ + OpenNebula.org test suite for OpenNebula v3.6. + """ + + def setUp(self): + """ + Setup test environment. + """ + OpenNebulaNodeDriver.connectionCls.conn_classes = ( + OpenNebula_3_6_MockHttp, OpenNebula_3_6_MockHttp) + self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.6',)) + + def test_create_volume(self): + new_volume = self.driver.create_volume(1000, 'test-volume') + + self.assertEqual(new_volume.id, '5') + self.assertEqual(new_volume.size, 1000) + self.assertEqual(new_volume.name, 'test-volume') + + def test_destroy_volume(self): + images = self.driver.list_images() + + self.assertEqual(len(images), 2) + image = images[0] + + ret = self.driver.destroy_volume(image) + self.assertTrue(ret) + + def test_attach_volume(self): + nodes = self.driver.list_nodes() + node = nodes[0] + + images = self.driver.list_images() + image = images[0] + + ret = self.driver.attach_volume(node, image, 'sda') + self.assertTrue(ret) + + def test_detach_volume(self): + images = self.driver.list_images() + image = images[1] + + ret = self.driver.detach_volume(image) + self.assertTrue(ret) + + nodes = self.driver.list_nodes() + # node with only a single associated image + node = nodes[1] + + ret = self.driver.detach_volume(node.image) + self.assertFalse(ret) + + def test_list_volumes(self): + volumes = self.driver.list_volumes() + + self.assertEqual(len(volumes), 2) + + volume = volumes[0] + self.assertEqual(volume.id, '5') + self.assertEqual(volume.size, 2048) + self.assertEqual(volume.name, 'Ubuntu 9.04 LAMP') + + volume = volumes[1] + 
self.assertEqual(volume.id, '15') + self.assertEqual(volume.size, 1024) + self.assertEqual(volume.name, 'Debian Sid') + + +class OpenNebula_3_8_Tests(unittest.TestCase, OpenNebulaCaseMixin): + + """ + OpenNebula.org test suite for OpenNebula v3.8. + """ + + def setUp(self): + """ + Setup test environment. + """ + OpenNebulaNodeDriver.connectionCls.conn_classes = ( + OpenNebula_3_8_MockHttp, OpenNebula_3_8_MockHttp) + self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.8',)) + + def test_list_sizes(self): + """ + Test ex_list_networks functionality. + """ + sizes = self.driver.list_sizes() + + self.assertEqual(len(sizes), 3) + size = sizes[0] + self.assertEqual(size.id, '1') + self.assertEqual(size.name, 'small') + self.assertEqual(size.ram, 1024) + self.assertEqual(size.cpu, 1) + self.assertEqual(size.vcpu, None) + self.assertEqual(size.disk, None) + self.assertEqual(size.bandwidth, None) + self.assertEqual(size.price, None) + + size = sizes[1] + self.assertEqual(size.id, '2') + self.assertEqual(size.name, 'medium') + self.assertEqual(size.ram, 4096) + self.assertEqual(size.cpu, 4) + self.assertEqual(size.vcpu, None) + self.assertEqual(size.disk, None) + self.assertEqual(size.bandwidth, None) + self.assertEqual(size.price, None) + + size = sizes[2] + self.assertEqual(size.id, '3') + self.assertEqual(size.name, 'large') + self.assertEqual(size.ram, 8192) + self.assertEqual(size.cpu, 8) + self.assertEqual(size.vcpu, None) + self.assertEqual(size.disk, None) + self.assertEqual(size.bandwidth, None) + self.assertEqual(size.price, None) + + +class OpenNebula_1_4_MockHttp(MockHttp): + + """ + Mock HTTP server for testing v1.4 of the OpenNebula.org compute driver. + """ + + fixtures = ComputeFileFixtures('opennebula_1_4') + + def _compute(self, method, url, body, headers): + """ + Compute pool resources. 
+ """ + if method == 'GET': + body = self.fixtures.load('computes.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'POST': + body = self.fixtures.load('compute_5.xml') + return (httplib.CREATED, body, {}, + httplib.responses[httplib.CREATED]) + + def _storage(self, method, url, body, headers): + """ + Storage pool resources. + """ + if method == 'GET': + body = self.fixtures.load('storage.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'POST': + body = self.fixtures.load('disk_5.xml') + return (httplib.CREATED, body, {}, + httplib.responses[httplib.CREATED]) + + def _network(self, method, url, body, headers): + """ + Network pool resources. + """ + if method == 'GET': + body = self.fixtures.load('networks.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'POST': + body = self.fixtures.load('network_5.xml') + return (httplib.CREATED, body, {}, + httplib.responses[httplib.CREATED]) + + def _compute_5(self, method, url, body, headers): + """ + Compute entry resource. + """ + if method == 'GET': + body = self.fixtures.load('compute_5.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'PUT': + body = "" + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + + if method == 'DELETE': + body = "" + return (httplib.OK, body, {}, + httplib.responses[httplib.OK]) + + def _compute_15(self, method, url, body, headers): + """ + Compute entry resource. + """ + if method == 'GET': + body = self.fixtures.load('compute_15.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'PUT': + body = "" + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + + if method == 'DELETE': + body = "" + return (httplib.OK, body, {}, + httplib.responses[httplib.OK]) + + def _compute_25(self, method, url, body, headers): + """ + Compute entry resource. 
+ """ + if method == 'GET': + body = self.fixtures.load('compute_25.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'PUT': + body = "" + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + + if method == 'DELETE': + body = "" + return (httplib.OK, body, {}, + httplib.responses[httplib.OK]) + + def _storage_5(self, method, url, body, headers): + """ + Storage entry resource. + """ + if method == 'GET': + body = self.fixtures.load('disk_5.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'DELETE': + body = "" + return (httplib.OK, body, {}, + httplib.responses[httplib.OK]) + + def _storage_15(self, method, url, body, headers): + """ + Storage entry resource. + """ + if method == 'GET': + body = self.fixtures.load('disk_15.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'DELETE': + body = "" + return (httplib.OK, body, {}, + httplib.responses[httplib.OK]) + + def _network_5(self, method, url, body, headers): + """ + Network entry resource. + """ + if method == 'GET': + body = self.fixtures.load('network_5.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'DELETE': + body = "" + return (httplib.OK, body, {}, + httplib.responses[httplib.OK]) + + def _network_15(self, method, url, body, headers): + """ + Network entry resource. + """ + if method == 'GET': + body = self.fixtures.load('network_15.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'DELETE': + body = "" + return (httplib.OK, body, {}, + httplib.responses[httplib.OK]) + + +class OpenNebula_2_0_MockHttp(MockHttp): + + """ + Mock HTTP server for testing v2.0 through v3.2 of the OpenNebula.org + compute driver. + """ + + fixtures = ComputeFileFixtures('opennebula_2_0') + + def _compute(self, method, url, body, headers): + """ + Compute pool resources. 
+ """ + if method == 'GET': + body = self.fixtures.load('compute_collection.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'POST': + body = self.fixtures.load('compute_5.xml') + return (httplib.CREATED, body, {}, + httplib.responses[httplib.CREATED]) + + def _storage(self, method, url, body, headers): + """ + Storage pool resources. + """ + if method == 'GET': + body = self.fixtures.load('storage_collection.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'POST': + body = self.fixtures.load('storage_5.xml') + return (httplib.CREATED, body, {}, + httplib.responses[httplib.CREATED]) + + def _network(self, method, url, body, headers): + """ + Network pool resources. + """ + if method == 'GET': + body = self.fixtures.load('network_collection.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'POST': + body = self.fixtures.load('network_5.xml') + return (httplib.CREATED, body, {}, + httplib.responses[httplib.CREATED]) + + def _compute_5(self, method, url, body, headers): + """ + Compute entry resource. + """ + if method == 'GET': + body = self.fixtures.load('compute_5.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'PUT': + body = "" + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + + if method == 'DELETE': + body = "" + return (httplib.NO_CONTENT, body, {}, + httplib.responses[httplib.NO_CONTENT]) + + def _compute_15(self, method, url, body, headers): + """ + Compute entry resource. 
+ """ + if method == 'GET': + body = self.fixtures.load('compute_15.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'PUT': + body = "" + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + + if method == 'DELETE': + body = "" + return (httplib.NO_CONTENT, body, {}, + httplib.responses[httplib.NO_CONTENT]) + + def _compute_25(self, method, url, body, headers): + """ + Compute entry resource. + """ + if method == 'GET': + body = self.fixtures.load('compute_25.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'PUT': + body = "" + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + + if method == 'DELETE': + body = "" + return (httplib.NO_CONTENT, body, {}, + httplib.responses[httplib.NO_CONTENT]) + + def _storage_5(self, method, url, body, headers): + """ + Storage entry resource. + """ + if method == 'GET': + body = self.fixtures.load('storage_5.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'DELETE': + body = "" + return (httplib.NO_CONTENT, body, {}, + httplib.responses[httplib.NO_CONTENT]) + + def _storage_15(self, method, url, body, headers): + """ + Storage entry resource. + """ + if method == 'GET': + body = self.fixtures.load('storage_15.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'DELETE': + body = "" + return (httplib.NO_CONTENT, body, {}, + httplib.responses[httplib.NO_CONTENT]) + + def _network_5(self, method, url, body, headers): + """ + Network entry resource. + """ + if method == 'GET': + body = self.fixtures.load('network_5.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'DELETE': + body = "" + return (httplib.NO_CONTENT, body, {}, + httplib.responses[httplib.NO_CONTENT]) + + def _network_15(self, method, url, body, headers): + """ + Network entry resource. 
+ """ + if method == 'GET': + body = self.fixtures.load('network_15.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'DELETE': + body = "" + return (httplib.NO_CONTENT, body, {}, + httplib.responses[httplib.NO_CONTENT]) + + +class OpenNebula_3_0_MockHttp(OpenNebula_2_0_MockHttp): + + """ + Mock HTTP server for testing v3.0 of the OpenNebula.org compute driver. + """ + + fixtures_3_0 = ComputeFileFixtures('opennebula_3_0') + + def _network(self, method, url, body, headers): + """ + Network pool resources. + """ + if method == 'GET': + body = self.fixtures_3_0.load('network_collection.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'POST': + body = self.fixtures.load('network_5.xml') + return (httplib.CREATED, body, {}, + httplib.responses[httplib.CREATED]) + + def _network_5(self, method, url, body, headers): + """ + Network entry resource. + """ + if method == 'GET': + body = self.fixtures_3_0.load('network_5.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'DELETE': + body = "" + return (httplib.NO_CONTENT, body, {}, + httplib.responses[httplib.NO_CONTENT]) + + def _network_15(self, method, url, body, headers): + """ + Network entry resource. + """ + if method == 'GET': + body = self.fixtures_3_0.load('network_15.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'DELETE': + body = "" + return (httplib.NO_CONTENT, body, {}, + httplib.responses[httplib.NO_CONTENT]) + + +class OpenNebula_3_2_MockHttp(OpenNebula_3_0_MockHttp): + + """ + Mock HTTP server for testing v3.2 of the OpenNebula.org compute driver. + """ + + fixtures_3_2 = ComputeFileFixtures('opennebula_3_2') + + def _compute_5(self, method, url, body, headers): + """ + Compute entry resource. 
+ """ + if method == 'GET': + body = self.fixtures.load('compute_5.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'PUT': + body = "" + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + + if method == 'DELETE': + body = "" + return (httplib.NO_CONTENT, body, {}, + httplib.responses[httplib.NO_CONTENT]) + + def _instance_type(self, method, url, body, headers): + """ + Instance type pool. + """ + if method == 'GET': + body = self.fixtures_3_2.load('instance_type_collection.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +class OpenNebula_3_6_MockHttp(OpenNebula_3_2_MockHttp): + + """ + Mock HTTP server for testing v3.6 of the OpenNebula.org compute driver. + """ + + fixtures_3_6 = ComputeFileFixtures('opennebula_3_6') + + def _storage(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('storage_collection.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'POST': + body = self.fixtures_3_6.load('storage_5.xml') + return (httplib.CREATED, body, {}, + httplib.responses[httplib.CREATED]) + + def _compute_5(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures_3_6.load('compute_5.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'PUT': + body = "" + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + + if method == 'DELETE': + body = "" + return (httplib.NO_CONTENT, body, {}, + httplib.responses[httplib.NO_CONTENT]) + + def _compute_5_action(self, method, url, body, headers): + body = self.fixtures_3_6.load('compute_5.xml') + if method == 'POST': + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + + if method == 'GET': + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _compute_15(self, method, url, body, headers): + if method == 'GET': + body = 
self.fixtures_3_6.load('compute_15.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'PUT': + body = "" + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + + if method == 'DELETE': + body = "" + return (httplib.NO_CONTENT, body, {}, + httplib.responses[httplib.NO_CONTENT]) + + def _storage_10(self, method, url, body, headers): + """ + Storage entry resource. + """ + if method == 'GET': + body = self.fixtures_3_6.load('disk_10.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _storage_15(self, method, url, body, headers): + """ + Storage entry resource. + """ + if method == 'GET': + body = self.fixtures_3_6.load('disk_15.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +class OpenNebula_3_8_MockHttp(OpenNebula_3_2_MockHttp): + + """ + Mock HTTP server for testing v3.8 of the OpenNebula.org compute driver. + """ + + fixtures_3_8 = ComputeFileFixtures('opennebula_3_8') + + def _instance_type(self, method, url, body, headers): + """ + Instance type pool. + """ + if method == 'GET': + body = self.fixtures_3_8.load('instance_type_collection.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _instance_type_small(self, method, url, body, headers): + """ + Small instance type. + """ + if method == 'GET': + body = self.fixtures_3_8.load('instance_type_small.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _instance_type_medium(self, method, url, body, headers): + """ + Medium instance type pool. + """ + if method == 'GET': + body = self.fixtures_3_8.load('instance_type_medium.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _instance_type_large(self, method, url, body, headers): + """ + Large instance type pool. 
+ """ + if method == 'GET': + body = self.fixtures_3_8.load('instance_type_large.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_openstack.py libcloud-0.15.1/libcloud/test/compute/test_openstack.py --- libcloud-0.5.0/libcloud/test/compute/test_openstack.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_openstack.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,2014 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import with_statement + +import os +import sys +import unittest +import datetime + +try: + import simplejson as json +except ImportError: + import json + +from mock import Mock + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import method_type +from libcloud.utils.py3 import u + +from libcloud.common.types import InvalidCredsError, MalformedResponseError, \ + LibcloudError +from libcloud.common.openstack import OpenStackBaseConnection +from libcloud.common.openstack import OpenStackAuthConnection +from libcloud.common.openstack import AUTH_TOKEN_EXPIRES_GRACE_SECONDS +from libcloud.compute.types import Provider, KeyPairDoesNotExistError +from libcloud.compute.providers import get_driver +from libcloud.compute.drivers.openstack import ( + OpenStack_1_0_NodeDriver, OpenStack_1_0_Response, + OpenStack_1_1_NodeDriver, OpenStackSecurityGroup, + OpenStackSecurityGroupRule, OpenStack_1_1_FloatingIpPool, + OpenStack_1_1_FloatingIpAddress, OpenStackKeyPair +) +from libcloud.compute.base import Node, NodeImage, NodeSize +from libcloud.pricing import set_pricing, clear_pricing_data + +from libcloud.test import MockResponse, MockHttpTestCase, XML_HEADERS +from libcloud.test.file_fixtures import ComputeFileFixtures, OpenStackFixtures +from libcloud.test.compute import TestCaseMixin + +from libcloud.test.secrets import OPENSTACK_PARAMS + +BASE_DIR = os.path.abspath(os.path.split(__file__)[0]) + + +class OpenStack_1_0_ResponseTestCase(unittest.TestCase): + XML = """""" + + def test_simple_xml_content_type_handling(self): + http_response = MockResponse( + 200, OpenStack_1_0_ResponseTestCase.XML, headers={'content-type': 'application/xml'}) + body = OpenStack_1_0_Response(http_response, None).parse_body() + + self.assertTrue(hasattr(body, 'tag'), "Body should be parsed as XML") + + def test_extended_xml_content_type_handling(self): + http_response = MockResponse(200, + OpenStack_1_0_ResponseTestCase.XML, + headers={'content-type': 
'application/xml; charset=UTF-8'}) + body = OpenStack_1_0_Response(http_response, None).parse_body() + + self.assertTrue(hasattr(body, 'tag'), "Body should be parsed as XML") + + def test_non_xml_content_type_handling(self): + RESPONSE_BODY = "Accepted" + + http_response = MockResponse( + 202, RESPONSE_BODY, headers={'content-type': 'text/html'}) + body = OpenStack_1_0_Response(http_response, None).parse_body() + + self.assertEqual( + body, RESPONSE_BODY, "Non-XML body should be returned as is") + + +class OpenStackServiceCatalogTests(unittest.TestCase): + # TODO refactor and move into libcloud/test/common + + def setUp(self): + OpenStackBaseConnection.conn_classes = (OpenStackMockHttp, + OpenStackMockHttp) + + def test_connection_get_service_catalog(self): + connection = OpenStackBaseConnection(*OPENSTACK_PARAMS) + connection.auth_url = "https://auth.api.example.com" + connection._ex_force_base_url = "https://www.foo.com" + connection.driver = OpenStack_1_0_NodeDriver(*OPENSTACK_PARAMS) + + result = connection.get_service_catalog() + catalog = result.get_catalog() + endpoints = result.get_endpoints('cloudFilesCDN', 'cloudFilesCDN') + public_urls = result.get_public_urls('cloudFilesCDN', 'cloudFilesCDN') + + expected_urls = [ + 'https://cdn2.clouddrive.com/v1/MossoCloudFS', + 'https://cdn2.clouddrive.com/v1/MossoCloudFS' + ] + + self.assertTrue('cloudFilesCDN' in catalog) + self.assertEqual(len(endpoints), 2) + self.assertEqual(public_urls, expected_urls) + + +class OpenStackAuthConnectionTests(unittest.TestCase): + # TODO refactor and move into libcloud/test/common + + def setUp(self): + OpenStackBaseConnection.auth_url = None + OpenStackBaseConnection.conn_classes = (OpenStackMockHttp, + OpenStackMockHttp) + + def test_auth_url_is_correctly_assembled(self): + tuples = [ + ('1.0', OpenStackMockHttp), + ('1.1', OpenStackMockHttp), + ('2.0', OpenStack_2_0_MockHttp), + ('2.0_apikey', OpenStack_2_0_MockHttp), + ('2.0_password', OpenStack_2_0_MockHttp) + ] + + APPEND = 
0 + NOTAPPEND = 1 + + auth_urls = [ + ('https://auth.api.example.com', APPEND, ''), + ('https://auth.api.example.com/', NOTAPPEND, '/'), + ('https://auth.api.example.com/foo/bar', NOTAPPEND, '/foo/bar'), + ('https://auth.api.example.com/foo/bar/', NOTAPPEND, '/foo/bar/') + ] + + actions = { + '1.0': '/v1.0', + '1.1': '/v1.1/auth', + '2.0': '/v2.0/tokens', + '2.0_apikey': '/v2.0/tokens', + '2.0_password': '/v2.0/tokens' + } + + user_id = OPENSTACK_PARAMS[0] + key = OPENSTACK_PARAMS[1] + + for (auth_version, mock_http_class) in tuples: + for (url, should_append_default_path, expected_path) in auth_urls: + connection = \ + self._get_mock_connection(mock_http_class=mock_http_class, + auth_url=url) + + auth_url = connection.auth_url + + osa = OpenStackAuthConnection(connection, + auth_url, + auth_version, + user_id, key) + + try: + osa = osa.authenticate() + except: + pass + + if (should_append_default_path == APPEND): + expected_path = actions[auth_version] + + self.assertEqual(osa.action, expected_path) + + def test_basic_authentication(self): + tuples = [ + ('1.0', OpenStackMockHttp), + ('1.1', OpenStackMockHttp), + ('2.0', OpenStack_2_0_MockHttp), + ('2.0_apikey', OpenStack_2_0_MockHttp), + ('2.0_password', OpenStack_2_0_MockHttp) + ] + + user_id = OPENSTACK_PARAMS[0] + key = OPENSTACK_PARAMS[1] + + for (auth_version, mock_http_class) in tuples: + connection = \ + self._get_mock_connection(mock_http_class=mock_http_class) + auth_url = connection.auth_url + + osa = OpenStackAuthConnection(connection, auth_url, auth_version, + user_id, key) + + self.assertEqual(osa.urls, {}) + self.assertEqual(osa.auth_token, None) + self.assertEqual(osa.auth_user_info, None) + osa = osa.authenticate() + + self.assertTrue(len(osa.urls) >= 1) + self.assertTrue(osa.auth_token is not None) + + if auth_version in ['1.1', '2.0', '2.0_apikey', '2.0_password']: + self.assertTrue(osa.auth_token_expires is not None) + + if auth_version in ['2.0', '2.0_apikey', '2.0_password']: + 
self.assertTrue(osa.auth_user_info is not None) + + def test_token_expiration_and_force_reauthentication(self): + user_id = OPENSTACK_PARAMS[0] + key = OPENSTACK_PARAMS[1] + + connection = self._get_mock_connection(OpenStack_2_0_MockHttp) + auth_url = connection.auth_url + auth_version = '2.0' + + yesterday = datetime.datetime.today() - datetime.timedelta(1) + tomorrow = datetime.datetime.today() + datetime.timedelta(1) + + osa = OpenStackAuthConnection(connection, auth_url, auth_version, + user_id, key) + + mocked_auth_method = Mock(wraps=osa.authenticate_2_0_with_body) + osa.authenticate_2_0_with_body = mocked_auth_method + + # Force re-auth, expired token + osa.auth_token = None + osa.auth_token_expires = yesterday + count = 5 + + for i in range(0, count): + osa.authenticate(force=True) + + self.assertEqual(mocked_auth_method.call_count, count) + + # No force reauth, expired token + osa.auth_token = None + osa.auth_token_expires = yesterday + + mocked_auth_method.call_count = 0 + self.assertEqual(mocked_auth_method.call_count, 0) + + for i in range(0, count): + osa.authenticate(force=False) + + self.assertEqual(mocked_auth_method.call_count, 1) + + # No force reauth, valid / non-expired token + osa.auth_token = None + + mocked_auth_method.call_count = 0 + self.assertEqual(mocked_auth_method.call_count, 0) + + for i in range(0, count): + osa.authenticate(force=False) + + if i == 0: + osa.auth_token_expires = tomorrow + + self.assertEqual(mocked_auth_method.call_count, 1) + + # No force reauth, valid / non-expired token which is about to expire in + # less than AUTH_TOKEN_EXPIRES_GRACE_SECONDS + soon = datetime.datetime.utcnow() + \ + datetime.timedelta(seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS - 1) + osa.auth_token = None + + mocked_auth_method.call_count = 0 + self.assertEqual(mocked_auth_method.call_count, 0) + + for i in range(0, count): + if i == 0: + osa.auth_token_expires = soon + + osa.authenticate(force=False) + + 
self.assertEqual(mocked_auth_method.call_count, 1) + + def _get_mock_connection(self, mock_http_class, auth_url=None): + OpenStackBaseConnection.conn_classes = (mock_http_class, + mock_http_class) + + if auth_url is None: + auth_url = "https://auth.api.example.com" + + OpenStackBaseConnection.auth_url = auth_url + connection = OpenStackBaseConnection(*OPENSTACK_PARAMS) + + connection._ex_force_base_url = "https://www.foo.com" + connection.driver = OpenStack_1_0_NodeDriver(*OPENSTACK_PARAMS) + + return connection + + +class OpenStack_1_0_Tests(unittest.TestCase, TestCaseMixin): + should_list_locations = False + should_list_volumes = False + + driver_klass = OpenStack_1_0_NodeDriver + driver_args = OPENSTACK_PARAMS + driver_kwargs = {} + # driver_kwargs = {'ex_force_auth_version': '1.0'} + + @classmethod + def create_driver(self): + if self is not OpenStack_1_0_FactoryMethodTests: + self.driver_type = self.driver_klass + return self.driver_type(*self.driver_args, **self.driver_kwargs) + + def setUp(self): + # monkeypatch get_endpoint because the base openstack driver doesn't actually + # work with old devstack but this class/tests are still used by the rackspace + # driver + def get_endpoint(*args, **kwargs): + return "https://servers.api.rackspacecloud.com/v1.0/slug" + self.driver_klass.connectionCls.get_endpoint = get_endpoint + + self.driver_klass.connectionCls.conn_classes = (OpenStackMockHttp, + OpenStackMockHttp) + self.driver_klass.connectionCls.auth_url = "https://auth.api.example.com" + + OpenStackMockHttp.type = None + + self.driver = self.create_driver() + # normally authentication happens lazily, but we force it here + self.driver.connection._populate_hosts_and_request_paths() + clear_pricing_data() + + def test_populate_hosts_and_requests_path(self): + tomorrow = datetime.datetime.today() + datetime.timedelta(1) + cls = self.driver_klass.connectionCls + + count = 5 + + # Test authentication and token re-use + con = cls('username', 'key') + osa = con._osa 
+ + mocked_auth_method = Mock() + osa.authenticate = mocked_auth_method + + # Valid token returned on first call, should be reused. + for i in range(0, count): + con._populate_hosts_and_request_paths() + + if i == 0: + osa.auth_token = '1234' + osa.auth_token_expires = tomorrow + + self.assertEqual(mocked_auth_method.call_count, 1) + + osa.auth_token = None + osa.auth_token_expires = None + + # ex_force_auth_token provided, authenticate should never be called + con = cls('username', 'key', ex_force_base_url='http://ponies', + ex_force_auth_token='1234') + osa = con._osa + + mocked_auth_method = Mock() + osa.authenticate = mocked_auth_method + + for i in range(0, count): + con._populate_hosts_and_request_paths() + + self.assertEqual(mocked_auth_method.call_count, 0) + + def test_auth_token_is_set(self): + self.driver.connection._populate_hosts_and_request_paths() + self.assertEqual( + self.driver.connection.auth_token, "aaaaaaaaaaaa-bbb-cccccccccccccc") + + def test_auth_token_expires_is_set(self): + self.driver.connection._populate_hosts_and_request_paths() + + expires = self.driver.connection.auth_token_expires + self.assertEqual(expires.isoformat(), "2031-11-23T21:00:14-06:00") + + def test_auth(self): + if self.driver.connection._auth_version == '2.0': + return + + OpenStackMockHttp.type = 'UNAUTHORIZED' + try: + self.driver = self.create_driver() + self.driver.list_nodes() + except InvalidCredsError: + e = sys.exc_info()[1] + self.assertEqual(True, isinstance(e, InvalidCredsError)) + else: + self.fail('test should have thrown') + + def test_auth_missing_key(self): + if self.driver.connection._auth_version == '2.0': + return + + OpenStackMockHttp.type = 'UNAUTHORIZED_MISSING_KEY' + try: + self.driver = self.create_driver() + self.driver.list_nodes() + except MalformedResponseError: + e = sys.exc_info()[1] + self.assertEqual(True, isinstance(e, MalformedResponseError)) + else: + self.fail('test should have thrown') + + def test_auth_server_error(self): + if 
self.driver.connection._auth_version == '2.0': + return + + OpenStackMockHttp.type = 'INTERNAL_SERVER_ERROR' + try: + self.driver = self.create_driver() + self.driver.list_nodes() + except MalformedResponseError: + e = sys.exc_info()[1] + self.assertEqual(True, isinstance(e, MalformedResponseError)) + else: + self.fail('test should have thrown') + + def test_error_parsing_when_body_is_missing_message(self): + OpenStackMockHttp.type = 'NO_MESSAGE_IN_ERROR_BODY' + try: + self.driver.list_images() + except Exception: + e = sys.exc_info()[1] + self.assertEqual(True, isinstance(e, Exception)) + else: + self.fail('test should have thrown') + + def test_list_locations(self): + locations = self.driver.list_locations() + self.assertEqual(len(locations), 1) + + def test_list_nodes(self): + OpenStackMockHttp.type = 'EMPTY' + ret = self.driver.list_nodes() + self.assertEqual(len(ret), 0) + OpenStackMockHttp.type = None + ret = self.driver.list_nodes() + self.assertEqual(len(ret), 1) + node = ret[0] + self.assertEqual('67.23.21.33', node.public_ips[0]) + self.assertTrue('10.176.168.218' in node.private_ips) + self.assertEqual(node.extra.get('flavorId'), '1') + self.assertEqual(node.extra.get('imageId'), '11') + self.assertEqual(type(node.extra.get('metadata')), type(dict())) + OpenStackMockHttp.type = 'METADATA' + ret = self.driver.list_nodes() + self.assertEqual(len(ret), 1) + node = ret[0] + self.assertEqual(type(node.extra.get('metadata')), type(dict())) + self.assertEqual(node.extra.get('metadata').get('somekey'), + 'somevalue') + OpenStackMockHttp.type = None + + def test_list_images(self): + ret = self.driver.list_images() + expected = {10: {'serverId': None, + 'status': 'ACTIVE', + 'created': '2009-07-20T09:14:37-05:00', + 'updated': '2009-07-20T09:14:37-05:00', + 'progress': None, + 'minDisk': None, + 'minRam': None}, + 11: {'serverId': '91221', + 'status': 'ACTIVE', + 'created': '2009-11-29T20:22:09-06:00', + 'updated': '2009-11-29T20:24:08-06:00', + 'progress': '100', 
+ 'minDisk': '5', + 'minRam': '256'}} + for ret_idx, extra in list(expected.items()): + for key, value in list(extra.items()): + self.assertEqual(ret[ret_idx].extra[key], value) + + def test_create_node(self): + image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', + driver=self.driver) + size = NodeSize(1, '256 slice', None, None, None, None, + driver=self.driver) + node = self.driver.create_node(name='racktest', image=image, size=size) + self.assertEqual(node.name, 'racktest') + self.assertEqual(node.extra.get('password'), 'racktestvJq7d3') + + def test_create_node_without_adminPass(self): + OpenStackMockHttp.type = 'NO_ADMIN_PASS' + image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', + driver=self.driver) + size = NodeSize(1, '256 slice', None, None, None, None, + driver=self.driver) + node = self.driver.create_node(name='racktest', image=image, size=size) + self.assertEqual(node.name, 'racktest') + self.assertEqual(node.extra.get('password'), None) + + def test_create_node_ex_shared_ip_group(self): + OpenStackMockHttp.type = 'EX_SHARED_IP_GROUP' + image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', + driver=self.driver) + size = NodeSize(1, '256 slice', None, None, None, None, + driver=self.driver) + node = self.driver.create_node(name='racktest', image=image, size=size, + ex_shared_ip_group_id='12345') + self.assertEqual(node.name, 'racktest') + self.assertEqual(node.extra.get('password'), 'racktestvJq7d3') + + def test_create_node_with_metadata(self): + OpenStackMockHttp.type = 'METADATA' + image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', + driver=self.driver) + size = NodeSize(1, '256 slice', None, None, None, None, + driver=self.driver) + metadata = {'a': 'b', 'c': 'd'} + files = {'/file1': 'content1', '/file2': 'content2'} + node = self.driver.create_node(name='racktest', image=image, size=size, + metadata=metadata, files=files) + self.assertEqual(node.name, 'racktest') + self.assertEqual(node.extra.get('password'), 'racktestvJq7d3') + 
self.assertEqual(node.extra.get('metadata'), metadata) + + def test_reboot_node(self): + node = Node(id=72258, name=None, state=None, public_ips=None, + private_ips=None, driver=self.driver) + ret = node.reboot() + self.assertTrue(ret is True) + + def test_destroy_node(self): + node = Node(id=72258, name=None, state=None, public_ips=None, + private_ips=None, driver=self.driver) + ret = node.destroy() + self.assertTrue(ret is True) + + def test_ex_limits(self): + limits = self.driver.ex_limits() + self.assertTrue("rate" in limits) + self.assertTrue("absolute" in limits) + + def test_create_image(self): + node = Node(id=444222, name=None, state=None, public_ips=None, + private_ips=None, driver=self.driver) + image = self.driver.create_image(node, "imgtest") + self.assertEqual(image.name, "imgtest") + self.assertEqual(image.id, "12345") + + def test_delete_image(self): + image = NodeImage(id=333111, name='Ubuntu 8.10 (intrepid)', + driver=self.driver) + ret = self.driver.delete_image(image) + self.assertTrue(ret) + + def test_ex_list_ip_addresses(self): + ret = self.driver.ex_list_ip_addresses(node_id=72258) + self.assertEqual(2, len(ret.public_addresses)) + self.assertTrue('67.23.10.131' in ret.public_addresses) + self.assertTrue('67.23.10.132' in ret.public_addresses) + self.assertEqual(1, len(ret.private_addresses)) + self.assertTrue('10.176.42.16' in ret.private_addresses) + + def test_ex_list_ip_groups(self): + ret = self.driver.ex_list_ip_groups() + self.assertEqual(2, len(ret)) + self.assertEqual('1234', ret[0].id) + self.assertEqual('Shared IP Group 1', ret[0].name) + self.assertEqual('5678', ret[1].id) + self.assertEqual('Shared IP Group 2', ret[1].name) + self.assertTrue(ret[0].servers is None) + + def test_ex_list_ip_groups_detail(self): + ret = self.driver.ex_list_ip_groups(details=True) + + self.assertEqual(2, len(ret)) + + self.assertEqual('1234', ret[0].id) + self.assertEqual('Shared IP Group 1', ret[0].name) + self.assertEqual(2, len(ret[0].servers)) + 
self.assertEqual('422', ret[0].servers[0]) + self.assertEqual('3445', ret[0].servers[1]) + + self.assertEqual('5678', ret[1].id) + self.assertEqual('Shared IP Group 2', ret[1].name) + self.assertEqual(3, len(ret[1].servers)) + self.assertEqual('23203', ret[1].servers[0]) + self.assertEqual('2456', ret[1].servers[1]) + self.assertEqual('9891', ret[1].servers[2]) + + def test_ex_create_ip_group(self): + ret = self.driver.ex_create_ip_group('Shared IP Group 1', '5467') + self.assertEqual('1234', ret.id) + self.assertEqual('Shared IP Group 1', ret.name) + self.assertEqual(1, len(ret.servers)) + self.assertEqual('422', ret.servers[0]) + + def test_ex_delete_ip_group(self): + ret = self.driver.ex_delete_ip_group('5467') + self.assertEqual(True, ret) + + def test_ex_share_ip(self): + ret = self.driver.ex_share_ip('1234', '3445', '67.23.21.133') + self.assertEqual(True, ret) + + def test_ex_unshare_ip(self): + ret = self.driver.ex_unshare_ip('3445', '67.23.21.133') + self.assertEqual(True, ret) + + def test_ex_resize(self): + node = Node(id=444222, name=None, state=None, public_ips=None, + private_ips=None, driver=self.driver) + size = NodeSize(1, '256 slice', None, None, None, None, + driver=self.driver) + self.assertTrue(self.driver.ex_resize(node=node, size=size)) + + def test_ex_confirm_resize(self): + node = Node(id=444222, name=None, state=None, public_ips=None, + private_ips=None, driver=self.driver) + self.assertTrue(self.driver.ex_confirm_resize(node=node)) + + def test_ex_revert_resize(self): + node = Node(id=444222, name=None, state=None, public_ips=None, + private_ips=None, driver=self.driver) + self.assertTrue(self.driver.ex_revert_resize(node=node)) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 7, 'Wrong sizes count') + + for size in sizes: + self.assertTrue(isinstance(size.price, float), + 'Wrong size price type') + + if self.driver.api_name == 'openstack': + self.assertEqual(size.price, 0, + 'Size price 
should be zero by default') + + def test_list_sizes_with_specified_pricing(self): + if self.driver.api_name != 'openstack': + return + + pricing = dict((str(i), i) for i in range(1, 8)) + + set_pricing(driver_type='compute', driver_name='openstack', + pricing=pricing) + + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 7, 'Wrong sizes count') + + for size in sizes: + self.assertTrue(isinstance(size.price, float), + 'Wrong size price type') + self.assertEqual(float(size.price), float(pricing[size.id])) + + +class OpenStack_1_0_FactoryMethodTests(OpenStack_1_0_Tests): + should_list_locations = False + should_list_volumes = False + + driver_klass = OpenStack_1_0_NodeDriver + driver_type = get_driver(Provider.OPENSTACK) + driver_args = OPENSTACK_PARAMS + ('1.0',) + + def test_factory_method_invalid_version(self): + try: + self.driver_type(*(OPENSTACK_PARAMS + ('15.5',))) + except NotImplementedError: + pass + else: + self.fail('Exception was not thrown') + + +class OpenStackMockHttp(MockHttpTestCase): + fixtures = ComputeFileFixtures('openstack') + auth_fixtures = OpenStackFixtures() + json_content_headers = {'content-type': 'application/json; charset=UTF-8'} + + # fake auth token response + def _v1_0(self, method, url, body, headers): + headers = { + 'x-server-management-url': 'https://servers.api.rackspacecloud.com/v1.0/slug', + 'x-auth-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', + 'x-cdn-management-url': 'https://cdn.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06', + 'x-storage-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', + 'x-storage-url': 'https://storage4.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06'} + return (httplib.NO_CONTENT, "", headers, httplib.responses[httplib.NO_CONTENT]) + + def _v1_0_UNAUTHORIZED(self, method, url, body, headers): + return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED]) + + def _v1_0_INTERNAL_SERVER_ERROR(self, method, url, body, headers): + 
return (httplib.INTERNAL_SERVER_ERROR, "

500: Internal Server Error

", {}, + httplib.responses[httplib.INTERNAL_SERVER_ERROR]) + + def _v1_0_slug_images_detail_NO_MESSAGE_IN_ERROR_BODY(self, method, url, body, headers): + body = self.fixtures.load('300_multiple_choices.json') + return (httplib.MULTIPLE_CHOICES, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_0_UNAUTHORIZED_MISSING_KEY(self, method, url, body, headers): + headers = { + 'x-server-management-url': 'https://servers.api.rackspacecloud.com/v1.0/slug', + 'x-auth-tokenx': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', + 'x-cdn-management-url': 'https://cdn.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06'} + return (httplib.NO_CONTENT, "", headers, httplib.responses[httplib.NO_CONTENT]) + + def _v2_0_tokens(self, method, url, body, headers): + body = self.auth_fixtures.load('_v2_0__auth.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_0_slug_servers_detail_EMPTY(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_servers_detail_empty.xml') + return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) + + def _v1_0_slug_servers_detail(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_servers_detail.xml') + return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) + + def _v1_0_slug_servers_detail_METADATA(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_servers_detail_metadata.xml') + return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) + + def _v1_0_slug_images_333111(self, method, url, body, headers): + if method != "DELETE": + raise NotImplementedError() + # this is currently used for deletion of an image + # as such it should not accept GET/POST + return(httplib.NO_CONTENT, "", "", httplib.responses[httplib.NO_CONTENT]) + + def _v1_0_slug_images(self, method, url, body, headers): + if method != "POST": + raise NotImplementedError() + # this is currently used for creation of 
new image with + # POST request, don't handle GET to avoid possible confusion + body = self.fixtures.load('v1_slug_images_post.xml') + return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED]) + + def _v1_0_slug_images_detail(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_images_detail.xml') + return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) + + def _v1_0_slug_servers(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_servers.xml') + return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED]) + + def _v1_0_slug_servers_NO_ADMIN_PASS(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_servers_no_admin_pass.xml') + return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED]) + + def _v1_0_slug_servers_EX_SHARED_IP_GROUP(self, method, url, body, headers): + # test_create_node_ex_shared_ip_group + # Verify that the body contains sharedIpGroupId XML element + body = u(body) + self.assertTrue(body.find('sharedIpGroupId="12345"') != -1) + body = self.fixtures.load('v1_slug_servers.xml') + return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED]) + + def _v1_0_slug_servers_METADATA(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_servers_metadata.xml') + return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED]) + + def _v1_0_slug_servers_72258_action(self, method, url, body, headers): + if method != "POST" or body[:8] != "500: Internal Server Error", {'content-type': 'text/html'}, + httplib.responses[httplib.INTERNAL_SERVER_ERROR]) + + +class OpenStack_1_1_Tests(unittest.TestCase, TestCaseMixin): + should_list_locations = False + should_list_volumes = True + + driver_klass = OpenStack_1_1_NodeDriver + driver_type = OpenStack_1_1_NodeDriver + driver_args = OPENSTACK_PARAMS + driver_kwargs = {'ex_force_auth_version': '2.0'} + + @classmethod + def 
create_driver(self): + if self is not OpenStack_1_1_FactoryMethodTests: + self.driver_type = self.driver_klass + return self.driver_type(*self.driver_args, **self.driver_kwargs) + + def setUp(self): + self.driver_klass.connectionCls.conn_classes = ( + OpenStack_2_0_MockHttp, OpenStack_2_0_MockHttp) + self.driver_klass.connectionCls.auth_url = "https://auth.api.example.com" + + OpenStackMockHttp.type = None + OpenStack_1_1_MockHttp.type = None + OpenStack_2_0_MockHttp.type = None + + self.driver = self.create_driver() + + # normally authentication happens lazily, but we force it here + self.driver.connection._populate_hosts_and_request_paths() + clear_pricing_data() + self.node = self.driver.list_nodes()[1] + + def _force_reauthentication(self): + """ + Trash current auth token so driver will be forced to re-authentication + on next request. + """ + self.driver.connection._ex_force_base_url = 'http://ex_force_base_url.com:666/forced_url' + self.driver.connection.auth_token = None + self.driver.connection.auth_token_expires = None + self.driver.connection._osa.auth_token = None + self.driver.connection._osa.auth_token_expires = None + + def test_auth_token_is_set(self): + self._force_reauthentication() + self.driver.connection._populate_hosts_and_request_paths() + + self.assertEqual( + self.driver.connection.auth_token, "aaaaaaaaaaaa-bbb-cccccccccccccc") + + def test_auth_token_expires_is_set(self): + self._force_reauthentication() + self.driver.connection._populate_hosts_and_request_paths() + + expires = self.driver.connection.auth_token_expires + self.assertEqual(expires.isoformat(), "2031-11-23T21:00:14-06:00") + + def test_ex_force_base_url(self): + # change base url and trash the current auth token so we can + # re-authenticate + self.driver.connection._ex_force_base_url = 'http://ex_force_base_url.com:666/forced_url' + self.driver.connection.auth_token = None + self.driver.connection._populate_hosts_and_request_paths() + + # assert that we use the base url and 
not the auth url + self.assertEqual(self.driver.connection.host, 'ex_force_base_url.com') + self.assertEqual(self.driver.connection.port, '666') + self.assertEqual(self.driver.connection.request_path, '/forced_url') + + def test_get_endpoint_populates_host_port_and_request_path(self): + # simulate a subclass overriding this method + self.driver.connection.get_endpoint = lambda: 'http://endpoint_auth_url.com:1555/service_url' + self.driver.connection.auth_token = None + self.driver.connection._ex_force_base_url = None + self.driver.connection._populate_hosts_and_request_paths() + + # assert that we use the result of get endpoint + self.assertEqual(self.driver.connection.host, 'endpoint_auth_url.com') + self.assertEqual(self.driver.connection.port, '1555') + self.assertEqual(self.driver.connection.request_path, '/service_url') + + def test_set_auth_token_populates_host_port_and_request_path(self): + # change base url and trash the current auth token so we can + # re-authenticate + self.driver.connection._ex_force_base_url = 'http://some_other_ex_force_base_url.com:1222/some-service' + self.driver.connection.auth_token = "preset-auth-token" + self.driver.connection._populate_hosts_and_request_paths() + + # assert that we use the base url and not the auth url + self.assertEqual( + self.driver.connection.host, 'some_other_ex_force_base_url.com') + self.assertEqual(self.driver.connection.port, '1222') + self.assertEqual(self.driver.connection.request_path, '/some-service') + + def test_auth_token_without_base_url_raises_exception(self): + kwargs = { + 'ex_force_auth_version': '2.0', + 'ex_force_auth_token': 'preset-auth-token' + } + try: + self.driver_type(*self.driver_args, **kwargs) + self.fail('Expected failure setting auth token without base url') + except LibcloudError: + pass + else: + self.fail('Expected failure setting auth token without base url') + + def test_ex_force_auth_token_passed_to_connection(self): + base_url = 
'https://servers.api.rackspacecloud.com/v1.1/slug' + kwargs = { + 'ex_force_auth_version': '2.0', + 'ex_force_auth_token': 'preset-auth-token', + 'ex_force_base_url': base_url + } + + driver = self.driver_type(*self.driver_args, **kwargs) + driver.list_nodes() + + self.assertEqual(kwargs['ex_force_auth_token'], + driver.connection.auth_token) + self.assertEqual('servers.api.rackspacecloud.com', + driver.connection.host) + self.assertEqual('/v1.1/slug', driver.connection.request_path) + self.assertEqual(443, driver.connection.port) + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + self.assertEqual(len(nodes), 2) + node = nodes[0] + + self.assertEqual('12065', node.id) + # test public IPv4 + self.assertTrue('12.16.18.28' in node.public_ips) + self.assertTrue('50.57.94.35' in node.public_ips) + # test public IPv6 + self.assertTrue( + '2001:4801:7808:52:16:3eff:fe47:788a' in node.public_ips) + # test private IPv4 + self.assertTrue('10.182.64.34' in node.private_ips) + # test private IPv6 + self.assertTrue( + 'fec0:4801:7808:52:16:3eff:fe60:187d' in node.private_ips) + + self.assertEqual(node.extra.get('flavorId'), '2') + self.assertEqual(node.extra.get('imageId'), '7') + self.assertEqual(node.extra.get('metadata'), {}) + self.assertEqual(node.extra['updated'], '2011-10-11T00:50:04Z') + self.assertEqual(node.extra['created'], '2011-10-11T00:51:39Z') + + def test_list_nodes_no_image_id_attribute(self): + # Regression test for LIBCLOD-455 + self.driver_klass.connectionCls.conn_classes[0].type = 'ERROR_STATE_NO_IMAGE_ID' + self.driver_klass.connectionCls.conn_classes[1].type = 'ERROR_STATE_NO_IMAGE_ID' + + nodes = self.driver.list_nodes() + self.assertEqual(nodes[0].extra['imageId'], None) + + def test_list_volumes(self): + volumes = self.driver.list_volumes() + self.assertEqual(len(volumes), 2) + volume = volumes[0] + + self.assertEqual('cd76a3a1-c4ce-40f6-9b9f-07a61508938d', volume.id) + self.assertEqual('test_volume_2', volume.name) + 
self.assertEqual(2, volume.size) + + self.assertEqual(volume.extra['description'], '') + self.assertEqual(volume.extra['attachments'][0][ + 'id'], 'cd76a3a1-c4ce-40f6-9b9f-07a61508938d') + + volume = volumes[1] + self.assertEqual('cfcec3bc-b736-4db5-9535-4c24112691b5', volume.id) + self.assertEqual('test_volume', volume.name) + self.assertEqual(50, volume.size) + + self.assertEqual(volume.extra['description'], 'some description') + self.assertEqual(volume.extra['attachments'], []) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 8, 'Wrong sizes count') + + for size in sizes: + self.assertTrue(isinstance(size.price, float), + 'Wrong size price type') + + self.assertEqual(sizes[0].vcpus, 8) + + def test_list_sizes_with_specified_pricing(self): + + pricing = dict((str(i), i * 5.0) for i in range(1, 9)) + + set_pricing(driver_type='compute', + driver_name=self.driver.api_name, pricing=pricing) + + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 8, 'Wrong sizes count') + + for size in sizes: + self.assertTrue(isinstance(size.price, float), + 'Wrong size price type') + + self.assertEqual(size.price, pricing[size.id], + 'Size price should match') + + def test_list_images(self): + images = self.driver.list_images() + self.assertEqual(len(images), 13, 'Wrong images count') + + image = images[0] + self.assertEqual(image.id, '13') + self.assertEqual(image.name, 'Windows 2008 SP2 x86 (B24)') + self.assertEqual(image.extra['updated'], '2011-08-06T18:14:02Z') + self.assertEqual(image.extra['created'], '2011-08-06T18:13:11Z') + self.assertEqual(image.extra['status'], 'ACTIVE') + self.assertEqual(image.extra['metadata']['os_type'], 'windows') + self.assertEqual( + image.extra['serverId'], '52415800-8b69-11e0-9b19-734f335aa7b3') + self.assertEqual(image.extra['minDisk'], 0) + self.assertEqual(image.extra['minRam'], 0) + + def test_create_node(self): + image = NodeImage( + id=11, name='Ubuntu 8.10 (intrepid)', 
driver=self.driver) + size = NodeSize( + 1, '256 slice', None, None, None, None, driver=self.driver) + node = self.driver.create_node(name='racktest', image=image, size=size) + self.assertEqual(node.id, '26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe') + self.assertEqual(node.name, 'racktest') + self.assertEqual(node.extra['password'], 'racktestvJq7d3') + self.assertEqual(node.extra['metadata']['My Server Name'], 'Apache1') + + def test_create_node_with_ex_keyname_and_ex_userdata(self): + image = NodeImage( + id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) + size = NodeSize( + 1, '256 slice', None, None, None, None, driver=self.driver) + node = self.driver.create_node(name='racktest', image=image, size=size, + ex_keyname='devstack', + ex_userdata='sample data') + self.assertEqual(node.id, '26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe') + self.assertEqual(node.name, 'racktest') + self.assertEqual(node.extra['password'], 'racktestvJq7d3') + self.assertEqual(node.extra['metadata']['My Server Name'], 'Apache1') + self.assertEqual(node.extra['key_name'], 'devstack') + + def test_create_node_with_availability_zone(self): + image = NodeImage( + id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) + size = NodeSize( + 1, '256 slice', None, None, None, None, driver=self.driver) + node = self.driver.create_node(name='racktest', image=image, size=size, + availability_zone='testaz') + self.assertEqual(node.id, '26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe') + self.assertEqual(node.name, 'racktest') + self.assertEqual(node.extra['password'], 'racktestvJq7d3') + self.assertEqual(node.extra['metadata']['My Server Name'], 'Apache1') + self.assertEqual(node.extra['availability_zone'], 'testaz') + + def test_create_node_with_ex_disk_config(self): + OpenStack_1_1_MockHttp.type = 'EX_DISK_CONFIG' + image = NodeImage( + id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) + size = NodeSize( + 1, '256 slice', None, None, None, None, driver=self.driver) + node = 
self.driver.create_node(name='racktest', image=image, size=size, + ex_disk_config='AUTO') + self.assertEqual(node.id, '26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe') + self.assertEqual(node.name, 'racktest') + self.assertEqual(node.extra['disk_config'], 'AUTO') + + def test_destroy_node(self): + self.assertTrue(self.node.destroy()) + + def test_reboot_node(self): + self.assertTrue(self.node.reboot()) + + def test_create_volume(self): + self.assertEqual(self.driver.create_volume(1, 'test'), True) + + def test_destroy_volume(self): + volume = self.driver.ex_get_volume( + 'cd76a3a1-c4ce-40f6-9b9f-07a61508938d') + self.assertEqual(self.driver.destroy_volume(volume), True) + + def test_attach_volume(self): + node = self.driver.list_nodes()[0] + volume = self.driver.ex_get_volume( + 'cd76a3a1-c4ce-40f6-9b9f-07a61508938d') + self.assertEqual( + self.driver.attach_volume(node, volume, '/dev/sdb'), True) + + def test_detach_volume(self): + node = self.driver.list_nodes()[0] + volume = self.driver.ex_get_volume( + 'cd76a3a1-c4ce-40f6-9b9f-07a61508938d') + self.assertEqual( + self.driver.attach_volume(node, volume, '/dev/sdb'), True) + self.assertEqual(self.driver.detach_volume(volume), True) + + def test_ex_set_password(self): + self.assertTrue(self.driver.ex_set_password(self.node, 'New1&53jPass')) + + def test_ex_rebuild(self): + image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', + driver=self.driver) + success = self.driver.ex_rebuild(self.node, image=image) + self.assertTrue(success) + + def test_ex_rebuild_with_ex_disk_config(self): + image = NodeImage(id=58, name='Ubuntu 10.10 (intrepid)', + driver=self.driver) + node = Node(id=12066, name=None, state=None, public_ips=None, + private_ips=None, driver=self.driver) + success = self.driver.ex_rebuild(node, image=image, + ex_disk_config='MANUAL') + self.assertTrue(success) + + def test_ex_resize(self): + size = NodeSize(1, '256 slice', None, None, None, None, + driver=self.driver) + try: + self.driver.ex_resize(self.node, 
size) + except Exception: + e = sys.exc_info()[1] + self.fail('An error was raised: ' + repr(e)) + + def test_ex_confirm_resize(self): + try: + self.driver.ex_confirm_resize(self.node) + except Exception: + e = sys.exc_info()[1] + self.fail('An error was raised: ' + repr(e)) + + def test_ex_revert_resize(self): + try: + self.driver.ex_revert_resize(self.node) + except Exception: + e = sys.exc_info()[1] + self.fail('An error was raised: ' + repr(e)) + + def test_create_image(self): + image = self.driver.create_image(self.node, 'new_image') + self.assertEqual(image.name, 'new_image') + self.assertEqual(image.id, '4949f9ee-2421-4c81-8b49-13119446008b') + + def test_ex_set_server_name(self): + old_node = Node( + id='12064', name=None, state=None, + public_ips=None, private_ips=None, driver=self.driver, + ) + new_node = self.driver.ex_set_server_name(old_node, 'Bob') + self.assertEqual('Bob', new_node.name) + + def test_ex_set_metadata(self): + old_node = Node( + id='12063', name=None, state=None, + public_ips=None, private_ips=None, driver=self.driver, + ) + metadata = {'Image Version': '2.1', 'Server Label': 'Web Head 1'} + returned_metadata = self.driver.ex_set_metadata(old_node, metadata) + self.assertEqual(metadata, returned_metadata) + + def test_ex_get_metadata(self): + node = Node( + id='12063', name=None, state=None, + public_ips=None, private_ips=None, driver=self.driver, + ) + + metadata = {'Image Version': '2.1', 'Server Label': 'Web Head 1'} + returned_metadata = self.driver.ex_get_metadata(node) + self.assertEqual(metadata, returned_metadata) + + def test_ex_update_node(self): + old_node = Node( + id='12064', + name=None, state=None, public_ips=None, private_ips=None, driver=self.driver, + ) + + new_node = self.driver.ex_update_node(old_node, name='Bob') + + self.assertTrue(new_node) + self.assertEqual('Bob', new_node.name) + self.assertEqual('50.57.94.30', new_node.public_ips[0]) + + def test_ex_get_node_details(self): + node_id = '12064' + node = 
self.driver.ex_get_node_details(node_id) + self.assertEqual(node.id, '12064') + self.assertEqual(node.name, 'lc-test') + + def test_ex_get_size(self): + size_id = '7' + size = self.driver.ex_get_size(size_id) + self.assertEqual(size.id, size_id) + self.assertEqual(size.name, '15.5GB slice') + + def test_get_image(self): + image_id = '13' + image = self.driver.get_image(image_id) + self.assertEqual(image.id, image_id) + self.assertEqual(image.name, 'Windows 2008 SP2 x86 (B24)') + self.assertEqual(image.extra['serverId'], None) + self.assertEqual(image.extra['minDisk'], "5") + self.assertEqual(image.extra['minRam'], "256") + + def test_delete_image(self): + image = NodeImage( + id='26365521-8c62-11f9-2c33-283d153ecc3a', name='My Backup', driver=self.driver) + result = self.driver.delete_image(image) + self.assertTrue(result) + + def test_extract_image_id_from_url(self): + url = 'http://127.0.0.1/v1.1/68/images/1d4a8ea9-aae7-4242-a42d-5ff4702f2f14' + url_two = 'http://127.0.0.1/v1.1/68/images/13' + image_id = self.driver._extract_image_id_from_url(url) + image_id_two = self.driver._extract_image_id_from_url(url_two) + self.assertEqual(image_id, '1d4a8ea9-aae7-4242-a42d-5ff4702f2f14') + self.assertEqual(image_id_two, '13') + + def test_ex_rescue_with_password(self): + node = Node(id=12064, name=None, state=None, public_ips=None, + private_ips=None, driver=self.driver) + n = self.driver.ex_rescue(node, 'foo') + self.assertEqual(n.extra['password'], 'foo') + + def test_ex_rescue_no_password(self): + node = Node(id=12064, name=None, state=None, public_ips=None, + private_ips=None, driver=self.driver) + n = self.driver.ex_rescue(node) + self.assertEqual(n.extra['password'], 'foo') + + def test_ex_unrescue(self): + node = Node(id=12064, name=None, state=None, public_ips=None, + private_ips=None, driver=self.driver) + result = self.driver.ex_unrescue(node) + self.assertTrue(result) + + def test_ex_get_node_security_groups(self): + node = 
Node(id='1c01300f-ef97-4937-8f03-ac676d6234be', name=None, + state=None, public_ips=None, private_ips=None, driver=self.driver) + security_groups = self.driver.ex_get_node_security_groups(node) + self.assertEqual( + len(security_groups), 2, 'Wrong security groups count') + + security_group = security_groups[1] + self.assertEqual(security_group.id, 4) + self.assertEqual(security_group.tenant_id, '68') + self.assertEqual(security_group.name, 'ftp') + self.assertEqual( + security_group.description, 'FTP Client-Server - Open 20-21 ports') + self.assertEqual(security_group.rules[0].id, 1) + self.assertEqual(security_group.rules[0].parent_group_id, 4) + self.assertEqual(security_group.rules[0].ip_protocol, "tcp") + self.assertEqual(security_group.rules[0].from_port, 20) + self.assertEqual(security_group.rules[0].to_port, 21) + self.assertEqual(security_group.rules[0].ip_range, '0.0.0.0/0') + + def test_ex_list_security_groups(self): + security_groups = self.driver.ex_list_security_groups() + self.assertEqual( + len(security_groups), 2, 'Wrong security groups count') + + security_group = security_groups[1] + self.assertEqual(security_group.id, 4) + self.assertEqual(security_group.tenant_id, '68') + self.assertEqual(security_group.name, 'ftp') + self.assertEqual( + security_group.description, 'FTP Client-Server - Open 20-21 ports') + self.assertEqual(security_group.rules[0].id, 1) + self.assertEqual(security_group.rules[0].parent_group_id, 4) + self.assertEqual(security_group.rules[0].ip_protocol, "tcp") + self.assertEqual(security_group.rules[0].from_port, 20) + self.assertEqual(security_group.rules[0].to_port, 21) + self.assertEqual(security_group.rules[0].ip_range, '0.0.0.0/0') + + def test_ex_create_security_group(self): + name = 'test' + description = 'Test Security Group' + security_group = self.driver.ex_create_security_group( + name, description) + + self.assertEqual(security_group.id, 6) + self.assertEqual(security_group.tenant_id, '68') + 
self.assertEqual(security_group.name, name) + self.assertEqual(security_group.description, description) + self.assertEqual(len(security_group.rules), 0) + + def test_ex_delete_security_group(self): + security_group = OpenStackSecurityGroup( + id=6, tenant_id=None, name=None, description=None, driver=self.driver) + result = self.driver.ex_delete_security_group(security_group) + self.assertTrue(result) + + def test_ex_create_security_group_rule(self): + security_group = OpenStackSecurityGroup( + id=6, tenant_id=None, name=None, description=None, driver=self.driver) + security_group_rule = self.driver.ex_create_security_group_rule( + security_group, 'tcp', 14, 16, '0.0.0.0/0') + + self.assertEqual(security_group_rule.id, 2) + self.assertEqual(security_group_rule.parent_group_id, 6) + self.assertEqual(security_group_rule.ip_protocol, 'tcp') + self.assertEqual(security_group_rule.from_port, 14) + self.assertEqual(security_group_rule.to_port, 16) + self.assertEqual(security_group_rule.ip_range, '0.0.0.0/0') + self.assertEqual(security_group_rule.tenant_id, None) + + def test_ex_delete_security_group_rule(self): + security_group_rule = OpenStackSecurityGroupRule( + id=2, parent_group_id=None, ip_protocol=None, from_port=None, to_port=None, driver=self.driver) + result = self.driver.ex_delete_security_group_rule(security_group_rule) + self.assertTrue(result) + + def test_list_key_pairs(self): + keypairs = self.driver.list_key_pairs() + self.assertEqual(len(keypairs), 2, 'Wrong keypairs count') + keypair = keypairs[1] + self.assertEqual(keypair.name, 'key2') + self.assertEqual( + keypair.fingerprint, '5d:66:33:ae:99:0f:fb:cb:86:f2:bc:ae:53:99:b6:ed') + self.assertTrue(len(keypair.public_key) > 10) + self.assertEqual(keypair.private_key, None) + + def test_get_key_pair(self): + key_pair = self.driver.get_key_pair(name='test-key-pair') + + self.assertEqual(key_pair.name, 'test-key-pair') + + def test_get_key_pair_doesnt_exist(self): + 
self.assertRaises(KeyPairDoesNotExistError, + self.driver.get_key_pair, + name='doesnt-exist') + + def test_create_key_pair(self): + name = 'key0' + keypair = self.driver.create_key_pair(name=name) + self.assertEqual(keypair.name, name) + + self.assertEqual(keypair.fingerprint, + '80:f8:03:a7:8e:c1:c3:b1:7e:c5:8c:50:04:5e:1c:5b') + self.assertTrue(len(keypair.public_key) > 10) + self.assertTrue(len(keypair.private_key) > 10) + + def test_import_key_pair_from_file(self): + name = 'key3' + path = os.path.join( + os.path.dirname(__file__), 'fixtures', 'misc', 'dummy_rsa.pub') + pub_key = open(path, 'r').read() + keypair = self.driver.import_key_pair_from_file(name=name, + key_file_path=path) + self.assertEqual(keypair.name, name) + self.assertEqual( + keypair.fingerprint, '97:10:a6:e7:92:65:7e:69:fe:e6:81:8f:39:3c:8f:5a') + self.assertEqual(keypair.public_key, pub_key) + self.assertEqual(keypair.private_key, None) + + def test_import_key_pair_from_string(self): + name = 'key3' + path = os.path.join( + os.path.dirname(__file__), 'fixtures', 'misc', 'dummy_rsa.pub') + pub_key = open(path, 'r').read() + keypair = self.driver.import_key_pair_from_string(name=name, + key_material=pub_key) + self.assertEqual(keypair.name, name) + self.assertEqual( + keypair.fingerprint, '97:10:a6:e7:92:65:7e:69:fe:e6:81:8f:39:3c:8f:5a') + self.assertEqual(keypair.public_key, pub_key) + self.assertEqual(keypair.private_key, None) + + def test_delete_key_pair(self): + keypair = OpenStackKeyPair( + name='key1', fingerprint=None, public_key=None, driver=self.driver) + result = self.driver.delete_key_pair(key_pair=keypair) + self.assertTrue(result) + + def test_ex_list_floating_ip_pools(self): + ret = self.driver.ex_list_floating_ip_pools() + self.assertEqual(ret[0].name, 'public') + self.assertEqual(ret[1].name, 'foobar') + + def test_ex_attach_floating_ip_to_node(self): + image = NodeImage( + id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) + size = NodeSize( + 1, '256 slice', None, 
None, None, None, driver=self.driver) + node = self.driver.create_node(name='racktest', image=image, size=size) + node.id = 4242 + ip = '42.42.42.42' + + self.assertTrue(self.driver.ex_attach_floating_ip_to_node(node, ip)) + + def test_detach_floating_ip_from_node(self): + image = NodeImage( + id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) + size = NodeSize( + 1, '256 slice', None, None, None, None, driver=self.driver) + node = self.driver.create_node(name='racktest', image=image, size=size) + node.id = 4242 + ip = '42.42.42.42' + + self.assertTrue(self.driver.ex_detach_floating_ip_from_node(node, ip)) + + def test_OpenStack_1_1_FloatingIpPool_list_floating_ips(self): + pool = OpenStack_1_1_FloatingIpPool('foo', self.driver.connection) + ret = pool.list_floating_ips() + + self.assertEqual(ret[0].id, '09ea1784-2f81-46dc-8c91-244b4df75bde') + self.assertEqual(ret[0].pool, pool) + self.assertEqual(ret[0].ip_address, '10.3.1.42') + self.assertEqual(ret[0].node_id, None) + self.assertEqual(ret[1].id, '04c5336a-0629-4694-ba30-04b0bdfa88a4') + self.assertEqual(ret[1].pool, pool) + self.assertEqual(ret[1].ip_address, '10.3.1.1') + self.assertEqual( + ret[1].node_id, 'fcfc96da-19e2-40fd-8497-f29da1b21143') + + def test_OpenStack_1_1_FloatingIpPool_get_floating_ip(self): + pool = OpenStack_1_1_FloatingIpPool('foo', self.driver.connection) + ret = pool.get_floating_ip('10.3.1.42') + + self.assertEqual(ret.id, '09ea1784-2f81-46dc-8c91-244b4df75bde') + self.assertEqual(ret.pool, pool) + self.assertEqual(ret.ip_address, '10.3.1.42') + self.assertEqual(ret.node_id, None) + + def test_OpenStack_1_1_FloatingIpPool_create_floating_ip(self): + pool = OpenStack_1_1_FloatingIpPool('foo', self.driver.connection) + ret = pool.create_floating_ip() + + self.assertEqual(ret.id, '09ea1784-2f81-46dc-8c91-244b4df75bde') + self.assertEqual(ret.pool, pool) + self.assertEqual(ret.ip_address, '10.3.1.42') + self.assertEqual(ret.node_id, None) + + def 
test_OpenStack_1_1_FloatingIpPool_delete_floating_ip(self): + pool = OpenStack_1_1_FloatingIpPool('foo', self.driver.connection) + ip = OpenStack_1_1_FloatingIpAddress('foo-bar-id', '42.42.42.42', pool) + + self.assertTrue(pool.delete_floating_ip(ip)) + + def test_OpenStack_1_1_FloatingIpAddress_delete(self): + pool = OpenStack_1_1_FloatingIpPool('foo', self.driver.connection) + pool.delete_floating_ip = Mock() + ip = OpenStack_1_1_FloatingIpAddress('foo-bar-id', '42.42.42.42', pool) + + ip.pool.delete_floating_ip() + + self.assertEqual(pool.delete_floating_ip.call_count, 1) + + def test_ex_list_network(self): + networks = self.driver.ex_list_networks() + network = networks[0] + + self.assertEqual(len(networks), 3) + self.assertEqual(network.name, 'test1') + self.assertEqual(network.cidr, '127.0.0.0/24') + + def test_ex_create_network(self): + network = self.driver.ex_create_network(name='test1', + cidr='127.0.0.0/24') + self.assertEqual(network.name, 'test1') + self.assertEqual(network.cidr, '127.0.0.0/24') + + def test_ex_delete_network(self): + network = self.driver.ex_list_networks()[0] + self.assertTrue(self.driver.ex_delete_network(network=network)) + + def test_ex_get_metadata_for_node(self): + image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) + size = NodeSize(1, '256 slice', None, None, None, None, driver=self.driver) + node = self.driver.create_node(name='foo', + image=image, + size=size) + + metadata = self.driver.ex_get_metadata_for_node(node) + self.assertEqual(metadata['My Server Name'], 'Apache1') + self.assertEqual(len(metadata), 1) + + def test_ex_pause_node(self): + node = Node( + id='12063', name=None, state=None, + public_ips=None, private_ips=None, driver=self.driver, + ) + ret = self.driver.ex_pause_node(node) + self.assertTrue(ret is True) + + def test_ex_unpause_node(self): + node = Node( + id='12063', name=None, state=None, + public_ips=None, private_ips=None, driver=self.driver, + ) + ret = 
self.driver.ex_unpause_node(node) + self.assertTrue(ret is True) + + def test_ex_suspend_node(self): + node = Node( + id='12063', name=None, state=None, + public_ips=None, private_ips=None, driver=self.driver, + ) + ret = self.driver.ex_suspend_node(node) + self.assertTrue(ret is True) + + def test_ex_resume_node(self): + node = Node( + id='12063', name=None, state=None, + public_ips=None, private_ips=None, driver=self.driver, + ) + ret = self.driver.ex_resume_node(node) + self.assertTrue(ret is True) + + def test_ex_get_console_output(self): + node = Node( + id='12086', name=None, state=None, + public_ips=None, private_ips=None, driver=self.driver, + ) + resp = self.driver.ex_get_console_output(node) + expected_output = 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE' + self.assertEqual(resp['output'], expected_output) + + def test_ex_list_snapshots(self): + if self.driver_type.type == 'rackspace': + self.conn_classes[0].type = 'RACKSPACE' + self.conn_classes[1].type = 'RACKSPACE' + + snapshots = self.driver.ex_list_snapshots() + self.assertEqual(len(snapshots), 2) + self.assertEqual(snapshots[0].extra['name'], 'snap-001') + + def test_ex_create_snapshot(self): + volume = self.driver.list_volumes()[0] + if self.driver_type.type == 'rackspace': + self.conn_classes[0].type = 'RACKSPACE' + self.conn_classes[1].type = 'RACKSPACE' + + ret = self.driver.ex_create_snapshot(volume, + 'Test Volume', + 'This is a test') + self.assertEqual(ret.id, '3fbbcccf-d058-4502-8844-6feeffdf4cb5') + + def test_ex_delete_snapshot(self): + if self.driver_type.type == 'rackspace': + self.conn_classes[0].type = 'RACKSPACE' + self.conn_classes[1].type = 'RACKSPACE' + + snapshot = self.driver.ex_list_snapshots()[0] + ret = self.driver.ex_delete_snapshot(snapshot) + self.assertTrue(ret) + + +class OpenStack_1_1_FactoryMethodTests(OpenStack_1_1_Tests): + should_list_locations = False + should_list_volumes = True + + driver_klass = OpenStack_1_1_NodeDriver + driver_type = 
get_driver(Provider.OPENSTACK) + driver_args = OPENSTACK_PARAMS + ('1.1',) + driver_kwargs = {'ex_force_auth_version': '2.0'} + + +class OpenStack_1_1_MockHttp(MockHttpTestCase): + fixtures = ComputeFileFixtures('openstack_v1.1') + auth_fixtures = OpenStackFixtures() + json_content_headers = {'content-type': 'application/json; charset=UTF-8'} + + def _v2_0_tokens(self, method, url, body, headers): + body = self.auth_fixtures.load('_v2_0__auth.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_0(self, method, url, body, headers): + headers = { + 'x-auth-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', + 'x-server-management-url': 'https://api.example.com/v1.1/slug', + } + return (httplib.NO_CONTENT, "", headers, httplib.responses[httplib.NO_CONTENT]) + + def _v1_1_slug_servers_detail(self, method, url, body, headers): + body = self.fixtures.load('_servers_detail.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_servers_detail_ERROR_STATE_NO_IMAGE_ID(self, method, url, body, headers): + body = self.fixtures.load('_servers_detail_ERROR_STATE.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_flavors_detail(self, method, url, body, headers): + body = self.fixtures.load('_flavors_detail.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_images_detail(self, method, url, body, headers): + body = self.fixtures.load('_images_detail.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_servers(self, method, url, body, headers): + if method == "POST": + body = self.fixtures.load('_servers_create.json') + elif method == "GET": + body = self.fixtures.load('_servers.json') + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, 
httplib.responses[httplib.OK]) + + def _v1_1_slug_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load( + '_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe.json') + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_servers_12065_action(self, method, url, body, headers): + if method != "POST": + self.fail('HTTP method other than POST to action URL') + + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + + def _v1_1_slug_servers_12064_action(self, method, url, body, headers): + if method != "POST": + self.fail('HTTP method other than POST to action URL') + if "createImage" in json.loads(body): + return (httplib.ACCEPTED, "", + {"location": "http://127.0.0.1/v1.1/68/images/4949f9ee-2421-4c81-8b49-13119446008b"}, + httplib.responses[httplib.ACCEPTED]) + elif "rescue" in json.loads(body): + return (httplib.OK, '{"adminPass": "foo"}', {}, + httplib.responses[httplib.OK]) + + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + + def _v1_1_slug_servers_12066_action(self, method, url, body, headers): + if method != "POST": + self.fail('HTTP method other than POST to action URL') + if "rebuild" not in json.loads(body): + self.fail("Did not get expected action (rebuild) in action URL") + + self.assertTrue('\"OS-DCF:diskConfig\": \"MANUAL\"' in body, + msg="Manual disk configuration option was not specified in rebuild body: " + body) + + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + + def _v1_1_slug_servers_12065(self, method, url, body, headers): + if method == "DELETE": + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + else: + raise NotImplementedError() + + def _v1_1_slug_servers_12064(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_servers_12064.json') + return (httplib.OK, 
body, self.json_content_headers, httplib.responses[httplib.OK]) + elif method == "PUT": + body = self.fixtures.load('_servers_12064_updated_name_bob.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + elif method == "DELETE": + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + else: + raise NotImplementedError() + + def _v1_1_slug_servers_12062(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_servers_12064.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_servers_12063_metadata(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_servers_12063_metadata_two_keys.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + elif method == "PUT": + body = self.fixtures.load('_servers_12063_metadata_two_keys.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_servers_EX_DISK_CONFIG(self, method, url, body, headers): + if method == "POST": + body = u(body) + self.assertTrue(body.find('\"OS-DCF:diskConfig\": \"AUTO\"')) + body = self.fixtures.load('_servers_create_disk_config.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_flavors_7(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_flavors_7.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + else: + raise NotImplementedError() + + def _v1_1_slug_images_13(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_images_13.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + else: + raise NotImplementedError() + + def _v1_1_slug_images_26365521_8c62_11f9_2c33_283d153ecc3a(self, method, url, body, headers): + if method == 
"DELETE": + return (httplib.NO_CONTENT, "", {}, httplib.responses[httplib.NO_CONTENT]) + else: + raise NotImplementedError() + + def _v1_1_slug_images_4949f9ee_2421_4c81_8b49_13119446008b(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load( + '_images_4949f9ee_2421_4c81_8b49_13119446008b.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + else: + raise NotImplementedError() + + def _v1_1_slug_servers_1c01300f_ef97_4937_8f03_ac676d6234be_os_security_groups(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load( + '_servers_1c01300f-ef97-4937-8f03-ac676d6234be_os-security-groups.json') + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_os_security_groups(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_os_security_groups.json') + elif method == "POST": + body = self.fixtures.load('_os_security_groups_create.json') + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_os_security_groups_6(self, method, url, body, headers): + if method == "DELETE": + return (httplib.NO_CONTENT, "", {}, httplib.responses[httplib.NO_CONTENT]) + else: + raise NotImplementedError() + + def _v1_1_slug_os_security_group_rules(self, method, url, body, headers): + if method == "POST": + body = self.fixtures.load('_os_security_group_rules_create.json') + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_os_security_group_rules_2(self, method, url, body, headers): + if method == "DELETE": + return (httplib.NO_CONTENT, "", {}, httplib.responses[httplib.NO_CONTENT]) + else: + raise NotImplementedError() + + def _v1_1_slug_os_keypairs(self, method, url, body, headers): + if method == 
"GET": + body = self.fixtures.load('_os_keypairs.json') + elif method == "POST": + if 'public_key' in body: + body = self.fixtures.load('_os_keypairs_create_import.json') + else: + body = self.fixtures.load('_os_keypairs_create.json') + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_os_keypairs_test_key_pair(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('_os_keypairs_get_one.json') + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_os_keypairs_doesnt_exist(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('_os_keypairs_not_found.json') + else: + raise NotImplementedError() + + return (httplib.NOT_FOUND, body, self.json_content_headers, + httplib.responses[httplib.NOT_FOUND]) + + def _v1_1_slug_os_keypairs_key1(self, method, url, body, headers): + if method == "DELETE": + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + else: + raise NotImplementedError() + + def _v1_1_slug_os_volumes(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_os_volumes.json') + elif method == "POST": + body = self.fixtures.load('_os_volumes_create.json') + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_os_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load( + '_os_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d.json') + elif method == "DELETE": + body = '' + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_servers_12065_os_volume_attachments(self, method, url, body, headers): + if method == "POST": + body = 
self.fixtures.load( + '_servers_12065_os_volume_attachments.json') + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_servers_12065_os_volume_attachments_cd76a3a1_c4ce_40f6_9b9f_07a61508938d(self, method, url, body, + headers): + if method == "DELETE": + body = '' + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_os_floating_ip_pools(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_floating_ip_pools.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + else: + raise NotImplementedError() + + def _v1_1_slug_os_floating_ips_foo_bar_id(self, method, url, body, headers): + if method == "DELETE": + body = '' + return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.OK]) + else: + raise NotImplementedError() + + def _v1_1_slug_os_floating_ips(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_floating_ips.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + elif method == "POST": + body = self.fixtures.load('_floating_ip.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + else: + raise NotImplementedError() + + def _v1_1_slug_servers_4242_action(self, method, url, body, headers): + if method == "POST": + body = '' + return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.OK]) + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_os_networks(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('_os_networks.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + elif method 
== 'POST': + body = self.fixtures.load('_os_networks_POST.json') + return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.OK]) + raise NotImplementedError() + + def _v1_1_slug_os_networks_f13e5051_feea_416b_827a_1a0acc2dad14(self, method, url, body, headers): + if method == 'DELETE': + body = '' + return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.OK]) + raise NotImplementedError() + + def _v1_1_slug_servers_72258_action(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load('_servers_suspend.json') + return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.OK]) + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_servers_12063_action(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load('_servers_unpause.json') + return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.OK]) + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_servers_12086_action(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load('_servers_12086_console_output.json') + return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.OK]) + else: + raise NotImplementedError() + + def _v1_1_slug_os_snapshots(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('_os_snapshots.json') + elif method == 'POST': + body = self.fixtures.load('_os_snapshots_create.json') + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_os_snapshots_RACKSPACE(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('_os_snapshots_rackspace.json') + elif 
method == 'POST': + body = self.fixtures.load('_os_snapshots_create_rackspace.json') + else: + raise NotImplementedError() + + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_os_snapshots_3fbbcccf_d058_4502_8844_6feeffdf4cb5(self, method, url, body, headers): + if method == 'DELETE': + body = '' + status_code = httplib.NO_CONTENT + else: + raise NotImplementedError() + + return (status_code, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_os_snapshots_3fbbcccf_d058_4502_8844_6feeffdf4cb5_RACKSPACE(self, method, url, body, headers): + if method == 'DELETE': + body = '' + status_code = httplib.NO_CONTENT + else: + raise NotImplementedError() + + return (status_code, body, self.json_content_headers, httplib.responses[httplib.OK]) + + +# This exists because the nova compute url in devstack has v2 in there but the v1.1 fixtures +# work fine. + + +class OpenStack_2_0_MockHttp(OpenStack_1_1_MockHttp): + + def __init__(self, *args, **kwargs): + super(OpenStack_2_0_MockHttp, self).__init__(*args, **kwargs) + + methods1 = OpenStack_1_1_MockHttp.__dict__ + + names1 = [m for m in methods1 if m.find('_v1_1') == 0] + + for name in names1: + method = methods1[name] + new_name = name.replace('_v1_1_slug_', '_v2_1337_') + setattr(self, new_name, method_type(method, self, + OpenStack_2_0_MockHttp)) + + +class OpenStack_1_1_Auth_2_0_Tests(OpenStack_1_1_Tests): + driver_args = OPENSTACK_PARAMS + ('1.1',) + driver_kwargs = {'ex_force_auth_version': '2.0'} + + def setUp(self): + self.driver_klass.connectionCls.conn_classes = \ + (OpenStack_2_0_MockHttp, OpenStack_2_0_MockHttp) + self.driver_klass.connectionCls.auth_url = "https://auth.api.example.com" + OpenStackMockHttp.type = None + OpenStack_1_1_MockHttp.type = None + OpenStack_2_0_MockHttp.type = None + self.driver = self.create_driver() + # normally authentication happens lazily, but we force it here + 
self.driver.connection._populate_hosts_and_request_paths() + clear_pricing_data() + self.node = self.driver.list_nodes()[1] + + def test_auth_user_info_is_set(self): + self.driver.connection._populate_hosts_and_request_paths() + self.assertEqual(self.driver.connection.auth_user_info, { + 'id': '7', + 'name': 'testuser', + 'roles': [{'description': 'Default Role.', + 'id': 'identity:default', + 'name': 'identity:default'}]}) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_opsource.py libcloud-0.15.1/libcloud/test/compute/test_opsource.py --- libcloud-0.5.0/libcloud/test/compute/test_opsource.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_opsource.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,251 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys +import unittest +from libcloud.utils.py3 import httplib + +from libcloud.common.types import InvalidCredsError +from libcloud.compute.drivers.opsource import OpsourceNodeDriver as Opsource +from libcloud.compute.drivers.opsource import OpsourceAPIException +from libcloud.compute.base import Node, NodeAuthPassword, NodeLocation + +from libcloud.test import MockHttp +from libcloud.test.compute import TestCaseMixin +from libcloud.test.file_fixtures import ComputeFileFixtures + +from libcloud.test.secrets import OPSOURCE_PARAMS + + +class OpsourceTests(unittest.TestCase, TestCaseMixin): + + def setUp(self): + Opsource.connectionCls.conn_classes = (None, OpsourceMockHttp) + OpsourceMockHttp.type = None + self.driver = Opsource(*OPSOURCE_PARAMS) + + def test_invalid_creds(self): + OpsourceMockHttp.type = 'UNAUTHORIZED' + try: + self.driver.list_nodes() + self.assertTrue( + False) # Above command should have thrown an InvalidCredsException + except InvalidCredsError: + self.assertTrue(True) + + def test_list_sizes_response(self): + OpsourceMockHttp.type = None + ret = self.driver.list_sizes() + self.assertEqual(len(ret), 1) + size = ret[0] + self.assertEqual(size.name, 'default') + + def test_reboot_node_response(self): + node = Node(id='11', name=None, state=None, + public_ips=None, private_ips=None, driver=self.driver) + ret = node.reboot() + self.assertTrue(ret is True) + + def test_reboot_node_response_INPROGRESS(self): + OpsourceMockHttp.type = 'INPROGRESS' + node = Node(id='11', name=None, state=None, + public_ips=None, private_ips=None, driver=self.driver) + try: + node.reboot() + self.assertTrue( + False) # above command should have thrown OpsourceAPIException + except OpsourceAPIException: + self.assertTrue(True) + + def test_destroy_node_response(self): + node = Node(id='11', name=None, state=None, + public_ips=None, private_ips=None, driver=self.driver) + ret = node.destroy() + self.assertTrue(ret is True) + + def 
test_destroy_node_response_INPROGRESS(self): + OpsourceMockHttp.type = 'INPROGRESS' + node = Node(id='11', name=None, state=None, + public_ips=None, private_ips=None, driver=self.driver) + try: + node.destroy() + self.assertTrue( + False) # above command should have thrown OpsourceAPIException + except OpsourceAPIException: + self.assertTrue(True) + + def test_create_node_response(self): + rootPw = NodeAuthPassword('pass123') + image = self.driver.list_images()[0] + network = self.driver.ex_list_networks()[0] + node = self.driver.create_node(name='test2', image=image, auth=rootPw, + ex_description='test2 node', ex_network=network, + ex_isStarted=False) + self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') + self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') + + def test_ex_shutdown_graceful(self): + node = Node(id='11', name=None, state=None, + public_ips=None, private_ips=None, driver=self.driver) + ret = self.driver.ex_shutdown_graceful(node) + self.assertTrue(ret is True) + + def test_ex_shutdown_graceful_INPROGRESS(self): + OpsourceMockHttp.type = 'INPROGRESS' + node = Node(id='11', name=None, state=None, + public_ips=None, private_ips=None, driver=self.driver) + try: + self.driver.ex_shutdown_graceful(node) + self.assertTrue( + False) # above command should have thrown OpsourceAPIException + except OpsourceAPIException: + self.assertTrue(True) + + def test_ex_start_node(self): + node = Node(id='11', name=None, state=None, + public_ips=None, private_ips=None, driver=self.driver) + ret = self.driver.ex_start_node(node) + self.assertTrue(ret is True) + + def test_ex_start_node_INPROGRESS(self): + OpsourceMockHttp.type = 'INPROGRESS' + node = Node(id='11', name=None, state=None, + public_ips=None, private_ips=None, driver=self.driver) + try: + self.driver.ex_start_node(node) + self.assertTrue( + False) # above command should have thrown OpsourceAPIException + except OpsourceAPIException: + self.assertTrue(True) + + def 
test_ex_power_off(self): + node = Node(id='11', name=None, state=None, + public_ips=None, private_ips=None, driver=self.driver) + ret = self.driver.ex_power_off(node) + self.assertTrue(ret is True) + + def test_ex_power_off_INPROGRESS(self): + OpsourceMockHttp.type = 'INPROGRESS' + node = Node(id='11', name=None, state=None, + public_ips=None, private_ips=None, driver=self.driver) + try: + self.driver.ex_power_off(node) + self.assertTrue( + False) # above command should have thrown OpsourceAPIException + except OpsourceAPIException: + self.assertTrue(True) + + def test_ex_list_networks(self): + nets = self.driver.ex_list_networks() + self.assertEqual(nets[0].name, 'test-net1') + self.assertTrue(isinstance(nets[0].location, NodeLocation)) + + def test_node_public_ip(self): + nodes = self.driver.list_nodes() + node = [n for n in nodes if n.id == + 'abadbc7e-9e10-46ca-9d4a-194bcc6b6c16'][0] + self.assertEqual(node.public_ips[0], '200.16.132.7') + + +class OpsourceMockHttp(MockHttp): + + fixtures = ComputeFileFixtures('opsource') + + def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers): + return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED]) + + def _oec_0_9_myaccount(self, method, url, body, headers): + body = self.fixtures.load('oec_0_9_myaccount.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers): + body = self.fixtures.load('oec_0_9_myaccount.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _oec_0_9_base_image(self, method, url, body, headers): + body = self.fixtures.load('oec_0_9_base_image.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed(self, method, url, body, headers): + body = self.fixtures.load( + 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml') + return (httplib.OK, body, {}, 
httplib.responses[httplib.OK]) + + def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy(self, method, url, body, headers): + body = self.fixtures.load( + 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter(self, method, url, body, headers): + body = self.fixtures.load( + 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11(self, method, url, body, headers): + body = None + action = url.split('?')[-1] + + if action == 'restart': + body = self.fixtures.load( + 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml') + elif action == 'shutdown': + body = self.fixtures.load( + 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml') + elif action == 'delete': + body = self.fixtures.load( + 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml') + elif action == 'start': + body = self.fixtures.load( + 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml') + elif action == 'poweroff': + body = self.fixtures.load( + 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml') + + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_INPROGRESS(self, method, url, body, headers): + body = None + action = url.split('?')[-1] + + if action == 'restart': + body = self.fixtures.load( + 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml') + elif action == 'shutdown': + body = self.fixtures.load( + 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml') + elif action == 'delete': + body = self.fixtures.load( + 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml') + elif 
action == 'start': + body = self.fixtures.load( + 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml') + elif action == 'poweroff': + body = self.fixtures.load( + 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml') + + return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) + + def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(self, method, url, body, headers): + body = self.fixtures.load( + '_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation(self, method, url, body, headers): + body = self.fixtures.load( + 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_rackspace.py libcloud-0.15.1/libcloud/test/compute/test_rackspace.py --- libcloud-0.5.0/libcloud/test/compute/test_rackspace.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_rackspace.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,213 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import unittest + +from libcloud.utils.py3 import method_type +from libcloud.utils.py3 import httplib +from libcloud.compute.providers import DEPRECATED_RACKSPACE_PROVIDERS +from libcloud.compute.providers import get_driver +from libcloud.compute.drivers.rackspace import RackspaceFirstGenNodeDriver +from libcloud.compute.drivers.rackspace import RackspaceNodeDriver +from libcloud.test.compute.test_openstack import OpenStack_1_0_Tests +from libcloud.test.compute.test_openstack import OpenStack_1_1_Tests, \ + OpenStack_1_1_MockHttp +from libcloud.pricing import clear_pricing_data + +from libcloud.test.secrets import RACKSPACE_NOVA_PARAMS +from libcloud.test.secrets import RACKSPACE_PARAMS + + +class RackspaceusFirstGenUsTests(OpenStack_1_0_Tests): + should_list_locations = True + should_have_pricing = True + + driver_klass = RackspaceFirstGenNodeDriver + driver_type = RackspaceFirstGenNodeDriver + driver_args = RACKSPACE_PARAMS + driver_kwargs = {'region': 'us'} + + def test_error_is_thrown_on_accessing_old_constant(self): + for provider in DEPRECATED_RACKSPACE_PROVIDERS: + try: + get_driver(provider) + except Exception: + e = sys.exc_info()[1] + self.assertTrue(str(e).find('has been removed') != -1) + else: + self.fail('Exception was not thrown') + + def test_list_sizes_pricing(self): + sizes = self.driver.list_sizes() + + for size in sizes: + self.assertTrue(size.price > 0) + + +class RackspaceusFirstGenUkTests(OpenStack_1_0_Tests): + should_list_locations = True + should_have_pricing = True + + driver_klass = RackspaceFirstGenNodeDriver + driver_type = RackspaceFirstGenNodeDriver + driver_args = RACKSPACE_PARAMS + driver_kwargs = {'region': 'uk'} + + def test_list_sizes_pricing(self): + sizes = self.driver.list_sizes() + + for size in sizes: + self.assertTrue(size.price > 0) + + +class RackspaceNovaMockHttp(OpenStack_1_1_MockHttp): + + def 
__init__(self, *args, **kwargs): + super(RackspaceNovaMockHttp, self).__init__(*args, **kwargs) + + methods1 = OpenStack_1_1_MockHttp.__dict__ + + names1 = [m for m in methods1 if m.find('_v1_1') == 0] + + for name in names1: + method = methods1[name] + new_name = name.replace('_v1_1_slug_', '_v2_1337_') + setattr(self, new_name, method_type(method, self, + RackspaceNovaMockHttp)) + + def _v2_1337_os_networksv2(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('_os_networks.json') + return (httplib.OK, body, self.json_content_headers, + httplib.responses[httplib.OK]) + elif method == 'POST': + body = self.fixtures.load('_os_networks_POST.json') + return (httplib.ACCEPTED, body, self.json_content_headers, + httplib.responses[httplib.OK]) + raise NotImplementedError() + + def _v2_1337_os_networksv2_f13e5051_feea_416b_827a_1a0acc2dad14(self, + method, + url, body, + headers): + if method == 'DELETE': + body = '' + return (httplib.ACCEPTED, body, self.json_content_headers, + httplib.responses[httplib.OK]) + raise NotImplementedError() + + +class RackspaceNovaLonMockHttp(RackspaceNovaMockHttp): + + def _v2_0_tokens(self, method, url, body, headers): + body = self.auth_fixtures.load('_v2_0__auth_lon.json') + return (httplib.OK, body, self.json_content_headers, + httplib.responses[httplib.OK]) + + +class BaseRackspaceNovaTestCase(object): + conn_classes = (RackspaceNovaMockHttp, RackspaceNovaMockHttp) + auth_url = 'https://auth.api.example.com' + + def create_driver(self): + return self.driver_type(*self.driver_args, **self.driver_kwargs) + + def setUp(self): + self.driver_klass.connectionCls.conn_classes = self.conn_classes + self.driver_klass.connectionCls.auth_url = self.auth_url + self.conn_classes[0].type = None + self.conn_classes[1].type = None + self.driver = self.create_driver() + # normally authentication happens lazily, but we force it here + self.driver.connection._populate_hosts_and_request_paths() + clear_pricing_data() + 
self.node = self.driver.list_nodes()[1] + + def test_service_catalog_contais_right_endpoint(self): + self.assertEqual(self.driver.connection.get_endpoint(), + self.expected_endpoint) + + def test_list_sizes_pricing(self): + sizes = self.driver.list_sizes() + + for size in sizes: + if size.ram > 256: + self.assertTrue(size.price > 0) + + +class RackspaceNovaDfwTests(BaseRackspaceNovaTestCase, OpenStack_1_1_Tests): + + driver_klass = RackspaceNodeDriver + driver_type = RackspaceNodeDriver + driver_args = RACKSPACE_NOVA_PARAMS + driver_kwargs = {'region': 'dfw'} + + expected_endpoint = 'https://dfw.servers.api.rackspacecloud.com/v2/1337' + + +class RackspaceNovaOrdTests(BaseRackspaceNovaTestCase, OpenStack_1_1_Tests): + + driver_klass = RackspaceNodeDriver + driver_type = RackspaceNodeDriver + driver_args = RACKSPACE_NOVA_PARAMS + driver_kwargs = {'region': 'ord'} + + expected_endpoint = 'https://ord.servers.api.rackspacecloud.com/v2/1337' + + +class RackspaceNovaIadTests(BaseRackspaceNovaTestCase, OpenStack_1_1_Tests): + + driver_klass = RackspaceNodeDriver + driver_type = RackspaceNodeDriver + driver_args = RACKSPACE_NOVA_PARAMS + driver_kwargs = {'region': 'iad'} + + expected_endpoint = 'https://iad.servers.api.rackspacecloud.com/v2/1337' + + +class RackspaceNovaLonTests(BaseRackspaceNovaTestCase, OpenStack_1_1_Tests): + + driver_klass = RackspaceNodeDriver + driver_type = RackspaceNodeDriver + driver_args = RACKSPACE_NOVA_PARAMS + driver_kwargs = {'region': 'lon'} + + conn_classes = (RackspaceNovaLonMockHttp, RackspaceNovaLonMockHttp) + auth_url = 'https://lon.auth.api.example.com' + + expected_endpoint = 'https://lon.servers.api.rackspacecloud.com/v2/1337' + + +class RackspaceNovaSydTests(BaseRackspaceNovaTestCase, OpenStack_1_1_Tests): + + driver_klass = RackspaceNodeDriver + driver_type = RackspaceNodeDriver + driver_args = RACKSPACE_NOVA_PARAMS + driver_kwargs = {'region': 'syd'} + + expected_endpoint = 'https://syd.servers.api.rackspacecloud.com/v2/1337' + + 
+class RackspaceNovaHkgTests(BaseRackspaceNovaTestCase, OpenStack_1_1_Tests): + + driver_klass = RackspaceNodeDriver + driver_type = RackspaceNodeDriver + driver_args = RACKSPACE_NOVA_PARAMS + driver_kwargs = {'region': 'hkg'} + + expected_endpoint = 'https://hkg.servers.api.rackspacecloud.com/v2/1337' + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_rimuhosting.py libcloud-0.15.1/libcloud/test/compute/test_rimuhosting.py --- libcloud-0.5.0/libcloud/test/compute/test_rimuhosting.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_rimuhosting.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,113 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# Copyright 2009 RedRata Ltd + +import sys +import unittest +from libcloud.utils.py3 import httplib + +from libcloud.compute.drivers.rimuhosting import RimuHostingNodeDriver + +from libcloud.test import MockHttp +from libcloud.test.compute import TestCaseMixin +from libcloud.test.file_fixtures import ComputeFileFixtures + + +class RimuHostingTest(unittest.TestCase, TestCaseMixin): + + def setUp(self): + RimuHostingNodeDriver.connectionCls.conn_classes = (None, + RimuHostingMockHttp) + self.driver = RimuHostingNodeDriver('foo') + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + self.assertEqual(len(nodes), 1) + node = nodes[0] + self.assertEqual(node.public_ips[0], "1.2.3.4") + self.assertEqual(node.public_ips[1], "1.2.3.5") + self.assertEqual(node.extra['order_oid'], 88833465) + self.assertEqual(node.id, "order-88833465-api-ivan-net-nz") + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 1) + size = sizes[0] + self.assertEqual(size.ram, 950) + self.assertEqual(size.disk, 20) + self.assertEqual(size.bandwidth, 75) + self.assertEqual(size.price, 32.54) + + def test_list_images(self): + images = self.driver.list_images() + self.assertEqual(len(images), 6) + image = images[0] + self.assertEqual(image.name, "Debian 5.0 (aka Lenny, RimuHosting" + " recommended distro)") + self.assertEqual(image.id, "lenny") + + def test_reboot_node(self): + # Raises exception on failure + node = self.driver.list_nodes()[0] + self.driver.reboot_node(node) + + def test_destroy_node(self): + # Raises exception on failure + node = self.driver.list_nodes()[0] + self.driver.destroy_node(node) + + def test_create_node(self): + # Raises exception on failure + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + self.driver.create_node(name="api.ivan.net.nz", image=image, size=size) + + +class RimuHostingMockHttp(MockHttp): + + fixtures = ComputeFileFixtures('rimuhosting') + + def _r_orders(self, method, url, 
body, headers): + body = self.fixtures.load('r_orders.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _r_pricing_plans(self, method, url, body, headers): + body = self.fixtures.load('r_pricing_plans.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _r_distributions(self, method, url, body, headers): + body = self.fixtures.load('r_distributions.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _r_orders_new_vps(self, method, url, body, headers): + body = self.fixtures.load('r_orders_new_vps.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _r_orders_order_88833465_api_ivan_net_nz_vps(self, method, url, body, headers): + body = self.fixtures.load( + 'r_orders_order_88833465_api_ivan_net_nz_vps.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _r_orders_order_88833465_api_ivan_net_nz_vps_running_state( + self, method, + url, body, + headers): + body = self.fixtures.load( + 'r_orders_order_88833465_api_ivan_net_nz_vps_running_state.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_softlayer.py libcloud-0.15.1/libcloud/test/compute/test_softlayer.py --- libcloud-0.5.0/libcloud/test/compute/test_softlayer.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_softlayer.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,193 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import sys + +from libcloud.common.types import InvalidCredsError + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import xmlrpclib +from libcloud.utils.py3 import next + +from libcloud.compute.drivers.softlayer import SoftLayerNodeDriver as SoftLayer +from libcloud.compute.drivers.softlayer import SoftLayerException, \ + NODE_STATE_MAP +from libcloud.compute.types import NodeState + +from libcloud.test import MockHttp # pylint: disable-msg=E0611 +from libcloud.test.file_fixtures import ComputeFileFixtures +from libcloud.test.secrets import SOFTLAYER_PARAMS + + +class SoftLayerTests(unittest.TestCase): + + def setUp(self): + SoftLayer.connectionCls.conn_classes = ( + SoftLayerMockHttp, SoftLayerMockHttp) + SoftLayerMockHttp.type = None + self.driver = SoftLayer(*SOFTLAYER_PARAMS) + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + node = nodes[0] + self.assertEqual(node.name, 'libcloud-testing1.example.com') + self.assertEqual(node.state, NodeState.RUNNING) + self.assertEqual(node.extra['password'], 'L3TJVubf') + + def test_initializing_state(self): + nodes = self.driver.list_nodes() + node = nodes[1] + self.assertEqual(node.state, NODE_STATE_MAP['INITIATING']) + + def test_list_locations(self): + locations = self.driver.list_locations() + dal = next(l for l in locations if l.id == 'dal05') + self.assertEqual(dal.country, 'US') + self.assertEqual(dal.id, 'dal05') + self.assertEqual(dal.name, 'Dallas - Central U.S.') + + def test_list_images(self): + images = self.driver.list_images() + image = images[0] + 
self.assertEqual(image.id, 'CENTOS_6_64') + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 13) + + def test_create_node(self): + node = self.driver.create_node(name="libcloud-testing", + location=self.driver.list_locations()[0], + size=self.driver.list_sizes()[0], + image=self.driver.list_images()[0]) + self.assertEqual(node.name, 'libcloud-testing.example.com') + self.assertEqual(node.state, NODE_STATE_MAP['RUNNING']) + + def test_create_fail(self): + SoftLayerMockHttp.type = "SOFTLAYEREXCEPTION" + self.assertRaises( + SoftLayerException, + self.driver.create_node, + name="SOFTLAYEREXCEPTION", + location=self.driver.list_locations()[0], + size=self.driver.list_sizes()[0], + image=self.driver.list_images()[0]) + + def test_create_creds_error(self): + SoftLayerMockHttp.type = "INVALIDCREDSERROR" + self.assertRaises( + InvalidCredsError, + self.driver.create_node, + name="INVALIDCREDSERROR", + location=self.driver.list_locations()[0], + size=self.driver.list_sizes()[0], + image=self.driver.list_images()[0]) + + def test_create_node_no_location(self): + self.driver.create_node(name="Test", + size=self.driver.list_sizes()[0], + image=self.driver.list_images()[0]) + + def test_create_node_no_image(self): + self.driver.create_node(name="Test", size=self.driver.list_sizes()[0]) + + def test_create_node_san(self): + self.driver.create_node(name="Test", ex_local_disk=False) + + def test_create_node_domain_for_name(self): + self.driver.create_node(name="libcloud.org") + + def test_create_node_ex_options(self): + self.driver.create_node(name="Test", + location=self.driver.list_locations()[0], + size=self.driver.list_sizes()[0], + image=self.driver.list_images()[0], + ex_domain='libcloud.org', + ex_cpus=2, + ex_ram=2048, + ex_disk=100, + ex_bandwidth=10, + ex_local_disk=False, + ex_datacenter='Dal05', + ex_os='UBUNTU_LATEST') + + def test_reboot_node(self): + node = self.driver.list_nodes()[0] + self.driver.reboot_node(node) + + 
def test_destroy_node(self): + node = self.driver.list_nodes()[0] + self.driver.destroy_node(node) + + +class SoftLayerMockHttp(MockHttp): + fixtures = ComputeFileFixtures('softlayer') + + def _get_method_name(self, type, use_param, qs, path): + return "_xmlrpc" + + def _xmlrpc(self, method, url, body, headers): + params, meth_name = xmlrpclib.loads(body) + url = url.replace("/", "_") + meth_name = "%s_%s" % (url, meth_name) + return getattr(self, meth_name)(method, url, body, headers) + + def _xmlrpc_v3_SoftLayer_Virtual_Guest_getCreateObjectOptions( + self, method, url, body, headers): + body = self.fixtures.load( + 'v3__SoftLayer_Virtual_Guest_getCreateObjectOptions.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_v3_SoftLayer_Account_getVirtualGuests( + self, method, url, body, headers): + body = self.fixtures.load('v3_SoftLayer_Account_getVirtualGuests.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_v3_SoftLayer_Location_Datacenter_getDatacenters( + self, method, url, body, headers): + body = self.fixtures.load( + 'v3_SoftLayer_Location_Datacenter_getDatacenters.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_v3_SoftLayer_Virtual_Guest_createObject( + self, method, url, body, headers): + fixture = { + None: 'v3__SoftLayer_Virtual_Guest_createObject.xml', + 'INVALIDCREDSERROR': 'SoftLayer_Account.xml', + 'SOFTLAYEREXCEPTION': 'fail.xml', + }[self.type] + body = self.fixtures.load(fixture) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_v3_SoftLayer_Virtual_Guest_getObject( + self, method, url, body, headers): + body = self.fixtures.load( + 'v3__SoftLayer_Virtual_Guest_getObject.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_v3_SoftLayer_Virtual_Guest_rebootSoft( + self, method, url, body, headers): + body = self.fixtures.load('empty.xml') + return (httplib.OK, body, {}, 
httplib.responses[httplib.OK]) + + def _xmlrpc_v3_SoftLayer_Virtual_Guest_deleteObject( + self, method, url, body, headers): + body = self.fixtures.load('empty.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_ssh_client.py libcloud-0.15.1/libcloud/test/compute/test_ssh_client.py --- libcloud-0.5.0/libcloud/test/compute/test_ssh_client.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_ssh_client.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,321 @@ +# -*- coding: utf-8 -*- +# Licensed to the Apache Software Foundation (ASF) under one or moreĀ§ +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import with_statement + +import os +import sys +import tempfile + +from libcloud import _init_once +from libcloud.test import LibcloudTestCase +from libcloud.test import unittest +from libcloud.compute.ssh import ParamikoSSHClient +from libcloud.compute.ssh import ShellOutSSHClient +from libcloud.compute.ssh import have_paramiko + +from libcloud.utils.py3 import StringIO + +from mock import patch, Mock + +if not have_paramiko: + ParamikoSSHClient = None # NOQA +else: + import paramiko + + +class ParamikoSSHClientTests(LibcloudTestCase): + + @patch('paramiko.SSHClient', Mock) + def setUp(self): + """ + Creates the object patching the actual connection. + """ + conn_params = {'hostname': 'dummy.host.org', + 'port': 8822, + 'username': 'ubuntu', + 'key': '~/.ssh/ubuntu_ssh', + 'timeout': '600'} + _, self.tmp_file = tempfile.mkstemp() + os.environ['LIBCLOUD_DEBUG'] = self.tmp_file + _init_once() + self.ssh_cli = ParamikoSSHClient(**conn_params) + + @patch('paramiko.SSHClient', Mock) + def test_create_with_password(self): + conn_params = {'hostname': 'dummy.host.org', + 'username': 'ubuntu', + 'password': 'ubuntu'} + mock = ParamikoSSHClient(**conn_params) + mock.connect() + + expected_conn = {'username': 'ubuntu', + 'password': 'ubuntu', + 'allow_agent': False, + 'hostname': 'dummy.host.org', + 'look_for_keys': False, + 'port': 22} + mock.client.connect.assert_called_once_with(**expected_conn) + self.assertLogMsg('Connecting to server') + + @patch('paramiko.SSHClient', Mock) + def test_deprecated_key_argument(self): + conn_params = {'hostname': 'dummy.host.org', + 'username': 'ubuntu', + 'key': 'id_rsa'} + mock = ParamikoSSHClient(**conn_params) + mock.connect() + + expected_conn = {'username': 'ubuntu', + 'allow_agent': False, + 'hostname': 'dummy.host.org', + 'look_for_keys': False, + 'key_filename': 'id_rsa', + 'port': 22} + mock.client.connect.assert_called_once_with(**expected_conn) + 
self.assertLogMsg('Connecting to server') + + def test_key_files_and_key_material_arguments_are_mutual_exclusive(self): + conn_params = {'hostname': 'dummy.host.org', + 'username': 'ubuntu', + 'key_files': 'id_rsa', + 'key_material': 'key'} + + expected_msg = ('key_files and key_material arguments are mutually ' + 'exclusive') + self.assertRaisesRegexp(ValueError, expected_msg, + ParamikoSSHClient, **conn_params) + + @patch('paramiko.SSHClient', Mock) + def test_key_material_argument(self): + path = os.path.join(os.path.dirname(__file__), + 'fixtures', 'misc', 'dummy_rsa') + + with open(path, 'r') as fp: + private_key = fp.read() + + conn_params = {'hostname': 'dummy.host.org', + 'username': 'ubuntu', + 'key_material': private_key} + mock = ParamikoSSHClient(**conn_params) + mock.connect() + + pkey = paramiko.RSAKey.from_private_key(StringIO(private_key)) + expected_conn = {'username': 'ubuntu', + 'allow_agent': False, + 'hostname': 'dummy.host.org', + 'look_for_keys': False, + 'pkey': pkey, + 'port': 22} + mock.client.connect.assert_called_once_with(**expected_conn) + self.assertLogMsg('Connecting to server') + + @patch('paramiko.SSHClient', Mock) + def test_key_material_argument_invalid_key(self): + conn_params = {'hostname': 'dummy.host.org', + 'username': 'ubuntu', + 'key_material': 'id_rsa'} + + mock = ParamikoSSHClient(**conn_params) + + expected_msg = 'Invalid or unsupported key type' + self.assertRaisesRegexp(paramiko.ssh_exception.SSHException, + expected_msg, mock.connect) + + @patch('paramiko.SSHClient', Mock) + def test_create_with_key(self): + conn_params = {'hostname': 'dummy.host.org', + 'username': 'ubuntu', + 'key_files': 'id_rsa'} + mock = ParamikoSSHClient(**conn_params) + mock.connect() + + expected_conn = {'username': 'ubuntu', + 'allow_agent': False, + 'hostname': 'dummy.host.org', + 'look_for_keys': False, + 'key_filename': 'id_rsa', + 'port': 22} + mock.client.connect.assert_called_once_with(**expected_conn) + self.assertLogMsg('Connecting 
to server') + + @patch('paramiko.SSHClient', Mock) + def test_create_with_password_and_key(self): + conn_params = {'hostname': 'dummy.host.org', + 'username': 'ubuntu', + 'password': 'ubuntu', + 'key': 'id_rsa'} + mock = ParamikoSSHClient(**conn_params) + mock.connect() + + expected_conn = {'username': 'ubuntu', + 'password': 'ubuntu', + 'allow_agent': False, + 'hostname': 'dummy.host.org', + 'look_for_keys': False, + 'key_filename': 'id_rsa', + 'port': 22} + mock.client.connect.assert_called_once_with(**expected_conn) + self.assertLogMsg('Connecting to server') + + @patch('paramiko.SSHClient', Mock) + def test_create_without_credentials(self): + """ + Initialize object with no credentials. + + Just to have better coverage, initialize the object + without 'password' neither 'key'. + """ + conn_params = {'hostname': 'dummy.host.org', + 'username': 'ubuntu'} + mock = ParamikoSSHClient(**conn_params) + mock.connect() + + expected_conn = {'username': 'ubuntu', + 'hostname': 'dummy.host.org', + 'allow_agent': True, + 'look_for_keys': True, + 'port': 22} + mock.client.connect.assert_called_once_with(**expected_conn) + + def test_basic_usage_absolute_path(self): + """ + Basic execution. 
+ """ + mock = self.ssh_cli + # script to execute + sd = "/root/random_script.sh" + + # Connect behavior + mock.connect() + mock_cli = mock.client # The actual mocked object: SSHClient + expected_conn = {'username': 'ubuntu', + 'key_filename': '~/.ssh/ubuntu_ssh', + 'allow_agent': False, + 'hostname': 'dummy.host.org', + 'look_for_keys': False, + 'timeout': '600', + 'port': 8822} + mock_cli.connect.assert_called_once_with(**expected_conn) + + mock.put(sd) + # Make assertions over 'put' method + mock_cli.open_sftp().chdir.assert_called_with('root') + mock_cli.open_sftp().file.assert_called_once_with('random_script.sh', + mode='w') + + mock.run(sd) + + # Make assertions over 'run' method + mock_cli.get_transport().open_session().exec_command \ + .assert_called_once_with(sd) + self.assertLogMsg('Executing command (cmd=/root/random_script.sh)') + self.assertLogMsg('Command finished') + + mock.close() + + def test_delete_script(self): + """ + Provide a basic test with 'delete' action. + """ + mock = self.ssh_cli + # script to execute + sd = '/root/random_script.sh' + + mock.connect() + + mock.delete(sd) + # Make assertions over the 'delete' method + mock.client.open_sftp().unlink.assert_called_with(sd) + self.assertLogMsg('Deleting file') + + mock.close() + self.assertLogMsg('Closing server connection') + + def assertLogMsg(self, expected_msg): + with open(self.tmp_file, 'r') as fp: + content = fp.read() + + self.assertTrue(content.find(expected_msg) != -1) + + +if not ParamikoSSHClient: + class ParamikoSSHClientTests(LibcloudTestCase): # NOQA + pass + + +class ShellOutSSHClientTests(LibcloudTestCase): + + def test_password_auth_not_supported(self): + try: + ShellOutSSHClient(hostname='localhost', username='foo', + password='bar') + except ValueError: + e = sys.exc_info()[1] + msg = str(e) + self.assertTrue('ShellOutSSHClient only supports key auth' in msg) + else: + self.fail('Exception was not thrown') + + def test_ssh_executable_not_available(self): + class 
MockChild(object): + returncode = 127 + + def communicate(*args, **kwargs): + pass + + def mock_popen(*args, **kwargs): + return MockChild() + + with patch('subprocess.Popen', mock_popen): + try: + ShellOutSSHClient(hostname='localhost', username='foo') + except ValueError: + e = sys.exc_info()[1] + msg = str(e) + self.assertTrue('ssh client is not available' in msg) + else: + self.fail('Exception was not thrown') + + def test_connect_success(self): + client = ShellOutSSHClient(hostname='localhost', username='root') + self.assertTrue(client.connect()) + + def test_close_success(self): + client = ShellOutSSHClient(hostname='localhost', username='root') + self.assertTrue(client.close()) + + def test_get_base_ssh_command(self): + client1 = ShellOutSSHClient(hostname='localhost', username='root') + client2 = ShellOutSSHClient(hostname='localhost', username='root', + key='/home/my.key') + client3 = ShellOutSSHClient(hostname='localhost', username='root', + key='/home/my.key', timeout=5) + + cmd1 = client1._get_base_ssh_command() + cmd2 = client2._get_base_ssh_command() + cmd3 = client3._get_base_ssh_command() + + self.assertEqual(cmd1, ['ssh', 'root@localhost']) + self.assertEqual(cmd2, ['ssh', '-i', '/home/my.key', + 'root@localhost']) + self.assertEqual(cmd3, ['ssh', '-i', '/home/my.key', + '-oConnectTimeout=5', 'root@localhost']) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_vcloud.py libcloud-0.15.1/libcloud/test/compute/test_vcloud.py --- libcloud-0.5.0/libcloud/test/compute/test_vcloud.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_vcloud.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,717 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest + +try: + from lxml import etree as ET +except ImportError: + from xml.etree import ElementTree as ET + +from libcloud.utils.py3 import httplib, b + +from libcloud.compute.drivers.vcloud import TerremarkDriver, VCloudNodeDriver, Subject +from libcloud.compute.drivers.vcloud import VCloud_1_5_NodeDriver, ControlAccess +from libcloud.compute.drivers.vcloud import VCloud_5_1_NodeDriver +from libcloud.compute.drivers.vcloud import Vdc +from libcloud.compute.base import Node, NodeImage +from libcloud.compute.types import NodeState + +from libcloud.test import MockHttp +from libcloud.test.compute import TestCaseMixin +from libcloud.test.file_fixtures import ComputeFileFixtures + +from libcloud.test.secrets import VCLOUD_PARAMS + + +class TerremarkTests(unittest.TestCase, TestCaseMixin): + + def setUp(self): + VCloudNodeDriver.connectionCls.host = "test" + VCloudNodeDriver.connectionCls.conn_classes = (None, TerremarkMockHttp) + TerremarkMockHttp.type = None + self.driver = TerremarkDriver(*VCLOUD_PARAMS) + + def test_list_images(self): + ret = self.driver.list_images() + self.assertEqual( + ret[0].id, 'https://services.vcloudexpress.terremark.com/api/v0.8/vAppTemplate/5') + + def test_list_sizes(self): + ret = self.driver.list_sizes() + self.assertEqual(ret[0].ram, 512) + + def test_create_node(self): + image = self.driver.list_images()[0] + size = self.driver.list_sizes()[0] 
+ node = self.driver.create_node( + name='testerpart2', + image=image, + size=size, + vdc='https://services.vcloudexpress.terremark.com/api/v0.8/vdc/224', + network='https://services.vcloudexpress.terremark.com/api/v0.8/network/725', + cpus=2, + ) + self.assertTrue(isinstance(node, Node)) + self.assertEqual( + node.id, 'https://services.vcloudexpress.terremark.com/api/v0.8/vapp/14031') + self.assertEqual(node.name, 'testerpart2') + + def test_list_nodes(self): + ret = self.driver.list_nodes() + node = ret[0] + self.assertEqual( + node.id, 'https://services.vcloudexpress.terremark.com/api/v0.8/vapp/14031') + self.assertEqual(node.name, 'testerpart2') + self.assertEqual(node.state, NodeState.RUNNING) + self.assertEqual(node.public_ips, []) + self.assertEqual(node.private_ips, ['10.112.78.69']) + + def test_reboot_node(self): + node = self.driver.list_nodes()[0] + ret = self.driver.reboot_node(node) + self.assertTrue(ret) + + def test_destroy_node(self): + node = self.driver.list_nodes()[0] + ret = self.driver.destroy_node(node) + self.assertTrue(ret) + + +class VCloud_1_5_Tests(unittest.TestCase, TestCaseMixin): + + def setUp(self): + VCloudNodeDriver.connectionCls.host = 'test' + VCloudNodeDriver.connectionCls.conn_classes = ( + None, VCloud_1_5_MockHttp) + VCloud_1_5_MockHttp.type = None + self.driver = VCloud_1_5_NodeDriver(*VCLOUD_PARAMS) + + def test_list_images(self): + ret = self.driver.list_images() + self.assertEqual( + 'https://vm-vcloud/api/vAppTemplate/vappTemplate-ac1bc027-bf8c-4050-8643-4971f691c158', ret[0].id) + + def test_list_sizes(self): + ret = self.driver.list_sizes() + self.assertEqual(ret[0].ram, 512) + + def test_networks(self): + ret = self.driver.networks + self.assertEqual( + ret[0].get('href'), 'https://vm-vcloud/api/network/dca8b667-6c8f-4c3e-be57-7a9425dba4f4') + + def test_create_node(self): + image = self.driver.list_images()[0] + size = self.driver.list_sizes()[0] + node = self.driver.create_node( + name='testNode', + image=image, + 
size=size, + ex_vdc='MyVdc', + ex_network='vCloud - Default', + cpus=2, + ) + self.assertTrue(isinstance(node, Node)) + self.assertEqual( + 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a', node.id) + self.assertEqual('testNode', node.name) + + def test_create_node_clone(self): + image = self.driver.list_nodes()[0] + node = self.driver.create_node(name='testNode', image=image) + self.assertTrue(isinstance(node, Node)) + self.assertEqual( + 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a', node.id) + self.assertEqual('testNode', node.name) + + def test_list_nodes(self): + ret = self.driver.list_nodes() + node = ret[0] + self.assertEqual( + node.id, 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a') + self.assertEqual(node.name, 'testNode') + self.assertEqual(node.state, NodeState.RUNNING) + self.assertEqual(node.public_ips, ['65.41.67.2']) + self.assertEqual(node.private_ips, ['65.41.67.2']) + self.assertEqual(node.extra, {'vdc': 'MyVdc', + 'vms': [{ + 'id': 'https://vm-vcloud/api/vApp/vm-dd75d1d3-5b7b-48f0-aff3-69622ab7e045', + 'name': 'testVm', + 'state': NodeState.RUNNING, + 'public_ips': ['65.41.67.2'], + 'private_ips': ['65.41.67.2'], + 'os_type': 'rhel5_64Guest' + }]}) + node = ret[1] + self.assertEqual( + node.id, 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b') + self.assertEqual(node.name, 'testNode2') + self.assertEqual(node.state, NodeState.RUNNING) + self.assertEqual(node.public_ips, ['192.168.0.103']) + self.assertEqual(node.private_ips, ['192.168.0.100']) + self.assertEqual(node.extra, {'vdc': 'MyVdc', + 'vms': [{ + 'id': 'https://vm-vcloud/api/vApp/vm-dd75d1d3-5b7b-48f0-aff3-69622ab7e046', + 'name': 'testVm2', + 'state': NodeState.RUNNING, + 'public_ips': ['192.168.0.103'], + 'private_ips': ['192.168.0.100'], + 'os_type': 'rhel5_64Guest' + }]}) + + def test_reboot_node(self): + node = self.driver.list_nodes()[0] + ret = self.driver.reboot_node(node) + 
self.assertTrue(ret) + + def test_destroy_node(self): + node = self.driver.list_nodes()[0] + ret = self.driver.destroy_node(node) + self.assertTrue(ret) + + def test_validate_vm_names(self): + # valid inputs + self.driver._validate_vm_names(['host-n-ame-name']) + self.driver._validate_vm_names(['tc-mybuild-b1']) + self.driver._validate_vm_names(None) + # invalid inputs + self.assertRaises( + ValueError, self.driver._validate_vm_names, ['invalid.host']) + self.assertRaises( + ValueError, self.driver._validate_vm_names, ['inv-alid.host']) + self.assertRaises( + ValueError, self.driver._validate_vm_names, ['hostnametoooolong']) + self.assertRaises( + ValueError, self.driver._validate_vm_names, ['host$name']) + + def test_change_vm_names(self): + self.driver._change_vm_names( + '/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a', ['changed1', 'changed2']) + + def test_is_node(self): + self.assertTrue(self.driver._is_node( + Node('testId', 'testNode', state=0, public_ips=[], private_ips=[], driver=self.driver))) + self.assertFalse(self.driver._is_node( + NodeImage('testId', 'testNode', driver=self.driver))) + + def test_ex_undeploy(self): + node = self.driver.ex_undeploy_node( + Node('https://test/api/vApp/undeployTest', 'testNode', state=0, + public_ips=[], private_ips=[], driver=self.driver)) + self.assertEqual(node.state, NodeState.STOPPED) + + def test_ex_undeploy_with_error(self): + node = self.driver.ex_undeploy_node( + Node('https://test/api/vApp/undeployErrorTest', 'testNode', + state=0, public_ips=[], private_ips=[], driver=self.driver)) + self.assertEqual(node.state, NodeState.STOPPED) + + def test_ex_find_node(self): + node = self.driver.ex_find_node('testNode') + self.assertEqual(node.name, "testNode") + node = self.driver.ex_find_node('testNode', self.driver.vdcs[0]) + self.assertEqual(node.name, "testNode") + node = self.driver.ex_find_node('testNonExisting', self.driver.vdcs[0]) + self.assertEqual(node, None) + + def 
test_ex_add_vm_disk__with_invalid_values(self): + self.assertRaises( + ValueError, self.driver.ex_add_vm_disk, 'dummy', 'invalid value') + self.assertRaises( + ValueError, self.driver.ex_add_vm_disk, 'dummy', '-1') + + def test_ex_add_vm_disk(self): + self.driver.ex_add_vm_disk('https://test/api/vApp/vm-test', '20') + + def test_ex_set_vm_cpu__with_invalid_values(self): + self.assertRaises(ValueError, self.driver.ex_set_vm_cpu, 'dummy', 50) + self.assertRaises(ValueError, self.driver.ex_set_vm_cpu, 'dummy', -1) + + def test_ex_set_vm_cpu(self): + self.driver.ex_set_vm_cpu('https://test/api/vApp/vm-test', 4) + + def test_ex_set_vm_memory__with_invalid_values(self): + self.assertRaises( + ValueError, self.driver.ex_set_vm_memory, 'dummy', 777) + self.assertRaises( + ValueError, self.driver.ex_set_vm_memory, 'dummy', -1024) + + def test_ex_set_vm_memory(self): + self.driver.ex_set_vm_memory('https://test/api/vApp/vm-test', 1024) + + def test_vdcs(self): + vdcs = self.driver.vdcs + self.assertEqual(len(vdcs), 1) + self.assertEqual( + vdcs[0].id, 'https://vm-vcloud/api/vdc/3d9ae28c-1de9-4307-8107-9356ff8ba6d0') + self.assertEqual(vdcs[0].name, 'MyVdc') + self.assertEqual(vdcs[0].allocation_model, 'AllocationPool') + self.assertEqual(vdcs[0].storage.limit, 5120000) + self.assertEqual(vdcs[0].storage.used, 1984512) + self.assertEqual(vdcs[0].storage.units, 'MB') + self.assertEqual(vdcs[0].cpu.limit, 160000) + self.assertEqual(vdcs[0].cpu.used, 0) + self.assertEqual(vdcs[0].cpu.units, 'MHz') + self.assertEqual(vdcs[0].memory.limit, 527360) + self.assertEqual(vdcs[0].memory.used, 130752) + self.assertEqual(vdcs[0].memory.units, 'MB') + + def test_ex_list_nodes(self): + self.assertEqual( + len(self.driver.ex_list_nodes()), len(self.driver.list_nodes())) + + def test_ex_list_nodes__masked_exception(self): + """ + Test that we don't mask other exceptions. 
+ """ + brokenVdc = Vdc('/api/vdc/brokenVdc', 'brokenVdc', self.driver) + self.assertRaises(AnotherError, self.driver.ex_list_nodes, (brokenVdc)) + + def test_ex_power_off(self): + node = Node( + 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b', + 'testNode', NodeState.RUNNING, [], [], self.driver) + self.driver.ex_power_off_node(node) + + def test_ex_query(self): + results = self.driver.ex_query( + 'user', filter='name==jrambo', page=2, page_size=30, sort_desc='startDate') + self.assertEqual(len(results), 1) + self.assertEqual(results[0]['type'], 'UserRecord') + self.assertEqual(results[0]['name'], 'jrambo') + self.assertEqual(results[0]['isLdapUser'], 'true') + + def test_ex_get_control_access(self): + node = Node( + 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b', + 'testNode', NodeState.RUNNING, [], [], self.driver) + control_access = self.driver.ex_get_control_access(node) + self.assertEqual( + control_access.everyone_access_level, ControlAccess.AccessLevel.READ_ONLY) + self.assertEqual(len(control_access.subjects), 1) + self.assertEqual(control_access.subjects[0].type, 'group') + self.assertEqual(control_access.subjects[0].name, 'MyGroup') + self.assertEqual(control_access.subjects[ + 0].id, 'https://vm-vcloud/api/admin/group/b8202c48-7151-4e61-9a6c-155474c7d413') + self.assertEqual(control_access.subjects[ + 0].access_level, ControlAccess.AccessLevel.FULL_CONTROL) + + def test_ex_set_control_access(self): + node = Node( + 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b', + 'testNode', NodeState.RUNNING, [], [], self.driver) + control_access = ControlAccess(node, None, [Subject( + name='MyGroup', + type='group', + access_level=ControlAccess.AccessLevel.FULL_CONTROL)]) + self.driver.ex_set_control_access(node, control_access) + + def test_ex_get_metadata(self): + node = Node( + 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b', + 'testNode', NodeState.RUNNING, [], [], 
self.driver) + metadata = self.driver.ex_get_metadata(node) + self.assertEqual(metadata, {'owners': 'msamia@netsuite.com'}) + + def test_ex_set_metadata_entry(self): + node = Node( + 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b', + 'testNode', NodeState.RUNNING, [], [], self.driver) + self.driver.ex_set_metadata_entry(node, 'foo', 'bar') + + +class VCloud_5_1_Tests(unittest.TestCase, TestCaseMixin): + + def setUp(self): + VCloudNodeDriver.connectionCls.host = 'test' + VCloudNodeDriver.connectionCls.conn_classes = ( + None, VCloud_1_5_MockHttp) + VCloud_1_5_MockHttp.type = None + self.driver = VCloudNodeDriver( + *VCLOUD_PARAMS, **{'api_version': '5.1'}) + + self.assertTrue(isinstance(self.driver, VCloud_5_1_NodeDriver)) + + def _test_create_node_valid_ex_vm_memory(self): + # TODO: Hook up the fixture + values = [4, 1024, 4096] + + image = self.driver.list_images()[0] + size = self.driver.list_sizes()[0] + + for value in values: + self.driver.create_node( + name='testerpart2', + image=image, + size=size, + vdc='https://services.vcloudexpress.terremark.com/api/v0.8/vdc/224', + network='https://services.vcloudexpress.terremark.com/api/v0.8/network/725', + cpus=2, + ex_vm_memory=value + ) + + def test_create_node_invalid_ex_vm_memory(self): + values = [1, 3, 7] + + image = self.driver.list_images()[0] + size = self.driver.list_sizes()[0] + + for value in values: + try: + self.driver.create_node( + name='testerpart2', + image=image, + size=size, + vdc='https://services.vcloudexpress.terremark.com/api/v0.8/vdc/224', + network='https://services.vcloudexpress.terremark.com/api/v0.8/network/725', + cpus=2, + ex_vm_memory=value + ) + except ValueError: + pass + else: + self.fail('Exception was not thrown') + + def test_list_images(self): + ret = self.driver.list_images() + self.assertEqual( + 'https://vm-vcloud/api/vAppTemplate/vappTemplate-ac1bc027-bf8c-4050-8643-4971f691c158', ret[0].id) + + +class TerremarkMockHttp(MockHttp): + + fixtures = 
ComputeFileFixtures('terremark') + + def _api_v0_8_login(self, method, url, body, headers): + headers['set-cookie'] = 'vcloud-token=testtoken' + body = self.fixtures.load('api_v0_8_login.xml') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _api_v0_8_org_240(self, method, url, body, headers): + body = self.fixtures.load('api_v0_8_org_240.xml') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _api_v0_8_vdc_224(self, method, url, body, headers): + body = self.fixtures.load('api_v0_8_vdc_224.xml') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _api_v0_8_vdc_224_catalog(self, method, url, body, headers): + body = self.fixtures.load('api_v0_8_vdc_224_catalog.xml') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _api_v0_8_catalogItem_5(self, method, url, body, headers): + body = self.fixtures.load('api_v0_8_catalogItem_5.xml') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _api_v0_8_vdc_224_action_instantiateVAppTemplate(self, method, url, body, headers): + body = self.fixtures.load( + 'api_v0_8_vdc_224_action_instantiateVAppTemplate.xml') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _api_v0_8_vapp_14031_action_deploy(self, method, url, body, headers): + body = self.fixtures.load('api_v0_8_vapp_14031_action_deploy.xml') + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + + def _api_v0_8_task_10496(self, method, url, body, headers): + body = self.fixtures.load('api_v0_8_task_10496.xml') + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + + def _api_v0_8_vapp_14031_power_action_powerOn(self, method, url, body, headers): + body = self.fixtures.load( + 'api_v0_8_vapp_14031_power_action_powerOn.xml') + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + + def _api_v0_8_vapp_14031(self, method, url, body, headers): + if 
method == 'GET': + body = self.fixtures.load('api_v0_8_vapp_14031_get.xml') + elif method == 'DELETE': + body = '' + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + + def _api_v0_8_vapp_14031_power_action_reset(self, method, url, body, headers): + body = self.fixtures.load('api_v0_8_vapp_14031_power_action_reset.xml') + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + + def _api_v0_8_vapp_14031_power_action_poweroff(self, method, url, body, headers): + body = self.fixtures.load( + 'api_v0_8_vapp_14031_power_action_poweroff.xml') + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + + def _api_v0_8_task_11001(self, method, url, body, headers): + body = self.fixtures.load('api_v0_8_task_11001.xml') + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + + +class AnotherErrorMember(Exception): + + """ + helper class for the synthetic exception + """ + + def __init__(self): + self.tag = 'Error' + + def get(self, foo): + return 'ACCESS_TO_RESOURCE_IS_FORBIDDEN_1' + + +class AnotherError(Exception): + pass + + +class VCloud_1_5_MockHttp(MockHttp, unittest.TestCase): + + fixtures = ComputeFileFixtures('vcloud_1_5') + + def request(self, method, url, body=None, headers=None, raw=False): + self.assertTrue(url.startswith('/api/'), + ('"%s" is invalid. Needs to ' + 'start with "/api". 
The passed URL should be just ' + 'the path, not full URL.', url)) + super(VCloud_1_5_MockHttp, self).request(method, url, body, headers, + raw) + + def _api_sessions(self, method, url, body, headers): + headers['x-vcloud-authorization'] = 'testtoken' + body = self.fixtures.load('api_sessions.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + def _api_org(self, method, url, body, headers): + body = self.fixtures.load('api_org.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + def _api_org_96726c78_4ae3_402f_b08b_7a78c6903d2a(self, method, url, body, headers): + body = self.fixtures.load( + 'api_org_96726c78_4ae3_402f_b08b_7a78c6903d2a.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + def _api_network_dca8b667_6c8f_4c3e_be57_7a9425dba4f4(self, method, url, body, headers): + body = self.fixtures.load( + 'api_network_dca8b667_6c8f_4c3e_be57_7a9425dba4f4.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + def _api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0(self, method, url, body, headers): + body = self.fixtures.load( + 'api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + def _api_vdc_brokenVdc(self, method, url, body, headers): + body = self.fixtures.load('api_vdc_brokenVdc.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + def _api_vApp_vapp_errorRaiser(self, method, url, body, headers): + m = AnotherErrorMember() + raise AnotherError(m) + + def _api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_instantiateVAppTemplate(self, method, url, body, headers): + body = self.fixtures.load( + 'api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_instantiateVAppTemplate.xml') + return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] + + def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_powerOn(self, method, url, body, headers): + return 
self._api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(method, url, body, headers) + + # Clone + def _api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_cloneVApp(self, method, url, body, headers): + body = self.fixtures.load( + 'api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_cloneVApp.xml') + return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] + + def _api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_networkConnectionSection(self, method, url, body, headers): + body = self.fixtures.load( + 'api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml') + return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] + + def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a(self, method, url, body, headers): + status = httplib.OK + if method == 'GET': + body = self.fixtures.load( + 'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a.xml') + status = httplib.OK + elif method == 'DELETE': + body = self.fixtures.load( + 'api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml') + status = httplib.ACCEPTED + return status, body, headers, httplib.responses[status] + + def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b(self, method, url, body, headers): + body = self.fixtures.load( + 'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c(self, method, url, body, headers): + body = self.fixtures.load( + 'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + def _api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045(self, method, url, body, headers): + body = self.fixtures.load( + 'put_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml') + return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] + + def 
_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load( + 'get_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml') + status = httplib.OK + else: + body = self.fixtures.load( + 'put_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml') + status = httplib.ACCEPTED + return status, body, headers, httplib.responses[status] + + def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_reset(self, method, url, body, headers): + return self._api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(method, url, body, headers) + + def _api_task_b034df55_fe81_4798_bc81_1f0fd0ead450(self, method, url, body, headers): + body = self.fixtures.load( + 'api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + def _api_catalog_cddb3cb2_3394_4b14_b831_11fbc4028da4(self, method, url, body, headers): + body = self.fixtures.load( + 'api_catalog_cddb3cb2_3394_4b14_b831_11fbc4028da4.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + def _api_catalogItem_3132e037_759b_4627_9056_ca66466fa607(self, method, url, body, headers): + body = self.fixtures.load( + 'api_catalogItem_3132e037_759b_4627_9056_ca66466fa607.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + def _api_vApp_undeployTest(self, method, url, body, headers): + body = self.fixtures.load('api_vApp_undeployTest.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + def _api_vApp_undeployTest_action_undeploy(self, method, url, body, headers): + body = self.fixtures.load('api_task_undeploy.xml') + return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] + + def _api_task_undeploy(self, method, url, body, headers): + body = self.fixtures.load('api_task_undeploy.xml') + return httplib.OK, body, 
headers, httplib.responses[httplib.OK] + + def _api_vApp_undeployErrorTest(self, method, url, body, headers): + body = self.fixtures.load('api_vApp_undeployTest.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + def _api_vApp_undeployErrorTest_action_undeploy(self, method, url, body, headers): + if b('shutdown') in b(body): + body = self.fixtures.load('api_task_undeploy_error.xml') + else: + body = self.fixtures.load('api_task_undeploy.xml') + return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] + + def _api_task_undeployError(self, method, url, body, headers): + body = self.fixtures.load('api_task_undeploy_error.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + def _api_vApp_vapp_access_to_resource_forbidden(self, method, url, body, headers): + raise Exception( + ET.fromstring(self.fixtures.load('api_vApp_vapp_access_to_resource_forbidden.xml'))) + + def _api_vApp_vm_test(self, method, url, body, headers): + body = self.fixtures.load('api_vApp_vm_test.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + def _api_vApp_vm_test_virtualHardwareSection_disks(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load( + 'get_api_vApp_vm_test_virtualHardwareSection_disks.xml') + status = httplib.OK + else: + body = self.fixtures.load( + 'put_api_vApp_vm_test_virtualHardwareSection_disks.xml') + status = httplib.ACCEPTED + return status, body, headers, httplib.responses[status] + + def _api_vApp_vm_test_virtualHardwareSection_cpu(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load( + 'get_api_vApp_vm_test_virtualHardwareSection_cpu.xml') + status = httplib.OK + else: + body = self.fixtures.load( + 'put_api_vApp_vm_test_virtualHardwareSection_cpu.xml') + status = httplib.ACCEPTED + return status, body, headers, httplib.responses[status] + + def _api_vApp_vm_test_virtualHardwareSection_memory(self, method, url, body, 
headers): + if method == 'GET': + body = self.fixtures.load( + 'get_api_vApp_vm_test_virtualHardwareSection_memory.xml') + status = httplib.OK + else: + body = self.fixtures.load( + 'put_api_vApp_vm_test_virtualHardwareSection_memory.xml') + status = httplib.ACCEPTED + return status, body, headers, httplib.responses[status] + + def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_powerOff(self, method, url, body, headers): + return self._api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(method, url, body, headers) + + def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(self, method, url, body, headers): + assert method == 'POST' + body = self.fixtures.load( + 'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_all.xml') + return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] + + def _api_query(self, method, url, body, headers): + assert method == 'GET' + if 'type=user' in url: + self.assertTrue('page=2' in url) + self.assertTrue('filter=(name==jrambo)' in url) + self.assertTrue('sortDesc=startDate') + body = self.fixtures.load('api_query_user.xml') + elif 'type=group' in url: + body = self.fixtures.load('api_query_group.xml') + else: + raise AssertionError('Unexpected query type') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_metadata(self, method, url, body, headers): + if method == 'POST': + body = self.fixtures.load('api_vapp_post_metadata.xml') + return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED] + else: + body = self.fixtures.load('api_vapp_get_metadata.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_controlAccess(self, method, url, body, headers): + body = self.fixtures.load( + 'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_controlAccess.xml') + return httplib.OK, body, headers, 
httplib.responses[httplib.OK] + + def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_action_controlAccess(self, method, url, body, headers): + body = str(body) + self.assertTrue(method == 'POST') + self.assertTrue( + 'false' in body) + self.assertTrue( + '' in body) + self.assertTrue('FullControl' in body) + body = self.fixtures.load( + 'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_controlAccess.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + def _api_admin_group_b8202c48_7151_4e61_9a6c_155474c7d413(self, method, url, body, headers): + body = self.fixtures.load( + 'api_admin_group_b8202c48_7151_4e61_9a6c_155474c7d413.xml') + return httplib.OK, body, headers, httplib.responses[httplib.OK] + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_vcl.py libcloud-0.15.1/libcloud/test/compute/test_vcl.py --- libcloud-0.5.0/libcloud/test/compute/test_vcl.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_vcl.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,127 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +import sys + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import xmlrpclib + +from libcloud.compute.drivers.vcl import VCLNodeDriver as VCL +from libcloud.compute.types import NodeState + +from libcloud.test import MockHttp +from libcloud.test.file_fixtures import ComputeFileFixtures +from libcloud.test.secrets import VCL_PARAMS + + +class VCLTests(unittest.TestCase): + + def setUp(self): + VCL.connectionCls.conn_classes = ( + VCLMockHttp, VCLMockHttp) + VCLMockHttp.type = None + self.driver = VCL(*VCL_PARAMS) + + def test_list_nodes(self): + node = self.driver.list_nodes(ipaddr='192.168.1.1')[0] + self.assertEqual(node.name, 'CentOS 5.4 Base (32 bit VM)') + self.assertEqual(node.state, NodeState.RUNNING) + self.assertEqual(node.extra['pass'], 'ehkNGW') + + def test_list_images(self): + images = self.driver.list_images() + image = images[0] + self.assertEqual(image.id, '8') + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 1) + + def test_create_node(self): + image = self.driver.list_images()[0] + node = self.driver.create_node(image=image) + self.assertEqual(node.id, '51') + + def test_destroy_node(self): + node = self.driver.list_nodes(ipaddr='192.168.1.1')[0] + self.assertTrue(self.driver.destroy_node(node)) + + def test_ex_update_node_access(self): + node = self.driver.list_nodes(ipaddr='192.168.1.1')[0] + node = self.driver.ex_update_node_access(node, ipaddr='192.168.1.2') + self.assertEqual(node.name, 'CentOS 5.4 Base (32 bit VM)') + self.assertEqual(node.state, NodeState.RUNNING) + self.assertEqual(node.extra['pass'], 'ehkNGW') + + def test_ex_extend_request_time(self): + node = self.driver.list_nodes(ipaddr='192.168.1.1')[0] + self.assertTrue(self.driver.ex_extend_request_time(node, 60)) + + def test_ex_get_request_end_time(self): + node = self.driver.list_nodes(ipaddr='192.168.1.1')[0] + self.assertEqual( + self.driver.ex_get_request_end_time(node), + 1334168100 + ) + + 
+class VCLMockHttp(MockHttp): + fixtures = ComputeFileFixtures('vcl') + + def _get_method_name(self, type, use_param, qs, path): + return "_xmlrpc" + + def _xmlrpc(self, method, url, body, headers): + params, meth_name = xmlrpclib.loads(body) + if self.type: + meth_name = "%s_%s" % (meth_name, self.type) + return getattr(self, meth_name)(method, url, body, headers) + + def XMLRPCgetImages(self, method, url, body, headers): + body = self.fixtures.load('XMLRPCgetImages.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def XMLRPCextendRequest(self, method, url, body, headers): + body = self.fixtures.load('XMLRPCextendRequest.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def XMLRPCgetRequestIds(self, method, url, body, headers): + body = self.fixtures.load( + 'XMLRPCgetRequestIds.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def XMLRPCgetRequestStatus(self, method, url, body, headers): + body = self.fixtures.load( + 'XMLRPCgetRequestStatus.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def XMLRPCendRequest(self, method, url, body, headers): + body = self.fixtures.load( + 'XMLRPCendRequest.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def XMLRPCaddRequest(self, method, url, body, headers): + body = self.fixtures.load( + 'XMLRPCaddRequest.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def XMLRPCgetRequestConnectData(self, method, url, body, headers): + body = self.fixtures.load( + 'XMLRPCgetRequestConnectData.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_voxel.py libcloud-0.15.1/libcloud/test/compute/test_voxel.py --- libcloud-0.5.0/libcloud/test/compute/test_voxel.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_voxel.py 2013-11-29 12:35:05.000000000 
+0000 @@ -0,0 +1,170 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import unittest +from libcloud.utils.py3 import httplib + +from libcloud.compute.base import Node, NodeSize, NodeImage, NodeLocation +from libcloud.compute.drivers.voxel import VoxelNodeDriver as Voxel +from libcloud.compute.types import InvalidCredsError + +from libcloud.test import MockHttp +from libcloud.test.file_fixtures import ComputeFileFixtures + +from libcloud.test.secrets import VOXEL_PARAMS + + +class VoxelTest(unittest.TestCase): + + def setUp(self): + + Voxel.connectionCls.conn_classes = (None, VoxelMockHttp) + VoxelMockHttp.type = None + self.driver = Voxel(*VOXEL_PARAMS) + + def test_auth_failed(self): + VoxelMockHttp.type = 'UNAUTHORIZED' + try: + self.driver.list_nodes() + except Exception: + e = sys.exc_info()[1] + self.assertTrue(isinstance(e, InvalidCredsError)) + else: + self.fail('test should have thrown') + + def test_response_failure(self): + VoxelMockHttp.type = 'FAILURE' + + try: + self.driver.list_nodes() + except Exception: + pass + else: + self.fail('Invalid response, but exception was not thrown') + + def test_list_nodes(self): + VoxelMockHttp.type = 'LIST_NODES' + nodes = self.driver.list_nodes() + + 
self.assertEqual(len(nodes), 1) + self.assertEqual(nodes[0].name, 'www.voxel.net') + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + + self.assertEqual(len(sizes), 13) + + def test_list_images(self): + VoxelMockHttp.type = 'LIST_IMAGES' + images = self.driver.list_images() + + self.assertEqual(len(images), 1) + + def test_list_locations(self): + VoxelMockHttp.type = 'LIST_LOCATIONS' + locations = self.driver.list_locations() + + self.assertEqual(len(locations), 2) + self.assertEqual(locations[0].name, 'Amsterdam') + + def test_create_node_invalid_disk_size(self): + image = NodeImage( + id=1, name='Ubuntu 8.10 (intrepid)', driver=self.driver) + size = NodeSize( + 1, '256 slice', None, None, None, None, driver=self.driver) + location = NodeLocation(id=1, name='Europe', country='England', + driver=self.driver) + + try: + self.driver.create_node(name='foo', image=image, size=size, + location=location) + except ValueError: + pass + else: + self.fail('Invalid disk size provided but an exception was not' + ' thrown') + + def test_create_node(self): + VoxelMockHttp.type = 'CREATE_NODE' + image = NodeImage( + id=1, name='Ubuntu 8.10 (intrepid)', driver=self.driver) + size = NodeSize( + 1, '256 slice', 1024, 500, None, None, driver=self.driver) + location = NodeLocation(id=1, name='Europe', country='England', + driver=self.driver) + + node = self.driver.create_node(name='foo', image=image, size=size, + location=location) + self.assertEqual(node.id, '1234') + + node = self.driver.create_node(name='foo', image=image, size=size, + location=location, voxel_access=True) + self.assertEqual(node.id, '1234') + + def test_reboot_node(self): + VoxelMockHttp.type = 'REBOOT_NODE' + node = Node( + id=72258, name=None, state=None, public_ips=None, private_ips=None, + driver=self.driver) + + self.assertTrue(node.reboot()) + + def test_destroy_node(self): + VoxelMockHttp.type = 'DESTROY_NODE' + node = Node( + id=72258, name=None, state=None, public_ips=None, 
private_ips=None, + driver=self.driver) + + self.assertTrue(node.destroy()) + + +class VoxelMockHttp(MockHttp): + + fixtures = ComputeFileFixtures('voxel') + + def _UNAUTHORIZED(self, method, url, body, headers): + body = self.fixtures.load('unauthorized.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _FAILURE(self, method, url, body, headers): + body = self.fixtures.load('failure.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _LIST_NODES(self, method, url, body, headers): + body = self.fixtures.load('nodes.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _LIST_IMAGES(self, method, url, body, headers): + body = self.fixtures.load('images.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _LIST_LOCATIONS(self, method, url, body, headers): + body = self.fixtures.load('locations.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _CREATE_NODE(self, method, url, body, headers): + body = self.fixtures.load('create_node.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _REBOOT_NODE(self, method, url, body, headers): + body = self.fixtures.load('success.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DESTROY_NODE(self, method, url, body, headers): + body = self.fixtures.load('success.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/compute/test_vpsnet.py libcloud-0.15.1/libcloud/test/compute/test_vpsnet.py --- libcloud-0.5.0/libcloud/test/compute/test_vpsnet.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/compute/test_vpsnet.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,212 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import unittest +from libcloud.utils.py3 import httplib + +from libcloud.compute.drivers.vpsnet import VPSNetNodeDriver +from libcloud.compute.base import Node +from libcloud.compute.types import NodeState + +from libcloud.test import MockHttp +from libcloud.test.compute import TestCaseMixin + +from libcloud.test.secrets import VPSNET_PARAMS +from libcloud.test.file_fixtures import ComputeFileFixtures + + +class VPSNetTests(unittest.TestCase, TestCaseMixin): + + def setUp(self): + VPSNetNodeDriver.connectionCls.conn_classes = (None, VPSNetMockHttp) + self.driver = VPSNetNodeDriver(*VPSNET_PARAMS) + + def test_create_node(self): + VPSNetMockHttp.type = 'create' + image = self.driver.list_images()[0] + size = self.driver.list_sizes()[0] + node = self.driver.create_node('foo', image, size) + self.assertEqual(node.name, 'foo') + + def test_list_nodes(self): + VPSNetMockHttp.type = 'virtual_machines' + node = self.driver.list_nodes()[0] + self.assertEqual(node.id, '1384') + self.assertEqual(node.state, NodeState.RUNNING) + + def test_reboot_node(self): + VPSNetMockHttp.type = 'virtual_machines' + node = self.driver.list_nodes()[0] + + VPSNetMockHttp.type = 'reboot' + ret = self.driver.reboot_node(node) + self.assertEqual(ret, True) + + def test_destroy_node(self): + VPSNetMockHttp.type 
= 'delete' + node = Node('2222', None, None, None, None, self.driver) + ret = self.driver.destroy_node(node) + self.assertTrue(ret) + VPSNetMockHttp.type = 'delete_fail' + node = Node('2223', None, None, None, None, self.driver) + self.assertRaises(Exception, self.driver.destroy_node, node) + + def test_list_images(self): + VPSNetMockHttp.type = 'templates' + ret = self.driver.list_images() + self.assertEqual(ret[0].id, '9') + self.assertEqual(ret[-1].id, '160') + + def test_list_sizes(self): + VPSNetMockHttp.type = 'sizes' + ret = self.driver.list_sizes() + self.assertEqual(len(ret), 1) + self.assertEqual(ret[0].id, '1') + self.assertEqual(ret[0].name, '1 Node') + + def test_destroy_node_response(self): + # should return a node object + node = Node('2222', None, None, None, None, self.driver) + VPSNetMockHttp.type = 'delete' + ret = self.driver.destroy_node(node) + self.assertTrue(isinstance(ret, bool)) + + def test_reboot_node_response(self): + # should return a node object + VPSNetMockHttp.type = 'virtual_machines' + node = self.driver.list_nodes()[0] + VPSNetMockHttp.type = 'reboot' + ret = self.driver.reboot_node(node) + self.assertTrue(isinstance(ret, bool)) + + +class VPSNetMockHttp(MockHttp): + fixtures = ComputeFileFixtures('vpsnet') + + def _nodes_api10json_sizes(self, method, url, body, headers): + body = """[{"slice":{"virtual_machine_id":8592,"id":12256,"consumer_id":0}}, + {"slice":{"virtual_machine_id":null,"id":12258,"consumer_id":0}}, + {"slice":{"virtual_machine_id":null,"id":12434,"consumer_id":0}}]""" + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _nodes_api10json_create(self, method, url, body, headers): + body = """[{"slice":{"virtual_machine_id":8592,"id":12256,"consumer_id":0}}, + {"slice":{"virtual_machine_id":null,"id":12258,"consumer_id":0}}, + {"slice":{"virtual_machine_id":null,"id":12434,"consumer_id":0}}]""" + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def 
_virtual_machines_2222_api10json_delete_fail(self, method, url, body, headers): + return (httplib.FORBIDDEN, '', {}, httplib.responses[httplib.FORBIDDEN]) + + def _virtual_machines_2222_api10json_delete(self, method, url, body, headers): + return (httplib.OK, '', {}, httplib.responses[httplib.OK]) + + def _virtual_machines_1384_reboot_api10json_reboot(self, method, url, body, headers): + body = """{ + "virtual_machine": + { + "running": true, + "updated_at": "2009-05-15T06:55:02-04:00", + "power_action_pending": false, + "system_template_id": 41, + "id": 1384, + "cloud_id": 3, + "domain_name": "demodomain.com", + "hostname": "web01", + "consumer_id": 0, + "backups_enabled": false, + "password": "a8hjsjnbs91", + "label": "foo", + "slices_count": null, + "created_at": "2009-04-16T08:17:39-04:00" + } + }""" + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _virtual_machines_api10json_create(self, method, url, body, headers): + body = """{ + "virtual_machine": + { + "running": true, + "updated_at": "2009-05-15T06:55:02-04:00", + "power_action_pending": false, + "system_template_id": 41, + "id": 1384, + "cloud_id": 3, + "domain_name": "demodomain.com", + "hostname": "web01", + "consumer_id": 0, + "backups_enabled": false, + "password": "a8hjsjnbs91", + "label": "foo", + "slices_count": null, + "created_at": "2009-04-16T08:17:39-04:00" + } + }""" + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _virtual_machines_api10json_virtual_machines(self, method, url, body, headers): + body = """ [{ + "virtual_machine": + { + "running": true, + "updated_at": "2009-05-15T06:55:02-04:00", + "power_action_pending": false, + "system_template_id": 41, + "id": 1384, + "cloud_id": 3, + "domain_name": "demodomain.com", + "hostname": "web01", + "consumer_id": 0, + "backups_enabled": false, + "password": "a8hjsjnbs91", + "label": "Web Server 01", + "slices_count": null, + "created_at": "2009-04-16T08:17:39-04:00" + } + }, + { + "virtual_machine": + 
{ + "running": true, + "updated_at": "2009-05-15T06:55:02-04:00", + "power_action_pending": false, + "system_template_id": 41, + "id": 1385, + "cloud_id": 3, + "domain_name": "demodomain.com", + "hostname": "mysql01", + "consumer_id": 0, + "backups_enabled": false, + "password": "dsi8h38hd2s", + "label": "MySQL Server 01", + "slices_count": null, + "created_at": "2009-04-16T08:17:39-04:00" + } + }]""" + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _available_clouds_api10json_templates(self, method, url, body, headers): + body = self.fixtures.load('_available_clouds_api10json_templates.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _available_clouds_api10json_create(self, method, url, body, headers): + body = """ + [{"cloud":{"system_templates":[{"id":9,"label":"Ubuntu 8.04 x64"}],"id":2,"label":"USA VPS Cloud"}}] + """ + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/create_record.xml libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/create_record.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/create_record.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/create_record.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,31 @@ + + + + + + + + id + 47234 + + + name + www + + + ttl + 0 + + + type + A + + + value + 127.0.0.1 + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/create_zone.xml libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/create_zone.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/create_zone.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/create_zone.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,43 @@ + + + + + + + + date_updated + 20101028T12:38:17 + + + domains + 0 + + + id + 47234 + + + name + t.com + + + owner + AB3917-GANDI + + + public + 
0 + + + version + 1 + + + versions + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/delete_record_doesnotexist.xml libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/delete_record_doesnotexist.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/delete_record_doesnotexist.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/delete_record_doesnotexist.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,8 @@ + + + + + 0 + + + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/delete_record.xml libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/delete_record.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/delete_record.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/delete_record.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,8 @@ + + + + + 1 + + + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/delete_zone_fail.xml libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/delete_zone_fail.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/delete_zone_fail.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/delete_zone_fail.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,8 @@ + + + + + 0 + + + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/delete_zone.xml libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/delete_zone.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/delete_zone.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/delete_zone.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,8 @@ + + + + + 1 + + + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/get_zone.xml libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/get_zone.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/get_zone.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/get_zone.xml 2013-08-30 12:21:18.000000000 
+0000 @@ -0,0 +1,43 @@ + + + + + + + + date_updated + 20101028T12:38:17 + + + domains + 0 + + + id + 47234 + + + name + t.com + + + owner + AB3917-GANDI + + + public + 0 + + + version + 1 + + + versions + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/list_records_empty.xml libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/list_records_empty.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/list_records_empty.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/list_records_empty.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/list_records.xml libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/list_records.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/list_records.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/list_records.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,88 @@ + + + + + + + + + + + id + 47234 + + + name + wibble + + + ttl + 86400 + + + type + CNAME + + + value + t.com + + + + + + + + id + 47234 + + + name + www + + + ttl + 86400 + + + type + A + + + value + 208.111.35.173 + + + + + + + + id + 47234 + + + name + blahblah + + + ttl + 86400 + + + type + A + + + value + 208.111.35.173 + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/list_zones.xml libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/list_zones.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/list_zones.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/list_zones.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,138 @@ + + + + + + + + + + + date_updated + 20101028T12:38:17 + + + id + 47234 + + + name + t.com + + + public + 0 + + + version + 1 + + + + + + + + date_updated + 20101028T12:38:17 + + + id + 48170 + + + name + newbug.net + + + public + 0 + + + version + 1 + + + + + + + + date_updated + 
20101028T12:38:17 + + + id + 48017 + + + name + newblah.com + + + public + 0 + + + version + 1 + + + + + + + + date_updated + 20101028T12:38:17 + + + id + 47288 + + + name + fromapi.com + + + public + 0 + + + version + 1 + + + + + + + + date_updated + 20101028T12:38:17 + + + id + 48008 + + + name + blahnew.com + + + public + 0 + + + version + 1 + + + + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/new_version.xml libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/new_version.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/new_version.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/new_version.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,8 @@ + + + + + 1 + + + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/zone_doesnt_exist.xml libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/zone_doesnt_exist.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/gandi/zone_doesnt_exist.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/gandi/zone_doesnt_exist.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + + faultCode + 581042 + + + faultString + Zone does not exist + + + + + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/google/get_zone_does_not_exists.json libcloud-0.15.1/libcloud/test/dns/fixtures/google/get_zone_does_not_exists.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/google/get_zone_does_not_exists.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/google/get_zone_does_not_exists.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,13 @@ +{ + "error": { + "errors": [ + { + "domain": "global", + "reason": "notFound", + "message": "The 'parameters.managedZone' resource named 'example-com' does not exist." + } + ], + "code": 404, + "message": "The 'parameters.managedZone' resource named 'example-com' does not exist." 
+ } +} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/google/managed_zones_1.json libcloud-0.15.1/libcloud/test/dns/fixtures/google/managed_zones_1.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/google/managed_zones_1.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/google/managed_zones_1.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1 @@ +{"kind": "dns#managedZone", "name": "example-com", "nameServers": ["ns-cloud1.googledomains.com.", "ns-cloud2.googledomains.com.", "ns-cloud3.googledomains.com.", "ns-cloud4.googledomains.com."], "creationTime": "2014-03-29T23:06:00.921Z", "dnsName": "example.com.", "id": "1", "description": ""} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/google/no_record.json libcloud-0.15.1/libcloud/test/dns/fixtures/google/no_record.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/google/no_record.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/google/no_record.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "kind": "dns#resourceRecordSetsListResponse", + "rrsets": [ + ] +} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/google/record.json libcloud-0.15.1/libcloud/test/dns/fixtures/google/record.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/google/record.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/google/record.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,17 @@ +{ + "kind": "dns#resourceRecordSetsListResponse", + "rrsets": [ + { + "kind": "dns#resourceRecordSet", + "name": "foo.example.com.", + "type": "A", + "ttl": 21600, + "rrdatas": [ + "ns-cloud-c1.googledomains.com.", + "ns-cloud-c2.googledomains.com.", + "ns-cloud-c3.googledomains.com.", + "ns-cloud-c4.googledomains.com." 
+ ] + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/google/records_list.json libcloud-0.15.1/libcloud/test/dns/fixtures/google/records_list.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/google/records_list.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/google/records_list.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1 @@ +{"rrsets": [{"rrdatas": ["ns-cloud-d1.googledomains.com.", "ns-cloud-d2.googledomains.com.", "ns-cloud-d3.googledomains.com.", "ns-cloud-d4.googledomains.com."], "kind": "dns#resourceRecordSet", "type": "NS", "name": "example.com.", "ttl": 21600}, {"rrdatas": ["ns-cloud-d1.googledomains.com. dns-admin.google.com. 0 21600 3600 1209600 300"], "kind": "dns#resourceRecordSet", "type": "SOA", "name": "example.com.", "ttl": 21600}, {"rrdatas": ["1.2.3.4"], "kind": "dns#resourceRecordSet", "type": "A", "name": "foo.example.com.", "ttl": 3600}], "kind": "dns#resourceRecordSetsListResponse"} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/google/zone_create.json libcloud-0.15.1/libcloud/test/dns/fixtures/google/zone_create.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/google/zone_create.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/google/zone_create.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1 @@ +{"kind": "dns#managedZone", "name": "example-org", "nameServers": ["ns-cloud-b1.googledomains.com.", "ns-cloud-b2.googledomains.com.", "ns-cloud-b3.googledomains.com.", "ns-cloud-b4.googledomains.com."], "creationTime": "2014-03-30T04:44:20.834Z", "dnsName": "example.org.", "id": "3", "description": "new domain for example.org"} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/google/zone.json libcloud-0.15.1/libcloud/test/dns/fixtures/google/zone.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/google/zone.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/google/zone.json 2014-06-11 
14:27:59.000000000 +0000 @@ -0,0 +1 @@ +{"kind": "dns#managedZone", "name": "example-com", "nameServers": ["ns-cloud-e1.googledomains.com.", "ns-cloud-e2.googledomains.com.", "ns-cloud-e3.googledomains.com.", "ns-cloud-e4.googledomains.com."], "creationTime": "2014-03-29T22:45:47.618Z", "dnsName": "example.com.", "id": "1", "description": ""} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/google/zone_list.json libcloud-0.15.1/libcloud/test/dns/fixtures/google/zone_list.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/google/zone_list.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/google/zone_list.json 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1 @@ +{"kind": "dns#managedZonesListResponse", "managedZones": [{"kind": "dns#managedZone", "name": "example-com", "nameServers": ["ns-cloud-e1.googledomains.com.", "ns-cloud-e2.googledomains.com.", "ns-cloud-e3.googledomains.com.", "ns-cloud-e4.googledomains.com."], "creationTime": "2014-03-29T22:45:47.618Z", "dnsName": "example.com.", "id": "1", "description": ""}, {"kind": "dns#managedZone", "name": "example-net", "nameServers": ["ns-cloud-d1.googledomains.com.", "ns-cloud-d2.googledomains.com.", "ns-cloud-d3.googledomains.com.", "ns-cloud-d4.googledomains.com."], "creationTime": "2014-03-29T22:45:46.990Z", "dnsName": "example.net.", "id": "2", "description": ""}]} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/hostvirtual/get_record.json libcloud-0.15.1/libcloud/test/dns/fixtures/hostvirtual/get_record.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/hostvirtual/get_record.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/hostvirtual/get_record.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,8 @@ +{ + "id": "300377", + "name": "*.t.com", + "type": "CNAME", + "content": "t.com", + "ttl": "86400", + "prio": null +} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/hostvirtual/get_zone.json 
libcloud-0.15.1/libcloud/test/dns/fixtures/hostvirtual/get_zone.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/hostvirtual/get_zone.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/hostvirtual/get_zone.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,47 @@ +{ + "id": "47234", + "name": "t.com", + "type": "NATIVE", + "ttl": "3600", + "soa": { + "primary": "ns1.hostvirtual.com", + "hostmaster": "support@HOSTVIRTUAL.COM", + "serial": "2012100901", + "refresh": "10800", + "retry": "3600", + "expire": "604800", + "default_ttl": "3600" + }, + "ns": [ + "ns4.hostvirtual.com", + "ns3.hostvirtual.com", + "ns2.hostvirtual.com", + "ns1.hostvirtual.com" + ], + "records": [ + { + "id": "300377", + "name": "*.t.com", + "type": "CNAME", + "content": "t.com", + "ttl": "86400", + "prio": null + }, + { + "id": "300719", + "name": "blah.com.", + "type": "A", + "content": "0.0.0.0", + "ttl": null, + "prio": null + }, + { + "id": "300728", + "name": "blahblah.com.t.com", + "type": "A", + "content": "1.1.1.1", + "ttl": null, + "prio": "10" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/hostvirtual/list_records.json libcloud-0.15.1/libcloud/test/dns/fixtures/hostvirtual/list_records.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/hostvirtual/list_records.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/hostvirtual/list_records.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,26 @@ +[ + { + "id": "300377", + "name": "*.t.com", + "type": "CNAME", + "content": "t.com", + "ttl": "86400", + "prio": null + }, + { + "id": "300719", + "name": "www.t.com", + "type": "A", + "content": "208.111.35.173", + "ttl": null, + "prio": null + }, + { + "id": "300728", + "name": "blahblah.t.com", + "type": "A", + "content": "208.111.35.173", + "ttl": null, + "prio": "10" + } +] diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/hostvirtual/list_zones.json 
libcloud-0.15.1/libcloud/test/dns/fixtures/hostvirtual/list_zones.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/hostvirtual/list_zones.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/hostvirtual/list_zones.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,32 @@ +[ + { + "id": "47234", + "name": "t.com", + "type": "NATIVE", + "ttl": "3600" + }, + { + "id": "48170", + "name": "newbug.net", + "type": "NATIVE", + "ttl": "3600" + }, + { + "id": "48017", + "name": "newblah.com", + "type": "NATIVE", + "ttl": "3600" + }, + { + "id": "47288", + "name": "fromapi.com", + "type": "NATIVE", + "ttl": "3600" + }, + { + "id": "48008", + "name": "blahnew.com", + "type": "NATIVE", + "ttl": "3600" + } +] diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/hostvirtual/zone_does_not_exist.json libcloud-0.15.1/libcloud/test/dns/fixtures/hostvirtual/zone_does_not_exist.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/hostvirtual/zone_does_not_exist.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/hostvirtual/zone_does_not_exist.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ +{ + "error": { + "code": 404, + "message": "Not Found: id, validate_dns_zone_owner" + } +} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/linode/create_domain.json libcloud-0.15.1/libcloud/test/dns/fixtures/linode/create_domain.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/linode/create_domain.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/linode/create_domain.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,7 @@ +{ + "ERRORARRAY": [], + "ACTION": "domain.create", + "DATA": { + "DomainID": 5094 + } +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/linode/create_domain_validation_error.json libcloud-0.15.1/libcloud/test/dns/fixtures/linode/create_domain_validation_error.json --- 
libcloud-0.5.0/libcloud/test/dns/fixtures/linode/create_domain_validation_error.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/linode/create_domain_validation_error.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "ERRORARRAY": [ + { + "ERRORCODE": 8, + "ERRORMESSAGE": "The domain 'linode.com' already exists in our database. Please open a ticket if you think this is in error." + } + ], + "DATA": {}, + "ACTION": "domain.create" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/linode/create_resource.json libcloud-0.15.1/libcloud/test/dns/fixtures/linode/create_resource.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/linode/create_resource.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/linode/create_resource.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,7 @@ +{ + "ERRORARRAY": [], + "DATA": { + "ResourceID": 3585100 + }, + "ACTION": "domain.resource.create" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/linode/delete_domain_does_not_exist.json libcloud-0.15.1/libcloud/test/dns/fixtures/linode/delete_domain_does_not_exist.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/linode/delete_domain_does_not_exist.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/linode/delete_domain_does_not_exist.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "ERRORARRAY": [ + { + "ERRORCODE": 5, + "ERRORMESSAGE": "Object not found" + } + ], + "DATA": {}, + "ACTION": "domain.delete" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/linode/delete_domain.json libcloud-0.15.1/libcloud/test/dns/fixtures/linode/delete_domain.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/linode/delete_domain.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/linode/delete_domain.json 2013-11-29 12:35:05.000000000 
+0000 @@ -0,0 +1,7 @@ +{ + "ERRORARRAY": [], + "ACTION": "domain.delete", + "DATA": { + "DomainID": 5123 + } +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/linode/delete_resource_does_not_exist.json libcloud-0.15.1/libcloud/test/dns/fixtures/linode/delete_resource_does_not_exist.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/linode/delete_resource_does_not_exist.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/linode/delete_resource_does_not_exist.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "ERRORARRAY": [ + { + "ERRORCODE": 5, + "ERRORMESSAGE": "Object not found" + } + ], + "DATA": {}, + "ACTION": "domain.resource.delete" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/linode/delete_resource.json libcloud-0.15.1/libcloud/test/dns/fixtures/linode/delete_resource.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/linode/delete_resource.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/linode/delete_resource.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,7 @@ +{ + "ERRORARRAY": [], + "DATA": { + "ResourceID": 3585141 + }, + "ACTION": "domain.resource.delete" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/linode/domain_list.json libcloud-0.15.1/libcloud/test/dns/fixtures/linode/domain_list.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/linode/domain_list.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/linode/domain_list.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,36 @@ +{ + "ERRORARRAY": [], + "ACTION": "domain.list", + "DATA": [ + { + "DOMAINID": 5093, + "DESCRIPTION": "", + "EXPIRE_SEC": 0, + "RETRY_SEC": 0, + "STATUS": 1, + "LPM_DISPLAYGROUP": "thing", + "MASTER_IPS": "", + "REFRESH_SEC": 0, + "SOA_EMAIL": "dns@example.com", + "TTL_SEC": 0, + "DOMAIN": "linode.com", + "AXFR_IPS": "none", + "TYPE": "master" 
+ }, + { + "DOMAINID": 5094, + "DESCRIPTION": "", + "EXPIRE_SEC": 0, + "RETRY_SEC": 0, + "STATUS": 1, + "LPM_DISPLAYGROUP": "", + "MASTER_IPS": "2600:3c03::f03c:91ff:feae:e071;66.228.43.47;", + "REFRESH_SEC": 0, + "SOA_EMAIL": "", + "TTL_SEC": 0, + "DOMAIN": "0.c.d.7.0.6.0.f.1.0.7.4.0.1.0.0.2.ip6.arpa", + "AXFR_IPS": "2600:3c03::f03c:91ff:feae:e071;66.228.43.47;", + "TYPE": "slave" + } + ] +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/linode/get_record_does_not_exist.json libcloud-0.15.1/libcloud/test/dns/fixtures/linode/get_record_does_not_exist.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/linode/get_record_does_not_exist.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/linode/get_record_does_not_exist.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "ERRORARRAY": [ + { + "ERRORCODE": 5, + "ERRORMESSAGE": "Object not found" + } + ], + "DATA": {}, + "ACTION": "domain.resource.list" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/linode/get_record.json libcloud-0.15.1/libcloud/test/dns/fixtures/linode/get_record.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/linode/get_record.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/linode/get_record.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,18 @@ +{ + "ERRORARRAY": [], + "DATA": [ + { + "DOMAINID": 5093, + "PORT": 80, + "RESOURCEID": 3585100, + "NAME": "www", + "WEIGHT": 5, + "TTL_SEC": 0, + "TARGET": "127.0.0.1", + "PRIORITY": 10, + "PROTOCOL": "", + "TYPE": "a" + } + ], + "ACTION": "domain.resource.list" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/linode/get_zone_does_not_exist.json libcloud-0.15.1/libcloud/test/dns/fixtures/linode/get_zone_does_not_exist.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/linode/get_zone_does_not_exist.json 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/dns/fixtures/linode/get_zone_does_not_exist.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "ERRORARRAY": [ + { + "ERRORCODE": 5, + "ERRORMESSAGE": "Object not found" + } + ], + "DATA": {}, + "ACTION": "domain.list" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/linode/get_zone.json libcloud-0.15.1/libcloud/test/dns/fixtures/linode/get_zone.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/linode/get_zone.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/linode/get_zone.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,21 @@ +{ + "ERRORARRAY": [], + "DATA": [ + { + "DOMAINID": 5093, + "DESCRIPTION": "", + "EXPIRE_SEC": 0, + "RETRY_SEC": 0, + "STATUS": 1, + "LPM_DISPLAYGROUP": "thing", + "MASTER_IPS": "", + "REFRESH_SEC": 0, + "SOA_EMAIL": "dns@example.com", + "TTL_SEC": 0, + "DOMAIN": "linode.com", + "AXFR_IPS": "none", + "TYPE": "master" + } + ], + "ACTION": "domain.list" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/linode/resource_list_does_not_exist.json libcloud-0.15.1/libcloud/test/dns/fixtures/linode/resource_list_does_not_exist.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/linode/resource_list_does_not_exist.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/linode/resource_list_does_not_exist.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "ERRORARRAY": [ + { + "ERRORCODE": 5, + "ERRORMESSAGE": "Object not found" + } + ], + "DATA": {}, + "ACTION": "domain.resource.list" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/linode/resource_list.json libcloud-0.15.1/libcloud/test/dns/fixtures/linode/resource_list.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/linode/resource_list.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/linode/resource_list.json 2013-11-29 
12:35:05.000000000 +0000 @@ -0,0 +1,30 @@ +{ + "ERRORARRAY": [], + "DATA": [ + { + "DOMAINID": 5093, + "PORT": 80, + "RESOURCEID": 3585100, + "NAME": "mc", + "WEIGHT": 5, + "TTL_SEC": 0, + "TARGET": "127.0.0.1", + "PRIORITY": 10, + "PROTOCOL": "", + "TYPE": "a" + }, + { + "DOMAINID": 5093, + "PORT": 25565, + "RESOURCEID": 3585141, + "NAME": "_minecraft._udp", + "WEIGHT": 5, + "TTL_SEC": 0, + "TARGET": "mc.linode.com", + "PRIORITY": 10, + "PROTOCOL": "udp", + "TYPE": "srv" + } + ], + "ACTION": "domain.resource.list" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/linode/update_domain.json libcloud-0.15.1/libcloud/test/dns/fixtures/linode/update_domain.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/linode/update_domain.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/linode/update_domain.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,7 @@ +{ + "ERRORARRAY": [], + "DATA": { + "DomainID": 5093 + }, + "ACTION": "domain.update" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/linode/update_resource.json libcloud-0.15.1/libcloud/test/dns/fixtures/linode/update_resource.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/linode/update_resource.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/linode/update_resource.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,7 @@ +{ + "ERRORARRAY": [], + "DATA": { + "ResourceID": 3585100 + }, + "ACTION": "domain.resource.update" +} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/auth_1_1.json libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/auth_1_1.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/auth_1_1.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/auth_1_1.json 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,31 @@ +{ + "auth":{ + "token":{ + 
"id":"fooo-bar-fooo-bar-fooo-bar", + "expires":"2031-10-29T17:39:28.000-05:00" + }, + "serviceCatalog":{ + "cloudFilesCDN":[ + { + "region":"ORD", + "publicURL":"https:\/\/cdn2.clouddrive.com\/v1\/MossoCloudFS_f66473fb-2e1e-4a44-barr-foooooo", + "v1Default":true + } + ], + "cloudFiles":[ + { + "region":"ORD", + "publicURL":"https:\/\/storage101.ord1.clouddrive.com\/v1\/MossoCloudFS_fbarr-foooo-barr", + "v1Default":true, + "internalURL":"https:\/\/snet-storage101.ord1.clouddrive.com\/v1\/MossoCloudFS_fbarr-foooo-barr" + } + ], + "cloudServers":[ + { + "publicURL":"https:\/\/servers.api.rackspacecloud.com\/v1.0\/11111", + "v1Default":true + } + ] + } + } +} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/auth_2_0.json libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/auth_2_0.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/auth_2_0.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/auth_2_0.json 2013-12-12 09:01:49.000000000 +0000 @@ -0,0 +1,159 @@ +{ + "access": { + "token": { + "id": "aaaaaaaaaaaa-bbb-cccccccccccccc", + "expires": "2031-11-23T21:00:14.000-06:00" + }, + "serviceCatalog": [ + { + "endpoints": [ + { + "region": "ORD", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + "publicURL": "https://cdn.clouddrive.com/v1/MossoCloudFS", + "version": { + "versionInfo": "https://cdn2.clouddrive.com/v1/", + "versionList": "https://cdn2.clouddrive.com/", + "versionId": "1" + } + }, + { + "region": "LON", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + "publicURL": "https://cdn.clouddrive.com/v1/MossoCloudFS", + "version": { + "versionInfo": "https://cdn2.clouddrive.com/v1/", + "versionList": "https://cdn2.clouddrive.com/", + "versionId": "1" + } + } + + ], + "name": "cloudFilesCDN", + "type": "rax:object-cdn" + }, + { + "endpoints": [ + { + "region": "ORD", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + "publicURL": 
"https://storage4.ord1.clouddrive.com/v1/MossoCloudFS", + "internalURL": "https://snet-storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" + }, + { + "region": "LON", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + "publicURL": "https://storage4.lon1.clouddrive.com/v1/MossoCloudFS", + "internalURL": "https://snet-storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" + } + ], + "name": "cloudFiles", + "type": "object-store" + }, + { + "endpoints": [ + { + "tenantId": "1337", + "publicURL": "https://servers.api.rackspacecloud.com/v1.0/1337", + "version": { + "versionInfo": "https://servers.api.rackspacecloud.com/v1.0/", + "versionList": "https://servers.api.rackspacecloud.com/", + "versionId": "1.0" + } + } + ], + "name": "cloudServers", + "type": "compute" + }, + { + "endpoints": [ + { + "region": "RegionOne", + "tenantId": "1337", + "publicURL": "https://127.0.0.1/v2/1337", + "versionInfo": "https://127.0.0.1/v2/", + "versionList": "https://127.0.0.1/", + "versionId": "2" + } + ], + "name": "nova", + "type": "compute" + }, + { + "endpoints": [ + { + "region": "DFW", + "tenantId": "613469", + "publicURL": "https://dfw.servers.api.rackspacecloud.com/v2/1337", + "versionInfo": "https://dfw.servers.api.rackspacecloud.com/v2/", + "versionList": "https://dfw.servers.api.rackspacecloud.com/", + "versionId": "2" + }, + { + "region": "ORD", + "tenantId": "613469", + "publicURL": "https://ord.servers.api.rackspacecloud.com/v2/1337", + "versionInfo": "https://ord.servers.api.rackspacecloud.com/v2/", + "versionList": "https://ord.servers.api.rackspacecloud.com/", + "versionId": "2" + } + ], + "name": "cloudServersOpenStack", + "type": "compute" + }, + { + "endpoints": [ + { + "region": "DFW", + "tenantId": "1337", + "publicURL": "https://preprod.dfw.servers.api.rackspacecloud.com/v2/1337" + } + ], + "name": "cloudServersPreprod", + "type": "compute" + }, + { + "name": "cloudDNS", + "endpoints": [ + 
{ + "tenantId": "11111", + "publicURL": "https://dns.api.rackspacecloud.com/v1.0/11111" + } + ], + "type": "rax:dns" + }, + { + "name": "cloudLoadBalancers", + "endpoints": [ + { + "region": "SYD", + "tenantId": "11111", + "publicURL": "https://syd.loadbalancers.api.rackspacecloud.com/v1.0/11111" + }, + { + "region": "DFW", + "tenantId": "11111", + "publicURL": "https://dfw.loadbalancers.api.rackspacecloud.com/v1.0/11111" + }, + { + "region": "ORD", + "tenantId": "11111", + "publicURL": "https://ord.loadbalancers.api.rackspacecloud.com/v1.0/11111" + } + ], + "type": "rax:load-balancer" + } + ], + "user": { + "id": "7", + "roles": [ + { + "id": "identity:default", + "description": "Default Role.", + "name": "identity:default" + } + ], + "name": "testuser" + } + } +} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/create_record_success.json libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/create_record_success.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/create_record_success.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/create_record_success.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,21 @@ +{ + "request":"{\"records\": [{\"data\": \"127.1.1.1\", \"type\": \"A\", \"name\": \"www.foo4.bar.com\"}]}", + "response":{ + "records":[ + { + "name":"www.foo4.bar.com", + "id":"A-7423317", + "type":"A", + "data":"127.1.1.1", + "updated":"2011-10-29T20:50:41.000+0000", + "ttl":3600, + "created":"2011-10-29T20:50:41.000+0000" + } + ] + }, + "status":"COMPLETED", + "verb":"POST", + "jobId":"586605c8-5739-43fb-8939-f3a2c4c0e99c", + "callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/546514/status/586605c8-5739-43fb-8939-f3a2c4c0e99c", + "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/546514/domains/2946173/records" +} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/create_zone_success.json libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/create_zone_success.json 
--- libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/create_zone_success.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/create_zone_success.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,29 @@ +{ + "request":"{\"domains\": [{\"recordsList\": {\"records\": []}, \"emailAddress\": \"test@test.com\", \"name\": \"bar.foo1.com\"}]}", + "response":{ + "domains":[ + { + "name":"bar.foo1.com", + "id":2946173, + "accountId":11111, + "updated":"2011-10-29T20:28:59.000+0000", + "ttl":3600, + "emailAddress":"test@test.com", + "nameservers":[ + { + "name":"ns.rackspace.com" + }, + { + "name":"ns2.rackspace.com" + } + ], + "created":"2011-10-29T20:28:59.000+0000" + } + ] + }, + "status":"COMPLETED", + "verb":"POST", + "jobId":"288795f9-e74d-48be-880b-a9e36e0de61e", + "callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/11111/status/288795f9-e74d-48be-880b-a9e36e0de61e", + "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/11111/domains" +} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/create_zone_validation_error.json libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/create_zone_validation_error.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/create_zone_validation_error.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/create_zone_validation_error.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"validationErrors":{"messages":["Domain TTL is required and must be greater than or equal to 300"]},"code":400} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/delete_record_success.json libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/delete_record_success.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/delete_record_success.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/delete_record_success.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,8 @@ +{ + 
"status":"COMPLETED", + "verb":"DELETE", + "jobId":"0b40cd14-2e5d-490f-bb6e-fdc65d1118a9", + "callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/11111/status/0b40cd14-2e5d-490f-bb6e-fdc65d1118a9", + "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/11111/domains/2946181/records/2346" +} + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/delete_zone_success.json libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/delete_zone_success.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/delete_zone_success.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/delete_zone_success.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ +{ + "status":"COMPLETED", + "verb":"DELETE", + "jobId":"0b40cd14-2e5d-490f-bb6e-fdc65d1118a9", + "callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/11111/status/0b40cd14-2e5d-490f-bb6e-fdc65d1118a9", + "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/11111/domains/2946181" +} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/does_not_exist.json libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/does_not_exist.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/does_not_exist.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/does_not_exist.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"message":"Object not Found.","code":404,"details":""} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/get_record_success.json libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/get_record_success.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/get_record_success.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/get_record_success.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "name":"test3.foo4.bar.com", + "id":"A-7423034", + "type":"A", + "comment":"lulz", + "data":"127.7.7.7", + 
"updated":"2011-10-29T18:42:28.000+0000", + "ttl":777, + "created":"2011-10-29T15:29:29.000+0000" +} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/get_zone_success.json libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/get_zone_success.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/get_zone_success.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/get_zone_success.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,51 @@ +{ + "name":"foo4.bar.com", + "id":2946063, + "comment":"wazaaa", + "accountId":11111, + "updated":"2011-10-29T18:42:28.000+0000", + "ttl":855, + "recordsList":{ + "records":[ + { + "name":"test3.foo4.bar.com", + "id":"A-7423034", + "type":"A", + "comment":"lulz", + "data":"127.7.7.7", + "updated":"2011-10-29T18:42:28.000+0000", + "ttl":777, + "created":"2011-10-29T15:29:29.000+0000" + }, + { + "name":"foo4.bar.com", + "id":"NS-6717885", + "type":"NS", + "data":"dns1.stabletransit.com", + "updated":"2011-10-29T14:47:09.000+0000", + "ttl":3600, + "created":"2011-10-29T14:47:09.000+0000" + }, + { + "name":"foo4.bar.com", + "id":"NS-6717886", + "type":"NS", + "data":"dns2.stabletransit.com", + "updated":"2011-10-29T14:47:09.000+0000", + "ttl":3600, + "created":"2011-10-29T14:47:09.000+0000" + } + ], + "totalEntries":3 + }, + "emailAddress":"test@test.com", + "nameservers":[ + { + "name":"ns.rackspace.com" + }, + { + "name":"ns2.rackspace.com" + } + ], + "created":"2011-10-29T14:47:09.000+0000" +} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/list_records_no_results.json libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/list_records_no_results.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/list_records_no_results.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/list_records_no_results.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,22 @@ +{ + "name":"foo4.bar.com", + "id":2946063, + 
"comment":"wazaaa", + "accountId":11111, + "updated":"2011-10-29T18:42:28.000+0000", + "ttl":855, + "recordsList":{ + "records":[], + "totalEntries":0 + }, + "emailAddress":"kami@kami.si", + "nameservers":[ + { + "name":"ns.rackspace.com" + }, + { + "name":"ns2.rackspace.com" + } + ], + "created":"2011-10-29T14:47:09.000+0000" +} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/list_records_success.json libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/list_records_success.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/list_records_success.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/list_records_success.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,51 @@ +{ + "name":"foo4.bar.com", + "id":2946063, + "comment":"wazaaa", + "accountId":11111, + "updated":"2011-10-29T18:42:28.000+0000", + "ttl":855, + "recordsList":{ + "records":[ + { + "name":"test3.foo4.bar.com", + "id":"A-7423034", + "type":"A", + "comment":"lulz", + "data":"127.7.7.7", + "updated":"2011-10-29T18:42:28.000+0000", + "ttl":777, + "created":"2011-10-29T15:29:29.000+0000" + }, + { + "name":"foo4.bar.com", + "id":"NS-6717885", + "type":"NS", + "data":"dns1.stabletransit.com", + "updated":"2011-10-29T14:47:09.000+0000", + "ttl":3600, + "created":"2011-10-29T14:47:09.000+0000" + }, + { + "name":"foo4.bar.com", + "id":"NS-6717886", + "type":"NS", + "data":"dns2.stabletransit.com", + "updated":"2011-10-29T14:47:09.000+0000", + "ttl":3600, + "created":"2011-10-29T14:47:09.000+0000" + } + ], + "totalEntries":3 + }, + "emailAddress":"test@test.com", + "nameservers":[ + { + "name":"ns.rackspace.com" + }, + { + "name":"ns2.rackspace.com" + } + ], + "created":"2011-10-29T14:47:09.000+0000" +} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/list_zones_no_results.json libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/list_zones_no_results.json --- 
libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/list_zones_no_results.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/list_zones_no_results.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "domains":[], + "totalEntries":0 +} + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/list_zones_success.json libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/list_zones_success.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/list_zones_success.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/list_zones_success.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,53 @@ +{ + "domains":[ + { + "name":"foo4.bar.com", + "id":2946063, + "comment":"wazaaa", + "accountId":11111, + "updated":"2011-10-29T18:42:28.000+0000", + "created":"2011-10-29T14:47:09.000+0000" + }, + { + "name":"foo5.bar.com", + "id":2946065, + "comment":"fuu", + "accountId":11111, + "updated":"2011-10-29T14:48:39.000+0000", + "created":"2011-10-29T14:48:39.000+0000" + }, + { + "name":"foo6.bar.com", + "id":2946066, + "comment":"fuu", + "accountId":11111, + "updated":"2011-10-29T14:48:59.000+0000", + "created":"2011-10-29T14:48:58.000+0000" + }, + { + "name":"foo7.bar.com", + "id":2946068, + "comment":"fuu", + "accountId":11111, + "updated":"2011-10-29T14:49:14.000+0000", + "created":"2011-10-29T14:49:13.000+0000" + }, + { + "name":"foo8.bar.com", + "id":2946069, + "comment":"fuu", + "accountId":11111, + "updated":"2011-10-29T14:49:44.000+0000", + "created":"2011-10-29T14:49:43.000+0000" + }, + { + "name":"foo9.bar.com", + "id":2946071, + "comment":"fuu", + "accountId":11111, + "updated":"2011-10-29T14:54:45.000+0000", + "created":"2011-10-29T14:54:45.000+0000" + } + ], + "totalEntries":6 +} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/unauthorized.json libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/unauthorized.json --- 
libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/unauthorized.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/unauthorized.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"unauthorized":{"message":"Username or api key is invalid","code":401}} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/update_record_success.json libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/update_record_success.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/update_record_success.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/update_record_success.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,8 @@ +{ + "request":"{\"comment\": \"lulz\", \"data\": \"127.3.3.3\", \"name\": \"www.bar.foo1.com\", \"ttl\": 777}", + "status":"COMPLETED", + "verb":"PUT", + "jobId":"251c0d0c-95bc-4e09-b99f-4b8748b66246", + "callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/546514/status/251c0d0c-95bc-4e09-b99f-4b8748b66246", + "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/546514/domains/2946173/records/A-7423317" +} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/update_zone_success.json libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/update_zone_success.json --- libcloud-0.5.0/libcloud/test/dns/fixtures/rackspace/update_zone_success.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/rackspace/update_zone_success.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,8 @@ +{ + "request":"{}", + "status":"COMPLETED", + "verb":"PUT", + "jobId":"116a8f17-38ac-4862-827c-506cd04800d5", + "callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/11111/status/116a8f17-38ac-4862-827c-506cd04800d5", + "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/11111/domains/2946173" +} diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/route53/create_zone.xml 
libcloud-0.15.1/libcloud/test/dns/fixtures/route53/create_zone.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/route53/create_zone.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/route53/create_zone.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,20 @@ + + + + /hostedzone/47234 + t.com + some unique reference + + some comment + + 0 + + + + ns1.example.com + ns2.example.com + ns3.example.com + ns4.example.com + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/route53/get_zone.xml libcloud-0.15.1/libcloud/test/dns/fixtures/route53/get_zone.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/route53/get_zone.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/route53/get_zone.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,21 @@ + + + + + /hostedzone/47234 + t.com + some unique reference + + some comment + + 0 + + + + ns1.example.com + ns2.example.com + ns3.example.com + ns4.example.com + + + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/route53/invalid_change_batch.xml libcloud-0.15.1/libcloud/test/dns/fixtures/route53/invalid_change_batch.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/route53/invalid_change_batch.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/route53/invalid_change_batch.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,9 @@ + + + + Sender + InvalidChangeBatch + Invalid change + + 376c64a6-6194-11e1-847f-ddaa49e4c811 + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/route53/list_records.xml libcloud-0.15.1/libcloud/test/dns/fixtures/route53/list_records.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/route53/list_records.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/route53/list_records.xml 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,77 @@ + + + + + + wibble.t.com + CNAME + 86400 + + + t.com + + + + + + 
www.t.com + A + 86400 + + + 208.111.35.173 + + + + + + blahblah.t.com + A + 86400 + + + 208.111.35.173 + + + + + + testdomain.com + MX + 3600 + + + 1 ASPMX.L.GOOGLE.COM. + + + 5 ALT1.ASPMX.L.GOOGLE.COM. + + + 5 ALT2.ASPMX.L.GOOGLE.COM. + + + 10 ASPMX2.GOOGLEMAIL.COM. + + + 10 ASPMX3.GOOGLEMAIL.COM. + + + + + + foo.test.com. + SRV + 300 + + + 1 10 5269 xmpp-server.example.com. + + + 2 12 5060 sip-server.example.com. + + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/route53/list_zones.xml libcloud-0.15.1/libcloud/test/dns/fixtures/route53/list_zones.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/route53/list_zones.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/route53/list_zones.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,54 @@ + + + + + /hostedzone/47234 + t.com + unique description + + some comment + + 0 + + + + /hostedzone/48170 + newbug.net + unique description + + some comment + + 0 + + + + /hostedzone/48017 + newblah.com + unique description + + some comment + + 0 + + + + /hostedzone/47288 + fromapi.com + unique description + + some comment + + 0 + + + + /hostedzone/48008 + blahnew.com + unique description + + some comment + + 0 + + + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/route53/record_does_not_exist.xml libcloud-0.15.1/libcloud/test/dns/fixtures/route53/record_does_not_exist.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/route53/record_does_not_exist.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/route53/record_does_not_exist.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + definitely.not.what.you.askedfor.t.com + CNAME + 86400 + + + t.com + + + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/route53/zone_does_not_exist.xml libcloud-0.15.1/libcloud/test/dns/fixtures/route53/zone_does_not_exist.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/route53/zone_does_not_exist.xml 
1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/route53/zone_does_not_exist.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,9 @@ + + + + Sender + NoSuchHostedZone + No hosted zone found with ID: 47234 + + 376c64a6-6194-11e1-847f-ddaa49e4c811 + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/zerigo/create_record.xml libcloud-0.15.1/libcloud/test/dns/fixtures/zerigo/create_record.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/zerigo/create_record.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/zerigo/create_record.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,13 @@ + + 2008-12-07T02:51:13Z + 127.0.0.1 + www.example.com + A + www + 23456780 + + + + 2008-12-07T02:51:13Z + 12345678 + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/zerigo/create_zone_validation_error.xml libcloud-0.15.1/libcloud/test/dns/fixtures/zerigo/create_zone_validation_error.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/zerigo/create_zone_validation_error.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/zerigo/create_zone_validation_error.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,4 @@ + + Ns type is not included in the list + Default ttl must be greater than or equal to 60 + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/zerigo/create_zone.xml libcloud-0.15.1/libcloud/test/dns/fixtures/zerigo/create_zone.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/zerigo/create_zone.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/zerigo/create_zone.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,18 @@ + + 2008-12-07T02:40:02Z + ns1.example.com,ns2.example.com + true + 600 + foo.bar.com + dnsadmin@example.com + 12345679 + + + pri_sec + + + + 2008-12-07T02:40:02Z + 0 + + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/zerigo/get_record.xml 
libcloud-0.15.1/libcloud/test/dns/fixtures/zerigo/get_record.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/zerigo/get_record.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/zerigo/get_record.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,13 @@ + + 2008-12-07T02:51:13Z + 172.16.16.1 + example.com + A + www + 23456789 + + + + 2008-12-07T02:51:13Z + 12345678 + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/zerigo/get_zone.xml libcloud-0.15.1/libcloud/test/dns/fixtures/zerigo/get_zone.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/zerigo/get_zone.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/zerigo/get_zone.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,32 @@ + + 2008-12-07T02:40:02Z + ns1.example.com,ns2.example.com + true + 600 + example.com + dnsadmin@example.com + 12345678 + + + pri_sec + + + one two + 2008-12-07T02:40:02Z + 1 + + + 2008-12-07T02:51:13Z + 172.16.16.1 + example.com + A + + 23456789 + + + + 2008-12-07T02:51:13Z + 12345678 + + + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/zerigo/list_records_no_results.xml libcloud-0.15.1/libcloud/test/dns/fixtures/zerigo/list_records_no_results.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/zerigo/list_records_no_results.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/zerigo/list_records_no_results.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/zerigo/list_records.xml libcloud-0.15.1/libcloud/test/dns/fixtures/zerigo/list_records.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/zerigo/list_records.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/zerigo/list_records.xml 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,54 @@ + + + 2008-12-07T02:51:13Z + 172.16.16.1 + www.example.com + A + www + 23456789 + + + + 2008-12-07T02:51:13Z + 12345678 + + + 2008-12-07T02:51:13Z + 
172.16.16.2 + test.example.com + A + test + 23456789 + + + 3600 + 2008-12-07T02:51:13Z + 12345678 + + + 2008-12-07T02:51:13Z + 172.16.16.3 + test2.example.com + A + + 23456789 + + + 3600 + 2008-12-07T02:51:13Z + 12345678 + + + 2008-12-07T02:51:13Z + 172.16.16.4 + test4.example.com + A + 23456789 + + + 3600 + 2008-12-07T02:51:13Z + 12345678 + + + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/zerigo/list_zones_no_results.xml libcloud-0.15.1/libcloud/test/dns/fixtures/zerigo/list_zones_no_results.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/zerigo/list_zones_no_results.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/zerigo/list_zones_no_results.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ + diff -Nru libcloud-0.5.0/libcloud/test/dns/fixtures/zerigo/list_zones.xml libcloud-0.15.1/libcloud/test/dns/fixtures/zerigo/list_zones.xml --- libcloud-0.5.0/libcloud/test/dns/fixtures/zerigo/list_zones.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/fixtures/zerigo/list_zones.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,17 @@ + + + 2008-12-07T02:40:02Z + + false + 600 + example.com + + 12345678 + test foo bar + + pri_sec + + + 2008-12-07T02:40:02Z + + diff -Nru libcloud-0.5.0/libcloud/test/dns/test_base.py libcloud-0.15.1/libcloud/test/dns/test_base.py --- libcloud-0.5.0/libcloud/test/dns/test_base.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/test_base.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,108 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and + +from __future__ import with_statement + +import sys +import tempfile + +from mock import Mock + +from libcloud.test import unittest +from libcloud.dns.base import DNSDriver, Zone, Record +from libcloud.dns.types import RecordType + + +MOCK_RECORDS_VALUES = [ + {'id': 1, 'name': 'www', 'type': RecordType.A, 'data': '127.0.0.1'}, + {'id': 2, 'name': 'www', 'type': RecordType.AAAA, + 'data': '2a01:4f8:121:3121::2'}, + + # Custom TTL + {'id': 3, 'name': 'www', 'type': RecordType.A, 'data': '127.0.0.1', + 'extra': {'ttl': 123}}, + + # Record without a name + {'id': 4, 'name': '', 'type': RecordType.A, + 'data': '127.0.0.1'}, + + {'id': 5, 'name': 'test1', 'type': RecordType.TXT, + 'data': 'test foo bar'}, + + # TXT record with quotes + {'id': 5, 'name': 'test2', 'type': RecordType.TXT, + 'data': 'test "foo" "bar"'}, + + # Records with priority + {'id': 5, 'name': '', 'type': RecordType.MX, + 'data': 'mx.example.com', 'extra': {'priority': 10}}, + {'id': 5, 'name': '', 'type': RecordType.SRV, + 'data': '10 3333 example.com', 'extra': {'priority': 20}}, +] + + +class BaseTestCase(unittest.TestCase): + def setUp(self): + self.driver = DNSDriver('none', 'none') + self.tmp_file = tempfile.mkstemp() + self.tmp_path = self.tmp_file[1] + + def test_export_zone_to_bind_format_slave_should_throw(self): + zone = Zone(id=1, domain='example.com', type='slave', ttl=900, + driver=self.driver) + self.assertRaises(ValueError, zone.export_to_bind_format) + + def test_export_zone_to_bind_format_success(self): + zone = Zone(id=1, domain='example.com', type='master', ttl=900, + driver=self.driver) + + mock_records = 
[] + + for values in MOCK_RECORDS_VALUES: + values = values.copy() + values['driver'] = self.driver + values['zone'] = zone + record = Record(**values) + mock_records.append(record) + + self.driver.list_records = Mock() + self.driver.list_records.return_value = mock_records + + result = self.driver.export_zone_to_bind_format(zone=zone) + self.driver.export_zone_to_bind_zone_file(zone=zone, + file_path=self.tmp_path) + + with open(self.tmp_path, 'r') as fp: + content = fp.read() + + lines1 = result.split('\n') + lines2 = content.split('\n') + + for lines in [lines1, lines2]: + self.assertEqual(len(lines), 2 + 1 + 9) + self.assertRegexpMatches(lines[1], r'\$ORIGIN example\.com\.') + self.assertRegexpMatches(lines[2], r'\$TTL 900') + + self.assertRegexpMatches(lines[4], r'www.example.com\.\s+900\s+IN\s+A\s+127\.0\.0\.1') + self.assertRegexpMatches(lines[5], r'www.example.com\.\s+900\s+IN\s+AAAA\s+2a01:4f8:121:3121::2') + self.assertRegexpMatches(lines[6], r'www.example.com\.\s+123\s+IN\s+A\s+127\.0\.0\.1') + self.assertRegexpMatches(lines[7], r'example.com\.\s+900\s+IN\s+A\s+127\.0\.0\.1') + self.assertRegexpMatches(lines[8], r'test1.example.com\.\s+900\s+IN\s+TXT\s+"test foo bar"') + self.assertRegexpMatches(lines[9], r'test2.example.com\.\s+900\s+IN\s+TXT\s+"test \\"foo\\" \\"bar\\""') + self.assertRegexpMatches(lines[10], r'example.com\.\s+900\s+IN\s+MX\s+10\s+mx.example.com') + self.assertRegexpMatches(lines[11], r'example.com\.\s+900\s+IN\s+SRV\s+20\s+10 3333 example.com') + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/dns/test_gandi.py libcloud-0.15.1/libcloud/test/dns/test_gandi.py --- libcloud-0.5.0/libcloud/test/dns/test_gandi.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/test_gandi.py 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,296 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest + +from libcloud.utils.py3 import httplib +from libcloud.dns.types import RecordType, ZoneDoesNotExistError +from libcloud.dns.types import RecordDoesNotExistError +from libcloud.dns.drivers.gandi import GandiDNSDriver +from libcloud.test.file_fixtures import DNSFileFixtures +from libcloud.test.secrets import DNS_GANDI +from libcloud.test.common.test_gandi import BaseGandiMockHttp + + +class GandiTests(unittest.TestCase): + + def setUp(self): + GandiDNSDriver.connectionCls.conn_classes = ( + GandiMockHttp, GandiMockHttp) + GandiMockHttp.type = None + self.driver = GandiDNSDriver(*DNS_GANDI) + + def test_list_record_types(self): + record_types = self.driver.list_record_types() + self.assertEqual(len(record_types), 10) + self.assertTrue(RecordType.A in record_types) + + def test_list_zones(self): + zones = self.driver.list_zones() + self.assertEqual(len(zones), 5) + + zone = zones[0] + self.assertEqual(zone.id, '47234') + self.assertEqual(zone.type, 'master') + self.assertEqual(zone.domain, 't.com') + + def test_list_records(self): + zone = self.driver.list_zones()[0] + records = self.driver.list_records(zone=zone) + self.assertEqual(len(records), 3) + + record = records[1] + self.assertEqual(record.name, 'www') + self.assertEqual(record.id, 'A:www') + 
self.assertEqual(record.type, RecordType.A) + self.assertEqual(record.data, '208.111.35.173') + + def test_get_zone(self): + zone = self.driver.get_zone(zone_id='47234') + self.assertEqual(zone.id, '47234') + self.assertEqual(zone.type, 'master') + self.assertEqual(zone.domain, 't.com') + + def test_get_record(self): + record = self.driver.get_record(zone_id='47234', + record_id='CNAME:t.com') + self.assertEqual(record.name, 'wibble') + self.assertEqual(record.type, RecordType.CNAME) + self.assertEqual(record.data, 't.com') + + def test_list_records_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + GandiMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.list_records(zone=zone) + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, zone.id) + else: + self.fail('Exception was not thrown') + + def test_get_zone_does_not_exist(self): + GandiMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_zone(zone_id='47234') + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, '47234') + else: + self.fail('Exception was not thrown') + + def test_get_record_zone_does_not_exist(self): + GandiMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_record(zone_id='4444', record_id='CNAME:t.com') + except ZoneDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_get_record_record_does_not_exist(self): + GandiMockHttp.type = 'RECORD_DOES_NOT_EXIST' + + try: + self.driver.get_record(zone_id='47234', + record_id='CNAME:t.com') + except RecordDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_zone(self): + zone = self.driver.create_zone(domain='t.com', type='master', + ttl=None, extra=None) + self.assertEqual(zone.id, '47234') + self.assertEqual(zone.domain, 't.com') + + def test_update_zone(self): + zone = self.driver.get_zone(zone_id='47234') + zone = self.driver.update_zone(zone, domain='t.com') + 
self.assertEqual(zone.id, '47234') + self.assertEqual(zone.type, 'master') + self.assertEqual(zone.domain, 't.com') + + def test_create_record(self): + zone = self.driver.list_zones()[0] + record = self.driver.create_record( + name='www', zone=zone, + type=RecordType.A, data='127.0.0.1', + extra={'ttl': 30} + ) + + self.assertEqual(record.id, 'A:www') + self.assertEqual(record.name, 'www') + self.assertEqual(record.zone, zone) + self.assertEqual(record.type, RecordType.A) + self.assertEqual(record.data, '127.0.0.1') + + def test_update_record(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[1] + + params = { + 'record': record, + 'name': 'www', + 'type': RecordType.A, + 'data': '127.0.0.1', + 'extra': {'ttl': 30}} + updated_record = self.driver.update_record(**params) + + self.assertEqual(record.data, '208.111.35.173') + + self.assertEqual(updated_record.id, 'A:www') + self.assertEqual(updated_record.name, 'www') + self.assertEqual(updated_record.zone, record.zone) + self.assertEqual(updated_record.type, RecordType.A) + self.assertEqual(updated_record.data, '127.0.0.1') + + def test_delete_zone(self): + zone = self.driver.list_zones()[0] + status = self.driver.delete_zone(zone=zone) + self.assertTrue(status) + + def test_delete_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + GandiMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.delete_zone(zone=zone) + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, zone.id) + else: + self.fail('Exception was not thrown') + + def test_delete_record(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + status = self.driver.delete_record(record=record) + self.assertTrue(status) + + def test_delete_record_does_not_exist(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + GandiMockHttp.type = 'RECORD_DOES_NOT_EXIST' + try: + 
self.driver.delete_record(record=record) + except RecordDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.record_id, record.id) + else: + self.fail('Exception was not thrown') + + +class GandiMockHttp(BaseGandiMockHttp): + fixtures = DNSFileFixtures('gandi') + + def _xmlrpc__domain_zone_create(self, method, url, body, headers): + body = self.fixtures.load('create_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_update(self, method, url, body, headers): + body = self.fixtures.load('get_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_list(self, method, url, body, headers): + body = self.fixtures.load('list_zones.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_list(self, method, url, body, headers): + body = self.fixtures.load('list_records.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_add(self, method, url, body, headers): + body = self.fixtures.load('create_record.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_delete(self, method, url, body, headers): + body = self.fixtures.load('delete_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_info(self, method, url, body, headers): + body = self.fixtures.load('get_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_delete(self, method, url, body, headers): + body = self.fixtures.load('delete_record.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_update(self, method, url, body, headers): + body = self.fixtures.load('create_record.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_version_new(self, method, url, body, headers): + 
body = self.fixtures.load('new_version.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_version_set(self, method, url, body, headers): + body = self.fixtures.load('new_version.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_list_ZONE_DOES_NOT_EXIST(self, method, url, + body, headers): + body = self.fixtures.load('zone_doesnt_exist.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_info_ZONE_DOES_NOT_EXIST(self, method, url, body, + headers): + body = self.fixtures.load('zone_doesnt_exist.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_list_ZONE_DOES_NOT_EXIST(self, method, url, body, + headers): + body = self.fixtures.load('zone_doesnt_exist.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_delete_ZONE_DOES_NOT_EXIST(self, method, url, + body, headers): + body = self.fixtures.load('zone_doesnt_exist.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_list_RECORD_DOES_NOT_EXIST( + self, method, url, body, headers): + body = self.fixtures.load('list_records_empty.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_info_RECORD_DOES_NOT_EXIST(self, method, url, + body, headers): + body = self.fixtures.load('list_zones.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_record_delete_RECORD_DOES_NOT_EXIST( + self, method, url, body, headers): + body = self.fixtures.load('delete_record_doesnotexist.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc__domain_zone_version_new_RECORD_DOES_NOT_EXIST( + self, method, url, body, headers): + body = self.fixtures.load('new_version.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def 
_xmlrpc__domain_zone_version_set_RECORD_DOES_NOT_EXIST( + self, method, url, body, headers): + body = self.fixtures.load('new_version.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/dns/test_google.py libcloud-0.15.1/libcloud/test/dns/test_google.py --- libcloud-0.5.0/libcloud/test/dns/test_google.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/test_google.py 2014-06-11 14:28:05.000000000 +0000 @@ -0,0 +1,182 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and + +import sys +import unittest + +from libcloud.utils.py3 import httplib + +from libcloud.dns.types import ZoneDoesNotExistError +from libcloud.dns.types import RecordDoesNotExistError +from libcloud.dns.drivers.google import GoogleDNSDriver +from libcloud.common.google import (GoogleBaseAuthConnection, + GoogleInstalledAppAuthConnection, + GoogleBaseConnection) + +from libcloud.test.common.test_google import GoogleAuthMockHttp +from libcloud.test import MockHttpTestCase, LibcloudTestCase +from libcloud.test.file_fixtures import DNSFileFixtures +from libcloud.test.secrets import DNS_PARAMS_GOOGLE, DNS_KEYWORD_PARAMS_GOOGLE + + +class GoogleTests(LibcloudTestCase): + GoogleBaseConnection._get_token_info_from_file = lambda x: None + GoogleBaseConnection._write_token_info_to_file = lambda x: None + GoogleInstalledAppAuthConnection.get_code = lambda x: '1234' + + def setUp(self): + GoogleDNSMockHttp.test = self + GoogleDNSDriver.connectionCls.conn_classes = (GoogleDNSMockHttp, + GoogleDNSMockHttp) + GoogleBaseAuthConnection.conn_classes = (GoogleAuthMockHttp, + GoogleAuthMockHttp) + GoogleDNSMockHttp.type = None + kwargs = DNS_KEYWORD_PARAMS_GOOGLE.copy() + kwargs['auth_type'] = 'IA' + self.driver = GoogleDNSDriver(*DNS_PARAMS_GOOGLE, **kwargs) + + def test_default_scopes(self): + self.assertEqual(self.driver.scopes, None) + + def test_list_zones(self): + zones = self.driver.list_zones() + self.assertEqual(len(zones), 2) + + def test_list_records(self): + zone = self.driver.list_zones()[0] + records = self.driver.list_records(zone=zone) + self.assertEqual(len(records), 3) + + def test_get_zone(self): + zone = self.driver.get_zone('example-com') + self.assertEqual(zone.id, 'example-com') + self.assertEqual(zone.domain, 'example.com.') + + def test_get_zone_does_not_exist(self): + GoogleDNSMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_zone('example-com') + except 
ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, 'example-com') + else: + self.fail('Exception not thrown') + + def test_get_record(self): + GoogleDNSMockHttp.type = 'FILTER_ZONES' + zone = self.driver.list_zones()[0] + record = self.driver.get_record(zone.id, "A:foo.example.com.") + self.assertEqual(record.id, 'A:foo.example.com.') + self.assertEqual(record.name, 'foo.example.com.') + self.assertEqual(record.type, 'A') + self.assertEqual(record.zone.id, 'example-com') + + def test_get_record_zone_does_not_exist(self): + GoogleDNSMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_record('example-com', 'a:a') + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, 'example-com') + else: + self.fail('Exception not thrown') + + def test_get_record_record_does_not_exist(self): + GoogleDNSMockHttp.type = 'RECORD_DOES_NOT_EXIST' + try: + self.driver.get_record('example-com', "A:foo") + except RecordDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.record_id, 'A:foo') + else: + self.fail('Exception not thrown') + + def test_create_zone(self): + extra = {'description': 'new domain for example.org'} + zone = self.driver.create_zone('example.org.', extra) + self.assertEqual(zone.domain, 'example.org.') + self.assertEqual(zone.extra['description'], extra['description']) + self.assertEqual(len(zone.extra['nameServers']), 4) + + def test_delete_zone(self): + zone = self.driver.get_zone('example-com') + res = self.driver.delete_zone(zone) + self.assertTrue(res) + + +class GoogleDNSMockHttp(MockHttpTestCase): + fixtures = DNSFileFixtures('google') + + def _dns_v1beta1_projects_project_name_managedZones(self, method, url, + body, headers): + if method == 'POST': + body = self.fixtures.load('zone_create.json') + else: + body = self.fixtures.load('zone_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dns_v1beta1_projects_project_name_managedZones_FILTER_ZONES( + 
self, method, url, body, headers): + body = self.fixtures.load('zone_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dns_v1beta1_projects_project_name_managedZones_example_com_rrsets_FILTER_ZONES( + self, method, url, body, headers): + body = self.fixtures.load('record.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dns_v1beta1_projects_project_name_managedZones_example_com_rrsets( + self, method, url, body, headers): + body = self.fixtures.load('records_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dns_v1beta1_projects_project_name_managedZones_example_com( + self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('managed_zones_1.json') + elif method == 'DELETE': + body = None + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dns_v1beta1_projects_project_name_managedZones_example_com_ZONE_DOES_NOT_EXIST( + self, method, url, body, headers): + body = self.fixtures.load('get_zone_does_not_exists.json') + return (httplib.NOT_FOUND, body, {}, + httplib.responses[httplib.NOT_FOUND]) + + def _dns_v1beta1_projects_project_name_managedZones_example_com_RECORD_DOES_NOT_EXIST( + self, method, url, body, headers): + body = self.fixtures.load('managed_zones_1.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dns_v1beta1_projects_project_name_managedZones_example_com_rrsets_RECORD_DOES_NOT_EXIST( + self, method, url, body, headers): + body = self.fixtures.load('no_record.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dns_v1beta1_projects_project_name_managedZones_example_com_rrsets_ZONE_DOES_NOT_EXIST( + self, method, url, body, headers): + body = self.fixtures.load('get_zone_does_not_exists.json') + return (httplib.NOT_FOUND, body, {}, + httplib.responses[httplib.NOT_FOUND]) + + def _dns_v1beta1_projects_project_name_managedZones_example_com_FILTER_ZONES( + 
self, method, url, body, headers): + body = self.fixtures.load('zone.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/dns/test_hostvirtual.py libcloud-0.15.1/libcloud/test/dns/test_hostvirtual.py --- libcloud-0.5.0/libcloud/test/dns/test_hostvirtual.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/test_hostvirtual.py 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,258 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import unittest + +from libcloud.utils.py3 import httplib + +from libcloud.dns.types import RecordType, ZoneDoesNotExistError +from libcloud.dns.types import RecordDoesNotExistError +from libcloud.dns.drivers.hostvirtual import HostVirtualDNSDriver +from libcloud.test import MockHttp +from libcloud.test.file_fixtures import DNSFileFixtures +from libcloud.test.secrets import DNS_PARAMS_HOSTVIRTUAL + + +class HostVirtualTests(unittest.TestCase): + def setUp(self): + HostVirtualDNSDriver.connectionCls.conn_classes = ( + None, HostVirtualMockHttp) + HostVirtualMockHttp.type = None + self.driver = HostVirtualDNSDriver(*DNS_PARAMS_HOSTVIRTUAL) + + def test_list_record_types(self): + record_types = self.driver.list_record_types() + self.assertEqual(len(record_types), 7) + self.assertTrue(RecordType.A in record_types) + + def test_list_zones(self): + zones = self.driver.list_zones() + self.assertEqual(len(zones), 5) + + zone = zones[0] + self.assertEqual(zone.id, '47234') + self.assertEqual(zone.type, 'master') + self.assertEqual(zone.domain, 't.com') + self.assertEqual(zone.ttl, '3600') + + def test_list_records(self): + zone = self.driver.list_zones()[0] + records = self.driver.list_records(zone=zone) + self.assertEqual(len(records), 3) + + record = records[1] + self.assertEqual(record.name, 'www.t.com') + self.assertEqual(record.id, '300719') + self.assertEqual(record.type, RecordType.A) + self.assertEqual(record.data, '208.111.35.173') + + def test_get_zone(self): + zone = self.driver.get_zone(zone_id='47234') + self.assertEqual(zone.id, '47234') + self.assertEqual(zone.type, 'master') + self.assertEqual(zone.domain, 't.com') + self.assertEqual(zone.ttl, '3600') + + def test_get_record(self): + record = self.driver.get_record(zone_id='47234', record_id='300377') + self.assertEqual(record.id, '300377') + self.assertEqual(record.name, '*.t.com') + self.assertEqual(record.type, RecordType.CNAME) + self.assertEqual(record.data, 't.com') + + def 
test_list_records_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + HostVirtualMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.list_records(zone=zone) + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, zone.id) + else: + self.fail('Exception was not thrown') + + def test_get_zone_does_not_exist(self): + HostVirtualMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_zone(zone_id='4444') + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, '4444') + else: + self.fail('Exception was not thrown') + + def test_get_record_zone_does_not_exist(self): + HostVirtualMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_record(zone_id='4444', record_id='28536') + except ZoneDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_get_record_record_does_not_exist(self): + HostVirtualMockHttp.type = 'RECORD_DOES_NOT_EXIST' + + try: + self.driver.get_record(zone_id='47234', record_id='4444') + except RecordDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_zone(self): + zone = self.driver.create_zone(domain='t.com', type='master', + ttl=None, extra=None) + self.assertEqual(zone.id, '47234') + self.assertEqual(zone.domain, 't.com') + + def test_update_zone(self): + zone = self.driver.list_zones()[0] + updated_zone = self.driver.update_zone(zone=zone, domain='tt.com') + + self.assertEqual(updated_zone.id, zone.id) + self.assertEqual(updated_zone.domain, 'tt.com') + self.assertEqual(updated_zone.type, zone.type) + self.assertEqual(updated_zone.ttl, '3600') + + def test_create_record(self): + zone = self.driver.list_zones()[0] + record = self.driver.create_record( + name='www', zone=zone, + type=RecordType.A, data='127.0.0.1' + ) + + self.assertEqual(record.id, '300377') + self.assertEqual(record.name, 'www') + self.assertEqual(record.zone, zone) + self.assertEqual(record.type, RecordType.A) 
+ self.assertEqual(record.data, '127.0.0.1') + + def test_update_record(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[1] + updated_record = self.driver.update_record(record=record, name='www', + type=RecordType.AAAA, + data='::1') + self.assertEqual(record.data, '208.111.35.173') + + self.assertEqual(updated_record.id, record.id) + self.assertEqual(updated_record.name, 'www') + self.assertEqual(updated_record.zone, record.zone) + self.assertEqual(updated_record.type, RecordType.AAAA) + self.assertEqual(updated_record.data, '::1') + + def test_delete_zone(self): + zone = self.driver.list_zones()[0] + status = self.driver.delete_zone(zone=zone) + self.assertTrue(status) + + def test_delete_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + HostVirtualMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.delete_zone(zone=zone) + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, zone.id) + else: + self.fail('Exception was not thrown') + + def test_delete_record(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + status = self.driver.delete_record(record=record) + self.assertTrue(status) + + def test_delete_record_does_not_exist(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + HostVirtualMockHttp.type = 'RECORD_DOES_NOT_EXIST' + try: + self.driver.delete_record(record=record) + except RecordDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.record_id, record.id) + else: + self.fail('Exception was not thrown') + + +class HostVirtualMockHttp(MockHttp): + fixtures = DNSFileFixtures('hostvirtual') + + def _dns_zone(self, method, url, body, headers): + body = self.fixtures.load('get_zone.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dns_zones(self, method, url, body, headers): + body = self.fixtures.load('list_zones.json') + return 
(httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dns_record(self, method, url, body, headers): + body = self.fixtures.load('get_record.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dns_records(self, method, url, body, headers): + body = self.fixtures.load('list_records.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dns_zone_ZONE_DOES_NOT_EXIST(self, method, url, body, headers): + body = self.fixtures.load('zone_does_not_exist.json') + return (httplib.NOT_FOUND, body, + {}, httplib.responses[httplib.NOT_FOUND]) + + def _dns_zone_RECORD_DOES_NOT_EXIST(self, method, url, body, headers): + body = self.fixtures.load('get_zone.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dns_zones_ZONE_DOES_NOT_EXIST(self, method, url, body, headers): + body = self.fixtures.load('zone_does_not_exist.json') + return (httplib.NOT_FOUND, body, + {}, httplib.responses[httplib.NOT_FOUND]) + + def _dns_record_ZONE_DOES_NOT_EXIST(self, method, + url, body, headers): + body = self.fixtures.load('zone_does_not_exist.json') + return (httplib.NOT_FOUND, body, + {}, httplib.responses[httplib.NOT_FOUND]) + + def _dns_record_RECORD_DOES_NOT_EXIST(self, method, + url, body, headers): + body = self.fixtures.load('zone_does_not_exist.json') + return (httplib.NOT_FOUND, body, + {}, httplib.responses[httplib.NOT_FOUND]) + + def _dns_records_ZONE_DOES_NOT_EXIST(self, method, + url, body, headers): + body = self.fixtures.load('zone_does_not_exist.json') + return (httplib.NOT_FOUND, body, + {}, httplib.responses[httplib.NOT_FOUND]) + + def _dns_zones_RECORD_DOES_NOT_EXIST(self, method, + url, body, headers): + body = self.fixtures.load('zone_does_not_exist.json') + return (httplib.NOT_FOUND, body, + {}, httplib.responses[httplib.NOT_FOUND]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/dns/test_linode.py 
libcloud-0.15.1/libcloud/test/dns/test_linode.py --- libcloud-0.5.0/libcloud/test/dns/test_linode.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/test_linode.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,329 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and + +import sys +import unittest + +from libcloud.utils.py3 import httplib + +from libcloud.common.linode import LinodeException +from libcloud.dns.types import RecordType, ZoneDoesNotExistError +from libcloud.dns.types import RecordDoesNotExistError +from libcloud.dns.drivers.linode import LinodeDNSDriver + +from libcloud.test import MockHttp +from libcloud.test.file_fixtures import DNSFileFixtures +from libcloud.test.secrets import DNS_PARAMS_LINODE + + +class LinodeTests(unittest.TestCase): + def setUp(self): + LinodeDNSDriver.connectionCls.conn_classes = ( + None, LinodeMockHttp) + LinodeMockHttp.use_param = 'api_action' + LinodeMockHttp.type = None + self.driver = LinodeDNSDriver(*DNS_PARAMS_LINODE) + + def assertHasKeys(self, dictionary, keys): + for key in keys: + self.assertTrue(key in dictionary, 'key "%s" not in dictionary' % + (key)) + + def test_list_record_types(self): + record_types = self.driver.list_record_types() + self.assertEqual(len(record_types), 7) + 
self.assertTrue(RecordType.A in record_types) + + def test_list_zones_success(self): + zones = self.driver.list_zones() + self.assertEqual(len(zones), 2) + + zone = zones[0] + self.assertEqual(zone.id, '5093') + self.assertEqual(zone.type, 'master') + self.assertEqual(zone.domain, 'linode.com') + self.assertEqual(zone.ttl, None) + self.assertHasKeys(zone.extra, ['description', 'SOA_Email', 'status']) + + def test_list_records_success(self): + zone = self.driver.list_zones()[0] + records = self.driver.list_records(zone=zone) + self.assertEqual(len(records), 2) + + arecord = records[0] + self.assertEqual(arecord.id, '3585100') + self.assertEqual(arecord.name, 'mc') + self.assertEqual(arecord.type, RecordType.A) + self.assertEqual(arecord.data, '127.0.0.1') + self.assertHasKeys(arecord.extra, ['protocol', 'ttl_sec', 'port', + 'weight']) + + def test_list_records_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + LinodeMockHttp.type = 'ZONE_DOES_NOT_EXIST' + try: + self.driver.list_records(zone=zone) + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, zone.id) + else: + self.fail('Exception was not thrown') + + def test_get_zone_success(self): + LinodeMockHttp.type = 'GET_ZONE' + + zone = self.driver.get_zone(zone_id='5093') + self.assertEqual(zone.id, '5093') + self.assertEqual(zone.type, 'master') + self.assertEqual(zone.domain, 'linode.com') + self.assertEqual(zone.ttl, None) + self.assertHasKeys(zone.extra, ['description', 'SOA_Email', 'status']) + + def test_get_zone_does_not_exist(self): + LinodeMockHttp.type = 'GET_ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_zone(zone_id='4444') + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, '4444') + else: + self.fail('Exception was not thrown') + + def test_get_record_success(self): + LinodeMockHttp.type = 'GET_RECORD' + record = self.driver.get_record(zone_id='1234', record_id='3585100') + self.assertEqual(record.id, '3585100') + 
self.assertEqual(record.name, 'www') + self.assertEqual(record.type, RecordType.A) + self.assertEqual(record.data, '127.0.0.1') + self.assertHasKeys(record.extra, ['protocol', 'ttl_sec', 'port', + 'weight']) + + def test_get_record_zone_does_not_exist(self): + LinodeMockHttp.type = 'GET_RECORD_ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_record(zone_id='444', record_id='3585100') + except ZoneDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_get_record_record_does_not_exist(self): + LinodeMockHttp.type = 'GET_RECORD_RECORD_DOES_NOT_EXIST' + + try: + self.driver.get_record(zone_id='4441', record_id='3585100') + except RecordDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_zone_success(self): + zone = self.driver.create_zone(domain='foo.bar.com', type='master', + ttl=None, extra=None) + self.assertEqual(zone.id, '5094') + self.assertEqual(zone.domain, 'foo.bar.com') + + def test_create_zone_validaton_error(self): + LinodeMockHttp.type = 'VALIDATION_ERROR' + + try: + self.driver.create_zone(domain='foo.bar.com', type='master', + ttl=None, extra=None) + except LinodeException: + pass + else: + self.fail('Exception was not thrown') + + def test_update_zone_success(self): + zone = self.driver.list_zones()[0] + updated_zone = self.driver.update_zone(zone=zone, + domain='libcloud.org', + ttl=10, + extra={'SOA_Email': + 'bar@libcloud.org'}) + + self.assertEqual(zone.extra['SOA_Email'], 'dns@example.com') + + self.assertEqual(updated_zone.id, zone.id) + self.assertEqual(updated_zone.domain, 'libcloud.org') + self.assertEqual(updated_zone.type, zone.type) + self.assertEqual(updated_zone.ttl, 10) + self.assertEqual(updated_zone.extra['SOA_Email'], 'bar@libcloud.org') + self.assertEqual(updated_zone.extra['status'], zone.extra['status']) + self.assertEqual(updated_zone.extra['description'], + zone.extra['description']) + + def test_create_record_success(self): + zone = 
self.driver.list_zones()[0] + record = self.driver.create_record(name='www', zone=zone, + type=RecordType.A, data='127.0.0.1') + + self.assertEqual(record.id, '3585100') + self.assertEqual(record.name, 'www') + self.assertEqual(record.zone, zone) + self.assertEqual(record.type, RecordType.A) + self.assertEqual(record.data, '127.0.0.1') + + def test_update_record_success(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + updated_record = self.driver.update_record(record=record, name='www', + type=RecordType.AAAA, + data='::1') + + self.assertEqual(record.data, '127.0.0.1') + + self.assertEqual(updated_record.id, record.id) + self.assertEqual(updated_record.name, 'www') + self.assertEqual(updated_record.zone, record.zone) + self.assertEqual(updated_record.type, RecordType.AAAA) + self.assertEqual(updated_record.data, '::1') + + def test_delete_zone_success(self): + zone = self.driver.list_zones()[0] + status = self.driver.delete_zone(zone=zone) + self.assertTrue(status) + + def test_delete_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + LinodeMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.delete_zone(zone=zone) + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, zone.id) + else: + self.fail('Exception was not thrown') + + def test_delete_record_success(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + status = self.driver.delete_record(record=record) + self.assertTrue(status) + + def test_delete_record_does_not_exist(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + + LinodeMockHttp.type = 'RECORD_DOES_NOT_EXIST' + + try: + self.driver.delete_record(record=record) + except RecordDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.record_id, record.id) + else: + self.fail('Exception was not thrown') + + +class LinodeMockHttp(MockHttp): + fixtures = 
DNSFileFixtures('linode') + + def _domain_list(self, method, url, body, headers): + body = self.fixtures.load('domain_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _domain_resource_list(self, method, url, body, headers): + body = self.fixtures.load('resource_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _ZONE_DOES_NOT_EXIST_domain_resource_list(self, method, url, body, + headers): + body = self.fixtures.load('resource_list_does_not_exist.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _GET_ZONE_domain_list(self, method, url, body, headers): + body = self.fixtures.load('get_zone.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _GET_ZONE_DOES_NOT_EXIST_domain_list(self, method, url, body, + headers): + body = self.fixtures.load('get_zone_does_not_exist.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _GET_RECORD_domain_list(self, method, url, body, headers): + body = self.fixtures.load('get_zone.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _GET_RECORD_domain_resource_list(self, method, url, body, headers): + body = self.fixtures.load('get_record.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _GET_RECORD_ZONE_DOES_NOT_EXIST_domain_list(self, method, url, body, + headers): + body = self.fixtures.load('get_zone_does_not_exist.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _GET_RECORD_ZONE_DOES_NOT_EXIST_domain_resource_list(self, method, url, + body, headers): + body = self.fixtures.load('get_record_does_not_exist.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _GET_RECORD_RECORD_DOES_NOT_EXIST_domain_list(self, method, url, body, + headers): + body = self.fixtures.load('get_zone.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def 
_GET_RECORD_RECORD_DOES_NOT_EXIST_domain_resource_list(self, method, + url, body, + headers): + body = self.fixtures.load('get_record_does_not_exist.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _domain_create(self, method, url, body, headers): + body = self.fixtures.load('create_domain.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _VALIDATION_ERROR_domain_create(self, method, url, body, headers): + body = self.fixtures.load('create_domain_validation_error.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _domain_update(self, method, url, body, headers): + body = self.fixtures.load('update_domain.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _domain_resource_create(self, method, url, body, headers): + body = self.fixtures.load('create_resource.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _domain_resource_update(self, method, url, body, headers): + body = self.fixtures.load('update_resource.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _domain_delete(self, method, url, body, headers): + body = self.fixtures.load('delete_domain.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _ZONE_DOES_NOT_EXIST_domain_delete(self, method, url, body, headers): + body = self.fixtures.load('delete_domain_does_not_exist.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _domain_resource_delete(self, method, url, body, headers): + body = self.fixtures.load('delete_resource.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _RECORD_DOES_NOT_EXIST_domain_resource_delete(self, method, url, body, + headers): + body = self.fixtures.load('delete_resource_does_not_exist.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru 
libcloud-0.5.0/libcloud/test/dns/test_rackspace.py libcloud-0.15.1/libcloud/test/dns/test_rackspace.py --- libcloud-0.5.0/libcloud/test/dns/test_rackspace.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/test_rackspace.py 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,490 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and + +import sys +import unittest + +from libcloud.utils.py3 import httplib + +from libcloud.common.types import LibcloudError +from libcloud.dns.types import RecordType, ZoneDoesNotExistError +from libcloud.dns.types import RecordDoesNotExistError +from libcloud.dns.drivers.rackspace import RackspaceUSDNSDriver +from libcloud.dns.drivers.rackspace import RackspaceUKDNSDriver + +from libcloud.test import MockHttp +from libcloud.test.file_fixtures import DNSFileFixtures +from libcloud.test.secrets import DNS_PARAMS_RACKSPACE + + +class RackspaceUSTests(unittest.TestCase): + klass = RackspaceUSDNSDriver + endpoint_url = 'https://dns.api.rackspacecloud.com/v1.0/11111' + + def setUp(self): + self.klass.connectionCls.conn_classes = ( + None, RackspaceMockHttp) + RackspaceMockHttp.type = None + self.driver = self.klass(*DNS_PARAMS_RACKSPACE) + self.driver.connection.poll_interval = 0.0 + # normally authentication happens lazily, but we 
force it here + self.driver.connection._populate_hosts_and_request_paths() + + def test_force_auth_token_kwargs(self): + kwargs = { + 'ex_force_auth_token': 'some-auth-token', + 'ex_force_base_url': 'https://dns.api.rackspacecloud.com/v1.0/11111' + } + driver = self.klass(*DNS_PARAMS_RACKSPACE, **kwargs) + driver.list_zones() + + self.assertEqual(kwargs['ex_force_auth_token'], + driver.connection.auth_token) + self.assertEqual('/v1.0/11111', + driver.connection.request_path) + + def test_force_auth_url_kwargs(self): + kwargs = { + 'ex_force_auth_version': '2.0', + 'ex_force_auth_url': 'https://identity.api.rackspace.com' + } + driver = self.klass(*DNS_PARAMS_RACKSPACE, **kwargs) + + self.assertEqual(kwargs['ex_force_auth_url'], + driver.connection._ex_force_auth_url) + self.assertEqual(kwargs['ex_force_auth_version'], + driver.connection._auth_version) + + def test_gets_auth_2_0_endpoint(self): + kwargs = {'ex_force_auth_version': '2.0_password'} + driver = self.klass(*DNS_PARAMS_RACKSPACE, **kwargs) + driver.connection._populate_hosts_and_request_paths() + + self.assertEquals(self.endpoint_url, driver.connection.get_endpoint()) + + def test_list_record_types(self): + record_types = self.driver.list_record_types() + self.assertEqual(len(record_types), 8) + self.assertTrue(RecordType.A in record_types) + + def test_list_zones_success(self): + zones = self.driver.list_zones() + + self.assertEqual(len(zones), 6) + self.assertEqual(zones[0].domain, 'foo4.bar.com') + self.assertEqual(zones[0].extra['comment'], 'wazaaa') + + def test_list_zones_http_413(self): + RackspaceMockHttp.type = '413' + + try: + self.driver.list_zones() + except LibcloudError: + pass + else: + self.fail('Exception was not thrown') + + def test_list_zones_no_results(self): + RackspaceMockHttp.type = 'NO_RESULTS' + zones = self.driver.list_zones() + self.assertEqual(len(zones), 0) + + def test_list_records_success(self): + zone = self.driver.list_zones()[0] + records = 
self.driver.list_records(zone=zone) + + self.assertEqual(len(records), 3) + self.assertEqual(records[0].name, 'test3') + self.assertEqual(records[0].type, RecordType.A) + self.assertEqual(records[0].data, '127.7.7.7') + self.assertEqual(records[0].extra['ttl'], 777) + self.assertEqual(records[0].extra['comment'], 'lulz') + self.assertEqual(records[0].extra['fqdn'], 'test3.%s' % + (records[0].zone.domain)) + + def test_list_records_no_results(self): + zone = self.driver.list_zones()[0] + RackspaceMockHttp.type = 'NO_RESULTS' + records = self.driver.list_records(zone=zone) + self.assertEqual(len(records), 0) + + def test_list_records_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + RackspaceMockHttp.type = 'ZONE_DOES_NOT_EXIST' + try: + self.driver.list_records(zone=zone) + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, zone.id) + else: + self.fail('Exception was not thrown') + + def test_get_zone_success(self): + RackspaceMockHttp.type = 'GET_ZONE' + zone = self.driver.get_zone(zone_id='2946063') + + self.assertEqual(zone.id, '2946063') + self.assertEqual(zone.domain, 'foo4.bar.com') + self.assertEqual(zone.type, 'master') + self.assertEqual(zone.extra['email'], 'test@test.com') + + def test_get_zone_does_not_exist(self): + RackspaceMockHttp.type = 'DOES_NOT_EXIST' + + try: + self.driver.get_zone(zone_id='4444') + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, '4444') + else: + self.fail('Exception was not thrown') + + def test_get_record_success(self): + record = self.driver.get_record(zone_id='12345678', + record_id='23456789') + self.assertEqual(record.id, 'A-7423034') + self.assertEqual(record.name, 'test3') + self.assertEqual(record.type, RecordType.A) + self.assertEqual(record.extra['comment'], 'lulz') + + def test_get_record_zone_does_not_exist(self): + RackspaceMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_record(zone_id='444', record_id='28536') + 
except ZoneDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_get_record_record_does_not_exist(self): + RackspaceMockHttp.type = 'RECORD_DOES_NOT_EXIST' + + try: + self.driver.get_record(zone_id='12345678', + record_id='28536') + except RecordDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_zone_success(self): + RackspaceMockHttp.type = 'CREATE_ZONE' + + zone = self.driver.create_zone(domain='bar.foo1.com', type='master', + ttl=None, + extra={'email': 'test@test.com'}) + self.assertEqual(zone.id, '2946173') + self.assertEqual(zone.domain, 'bar.foo1.com') + self.assertEqual(zone.type, 'master') + self.assertEqual(zone.extra['email'], 'test@test.com') + + def test_create_zone_validaton_error(self): + RackspaceMockHttp.type = 'CREATE_ZONE_VALIDATION_ERROR' + + try: + self.driver.create_zone(domain='foo.bar.com', type='master', + ttl=10, + extra={'email': 'test@test.com'}) + except Exception: + e = sys.exc_info()[1] + self.assertEqual(str(e), 'Validation errors: Domain TTL is ' + + 'required and must be greater than ' + + 'or equal to 300') + else: + self.fail('Exception was not thrown') + + def test_update_zone_success(self): + zone = self.driver.list_zones()[0] + updated_zone = self.driver.update_zone(zone=zone, + extra={'comment': + 'bar foo'}) + + self.assertEqual(zone.extra['comment'], 'wazaaa') + + self.assertEqual(updated_zone.id, zone.id) + self.assertEqual(updated_zone.domain, 'foo4.bar.com') + self.assertEqual(updated_zone.type, zone.type) + self.assertEqual(updated_zone.ttl, zone.ttl) + self.assertEqual(updated_zone.extra['comment'], 'bar foo') + + def test_update_zone_domain_cannot_be_changed(self): + zone = self.driver.list_zones()[0] + + try: + self.driver.update_zone(zone=zone, domain='libcloud.org') + except LibcloudError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_record_success(self): + zone = self.driver.list_zones()[0] + + 
RackspaceMockHttp.type = 'CREATE_RECORD' + record = self.driver.create_record(name='www', zone=zone, + type=RecordType.A, data='127.1.1.1') + + self.assertEqual(record.id, 'A-7423317') + self.assertEqual(record.name, 'www') + self.assertEqual(record.zone, zone) + self.assertEqual(record.type, RecordType.A) + self.assertEqual(record.data, '127.1.1.1') + self.assertEqual(record.extra['fqdn'], 'www.%s' % (zone.domain)) + + def test_update_record_success(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + updated_record = self.driver.update_record(record=record, + data='127.3.3.3') + + self.assertEqual(record.name, 'test3') + self.assertEqual(record.data, '127.7.7.7') + + self.assertEqual(updated_record.id, record.id) + self.assertEqual(updated_record.name, record.name) + self.assertEqual(updated_record.zone, record.zone) + self.assertEqual(updated_record.type, record.type) + self.assertEqual(updated_record.data, '127.3.3.3') + + def test_delete_zone_success(self): + zone = self.driver.list_zones()[0] + status = self.driver.delete_zone(zone=zone) + self.assertTrue(status) + + def test_delete_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + RackspaceMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.delete_zone(zone=zone) + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, zone.id) + else: + self.fail('Exception was not thrown') + + def test_delete_record_success(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + status = self.driver.delete_record(record=record) + self.assertTrue(status) + + def test_delete_record_does_not_exist(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + + RackspaceMockHttp.type = 'RECORD_DOES_NOT_EXIST' + + try: + self.driver.delete_record(record=record) + except RecordDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.record_id, 
record.id) + else: + self.fail('Exception was not thrown') + + def test_to_full_record_name_name_provided(self): + domain = 'foo.bar' + name = 'test' + self.assertEqual(self.driver._to_full_record_name(domain, name), + 'test.foo.bar') + + def test_to_full_record_name_name_not_provided(self): + domain = 'foo.bar' + name = None + self.assertEqual(self.driver._to_full_record_name(domain, name), + 'foo.bar') + + def test_to_partial_record_name(self): + domain = 'example.com' + names = ['test.example.com', 'foo.bar.example.com', + 'example.com.example.com', 'example.com'] + expected_values = ['test', 'foo.bar', 'example.com', None] + + for name, expected_value in zip(names, expected_values): + value = self.driver._to_partial_record_name(domain=domain, + name=name) + self.assertEqual(value, expected_value) + + +class RackspaceUKTests(RackspaceUSTests): + klass = RackspaceUKDNSDriver + endpoint_url = 'https://lon.dns.api.rackspacecloud.com/v1.0/11111' + + +class RackspaceMockHttp(MockHttp): + fixtures = DNSFileFixtures('rackspace') + base_headers = {'content-type': 'application/json'} + + def _v2_0_tokens(self, method, url, body, headers): + body = self.fixtures.load('auth_2_0.json') + headers = { + 'content-type': 'application/json' + } + return (httplib.OK, body, headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains(self, method, url, body, headers): + body = self.fixtures.load('list_zones_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_413(self, method, url, body, headers): + body = '' + return (httplib.REQUEST_ENTITY_TOO_LARGE, body, self.base_headers, + httplib.responses[httplib.REQUEST_ENTITY_TOO_LARGE]) + + def _v1_0_11111_domains_NO_RESULTS(self, method, url, body, headers): + body = self.fixtures.load('list_zones_no_results.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_2946063(self, method, url, body, 
headers): + if method == 'GET': + body = self.fixtures.load('list_records_success.json') + elif method == 'PUT': + # Async - update_zone + body = self.fixtures.load('update_zone_success.json') + elif method == 'DELETE': + # Aync - delete_zone + body = self.fixtures.load('delete_zone_success.json') + + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_2946063_NO_RESULTS(self, method, url, body, + headers): + body = self.fixtures.load('list_records_no_results.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_2946063_ZONE_DOES_NOT_EXIST(self, method, url, + body, headers): + body = self.fixtures.load('does_not_exist.json') + return (httplib.NOT_FOUND, body, self.base_headers, + httplib.responses[httplib.NOT_FOUND]) + + def _v1_0_11111_domains_2946063_GET_ZONE(self, method, url, body, headers): + body = self.fixtures.load('get_zone_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_4444_DOES_NOT_EXIST(self, method, url, body, + headers): + body = self.fixtures.load('does_not_exist.json') + return (httplib.NOT_FOUND, body, self.base_headers, + httplib.responses[httplib.NOT_FOUND]) + + def _v1_0_11111_domains_12345678(self, method, url, body, headers): + body = self.fixtures.load('get_zone_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_12345678_records_23456789(self, method, url, body, + headers): + body = self.fixtures.load('get_record_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_444_ZONE_DOES_NOT_EXIST(self, method, url, body, + headers): + body = self.fixtures.load('does_not_exist.json') + return (httplib.NOT_FOUND, body, self.base_headers, + httplib.responses[httplib.NOT_FOUND]) + + def 
_v1_0_11111_domains_12345678_RECORD_DOES_NOT_EXIST(self, method, url, + body, headers): + body = self.fixtures.load('get_zone_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_12345678_records_28536_RECORD_DOES_NOT_EXIST(self, method, url, body, headers): + body = self.fixtures.load('does_not_exist.json') + return (httplib.NOT_FOUND, body, self.base_headers, + httplib.responses[httplib.NOT_FOUND]) + + def _v1_0_11111_domains_CREATE_ZONE(self, method, url, body, headers): + # Async response - create_zone + body = self.fixtures.load('create_zone_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_status_288795f9_e74d_48be_880b_a9e36e0de61e_CREATE_ZONE(self, method, url, body, headers): + # Async status - create_zone + body = self.fixtures.load('create_zone_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_CREATE_ZONE_VALIDATION_ERROR(self, method, url, body, headers): + body = self.fixtures.load('create_zone_validation_error.json') + return (httplib.BAD_REQUEST, body, self.base_headers, + httplib.responses[httplib.BAD_REQUEST]) + + def _v1_0_11111_status_116a8f17_38ac_4862_827c_506cd04800d5(self, method, url, body, headers): + # Aync status - update_zone + body = self.fixtures.load('update_zone_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_status_586605c8_5739_43fb_8939_f3a2c4c0e99c_CREATE_RECORD(self, method, url, body, headers): + # Aync status - create_record + body = self.fixtures.load('create_record_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_2946063_records_CREATE_RECORD(self, method, url, body, headers): + # Aync response - create_record + body = self.fixtures.load('create_record_success.json') + return 
(httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_status_251c0d0c_95bc_4e09_b99f_4b8748b66246(self, method, url, body, headers): + # Aync response - update_record + body = self.fixtures.load('update_record_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_2946063_records_A_7423034(self, method, url, body, + headers): + # Aync response - update_record + body = self.fixtures.load('update_record_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_status_0b40cd14_2e5d_490f_bb6e_fdc65d1118a9(self, method, + url, body, + headers): + # Async status - delete_zone + body = self.fixtures.load('delete_zone_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_status_0b40cd14_2e5d_490f_bb6e_fdc65d1118a9_RECORD_DOES_NOT_EXIST(self, method, url, body, headers): + # Async status - delete_record + body = self.fixtures.load('delete_record_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_2946063_records_A_7423034_RECORD_DOES_NOT_EXIST(self, method, url, body, headers): + # Async response - delete_record + body = self.fixtures.load('does_not_exist.json') + return (httplib.NOT_FOUND, body, self.base_headers, + httplib.responses[httplib.NOT_FOUND]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/dns/test_route53.py libcloud-0.15.1/libcloud/test/dns/test_route53.py --- libcloud-0.5.0/libcloud/test/dns/test_route53.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/test_route53.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,267 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest + +from libcloud.utils.py3 import httplib + +from libcloud.dns.types import RecordType, ZoneDoesNotExistError +from libcloud.dns.types import RecordDoesNotExistError +from libcloud.dns.drivers.route53 import Route53DNSDriver +from libcloud.test import MockHttp +from libcloud.test.file_fixtures import DNSFileFixtures +from libcloud.test.secrets import DNS_PARAMS_ROUTE53 + + +class Route53Tests(unittest.TestCase): + def setUp(self): + Route53DNSDriver.connectionCls.conn_classes = ( + Route53MockHttp, Route53MockHttp) + Route53MockHttp.type = None + self.driver = Route53DNSDriver(*DNS_PARAMS_ROUTE53) + + def test_list_record_types(self): + record_types = self.driver.list_record_types() + self.assertEqual(len(record_types), 10) + self.assertTrue(RecordType.A in record_types) + + def test_list_zones(self): + zones = self.driver.list_zones() + self.assertEqual(len(zones), 5) + + zone = zones[0] + self.assertEqual(zone.id, '47234') + self.assertEqual(zone.type, 'master') + self.assertEqual(zone.domain, 't.com') + + def test_list_records(self): + zone = self.driver.list_zones()[0] + records = self.driver.list_records(zone=zone) + self.assertEqual(len(records), 10) + + record = records[1] + self.assertEqual(record.name, 'www') + self.assertEqual(record.id, 'A:www') + 
self.assertEqual(record.type, RecordType.A) + self.assertEqual(record.data, '208.111.35.173') + self.assertEqual(record.extra['ttl'], 86400) + + record = records[3] + self.assertEqual(record.type, RecordType.MX) + self.assertEqual(record.data, 'ASPMX.L.GOOGLE.COM.') + self.assertEqual(record.extra['priority'], 1) + + record = records[4] + self.assertEqual(record.type, RecordType.MX) + self.assertEqual(record.data, 'ALT1.ASPMX.L.GOOGLE.COM.') + self.assertEqual(record.extra['priority'], 5) + + record = records[8] + self.assertEqual(record.type, RecordType.SRV) + self.assertEqual(record.data, 'xmpp-server.example.com.') + self.assertEqual(record.extra['priority'], 1) + self.assertEqual(record.extra['weight'], 10) + self.assertEqual(record.extra['port'], 5269) + + def test_get_zone(self): + zone = self.driver.get_zone(zone_id='47234') + self.assertEqual(zone.id, '47234') + self.assertEqual(zone.type, 'master') + self.assertEqual(zone.domain, 't.com') + + def test_get_record(self): + record = self.driver.get_record(zone_id='47234', + record_id='CNAME:wibble') + self.assertEqual(record.name, 'wibble') + self.assertEqual(record.type, RecordType.CNAME) + self.assertEqual(record.data, 't.com') + + def test_list_records_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + Route53MockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.list_records(zone=zone) + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, zone.id) + else: + self.fail('Exception was not thrown') + + def test_get_zone_does_not_exist(self): + Route53MockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_zone(zone_id='47234') + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, '47234') + else: + self.fail('Exception was not thrown') + + def test_get_record_zone_does_not_exist(self): + Route53MockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_record(zone_id='4444', record_id='28536') + except 
ZoneDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_get_record_record_does_not_exist(self): + Route53MockHttp.type = 'RECORD_DOES_NOT_EXIST' + + rid = 'CNAME:doesnotexist.t.com' + try: + self.driver.get_record(zone_id='47234', + record_id=rid) + except RecordDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_zone(self): + zone = self.driver.create_zone(domain='t.com', type='master', + ttl=None, extra=None) + self.assertEqual(zone.id, '47234') + self.assertEqual(zone.domain, 't.com') + + def test_create_record(self): + zone = self.driver.list_zones()[0] + record = self.driver.create_record( + name='www', zone=zone, + type=RecordType.A, data='127.0.0.1', + extra={'ttl': 0} + ) + + self.assertEqual(record.id, 'A:www') + self.assertEqual(record.name, 'www') + self.assertEqual(record.zone, zone) + self.assertEqual(record.type, RecordType.A) + self.assertEqual(record.data, '127.0.0.1') + + def test_update_record(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[1] + + params = { + 'record': record, + 'name': 'www', + 'type': RecordType.A, + 'data': '::1', + 'extra': {'ttle': 0}} + updated_record = self.driver.update_record(**params) + + self.assertEqual(record.data, '208.111.35.173') + + self.assertEqual(updated_record.id, 'A:www') + self.assertEqual(updated_record.name, 'www') + self.assertEqual(updated_record.zone, record.zone) + self.assertEqual(updated_record.type, RecordType.A) + self.assertEqual(updated_record.data, '::1') + + def test_delete_zone(self): + zone = self.driver.list_zones()[0] + status = self.driver.delete_zone(zone=zone) + self.assertTrue(status) + + def test_delete_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + Route53MockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.delete_zone(zone=zone) + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, zone.id) + else: + 
self.fail('Exception was not thrown') + + def test_delete_record(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + status = self.driver.delete_record(record=record) + self.assertTrue(status) + + def test_delete_record_does_not_exist(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + Route53MockHttp.type = 'RECORD_DOES_NOT_EXIST' + try: + self.driver.delete_record(record=record) + except RecordDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.record_id, record.id) + else: + self.fail('Exception was not thrown') + + +class Route53MockHttp(MockHttp): + fixtures = DNSFileFixtures('route53') + + def _2012_02_29_hostedzone_47234(self, method, url, body, headers): + body = self.fixtures.load('get_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _2012_02_29_hostedzone(self, method, url, body, headers): + # print method, url, body, headers + if method == "POST": + body = self.fixtures.load("create_zone.xml") + return (httplib.CREATED, body, {}, httplib.responses[httplib.OK]) + body = self.fixtures.load('list_zones.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _2012_02_29_hostedzone_47234_rrset(self, method, url, body, headers): + body = self.fixtures.load('list_records.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _2012_02_29_hostedzone_47234_rrset_ZONE_DOES_NOT_EXIST(self, method, + url, body, headers): + body = self.fixtures.load('zone_does_not_exist.xml') + return (httplib.NOT_FOUND, body, + {}, httplib.responses[httplib.NOT_FOUND]) + + def _2012_02_29_hostedzone_4444_ZONE_DOES_NOT_EXIST(self, method, + url, body, headers): + body = self.fixtures.load('zone_does_not_exist.xml') + return (httplib.NOT_FOUND, body, + {}, httplib.responses[httplib.NOT_FOUND]) + + def _2012_02_29_hostedzone_47234_ZONE_DOES_NOT_EXIST(self, method, + url, body, headers): + body = 
self.fixtures.load('zone_does_not_exist.xml') + return (httplib.NOT_FOUND, body, + {}, httplib.responses[httplib.NOT_FOUND]) + + def _2012_02_29_hostedzone_47234_rrset_RECORD_DOES_NOT_EXIST(self, method, + url, body, headers): + if method == "POST": + body = self.fixtures.load('invalid_change_batch.xml') + return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.BAD_REQUEST]) + body = self.fixtures.load('record_does_not_exist.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _2012_02_29_hostedzone_47234_RECORD_DOES_NOT_EXIST(self, method, + url, body, headers): + body = self.fixtures.load('get_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/dns/test_zerigo.py libcloud-0.15.1/libcloud/test/dns/test_zerigo.py --- libcloud-0.5.0/libcloud/test/dns/test_zerigo.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/dns/test_zerigo.py 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,374 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and + +import sys +import unittest + +from libcloud.utils.py3 import httplib + +from libcloud.common.types import InvalidCredsError, LibcloudError +from libcloud.dns.types import RecordType, ZoneDoesNotExistError +from libcloud.dns.types import RecordDoesNotExistError +from libcloud.dns.drivers.zerigo import ZerigoDNSDriver, ZerigoError + +from libcloud.test import MockHttp +from libcloud.test.file_fixtures import DNSFileFixtures +from libcloud.test.secrets import DNS_PARAMS_ZERIGO + + +class ZerigoTests(unittest.TestCase): + def setUp(self): + ZerigoDNSDriver.connectionCls.conn_classes = ( + None, ZerigoMockHttp) + ZerigoMockHttp.type = None + self.driver = ZerigoDNSDriver(*DNS_PARAMS_ZERIGO) + + def test_invalid_credentials(self): + ZerigoMockHttp.type = 'INVALID_CREDS' + + try: + list(self.driver.list_zones()) + except InvalidCredsError: + pass + else: + self.fail('Exception was not thrown') + + def test_list_record_types(self): + record_types = self.driver.list_record_types() + self.assertEqual(len(record_types), 13) + self.assertTrue(RecordType.A in record_types) + + def test_list_zones_success(self): + zones = self.driver.list_zones() + self.assertEqual(len(zones), 1) + self.assertEqual(zones[0].domain, 'example.com') + self.assertEqual(zones[0].type, 'master') + self.assertEqual(zones[0].extra['notes'], 'test foo bar') + + def test_list_zones_no_results(self): + ZerigoMockHttp.type = 'NO_RESULTS' + zones = self.driver.list_zones() + self.assertEqual(len(zones), 0) + + def test_list_records_success(self): + zone = self.driver.list_zones()[0] + records = list(self.driver.list_records(zone=zone)) + + self.assertEqual(len(records), 4) + self.assertEqual(records[0].name, 'www') + self.assertEqual(records[0].type, RecordType.A) + self.assertEqual(records[0].data, '172.16.16.1') + self.assertEqual(records[0].extra['fqdn'], 'www.example.com') + self.assertEqual(records[0].extra['notes'], None) + 
self.assertEqual(records[0].extra['priority'], None) + + self.assertEqual(records[1].name, 'test') + self.assertEqual(records[1].extra['ttl'], 3600) + + def test_record_with_empty_name(self): + zone = self.driver.list_zones()[0] + record1 = list(self.driver.list_records(zone=zone))[-1] + record2 = list(self.driver.list_records(zone=zone))[-2] + + self.assertEqual(record1.name, None) + self.assertEqual(record2.name, None) + + def test_list_records_no_results(self): + zone = self.driver.list_zones()[0] + ZerigoMockHttp.type = 'NO_RESULTS' + records = list(self.driver.list_records(zone=zone)) + self.assertEqual(len(records), 0) + + def test_list_records_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + ZerigoMockHttp.type = 'ZONE_DOES_NOT_EXIST' + try: + list(self.driver.list_records(zone=zone)) + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, zone.id) + else: + self.fail('Exception was not thrown') + pass + + def test_get_zone_success(self): + zone = self.driver.get_zone(zone_id=12345678) + + self.assertEqual(zone.id, '12345678') + self.assertEqual(zone.domain, 'example.com') + self.assertEqual(zone.extra['hostmaster'], 'dnsadmin@example.com') + self.assertEqual(zone.type, 'master') + + def test_get_zone_does_not_exist(self): + ZerigoMockHttp.type = 'DOES_NOT_EXIST' + + try: + self.driver.get_zone(zone_id='4444') + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, '4444') + else: + self.fail('Exception was not thrown') + + def test_get_record_success(self): + record = self.driver.get_record(zone_id='12345678', + record_id='23456789') + self.assertEqual(record.id, '23456789') + self.assertEqual(record.name, 'www') + self.assertEqual(record.type, RecordType.A) + + def test_get_record_zone_does_not_exist(self): + ZerigoMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_record(zone_id='444', record_id='28536') + except ZoneDoesNotExistError: + pass + else: + 
self.fail('Exception was not thrown') + + def test_get_record_record_does_not_exist(self): + ZerigoMockHttp.type = 'RECORD_DOES_NOT_EXIST' + + try: + self.driver.get_record(zone_id='12345678', + record_id='28536') + except RecordDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_zone_success(self): + ZerigoMockHttp.type = 'CREATE_ZONE' + + zone = self.driver.create_zone(domain='foo.bar.com', type='master', + ttl=None, extra=None) + self.assertEqual(zone.id, '12345679') + self.assertEqual(zone.domain, 'foo.bar.com') + + def test_create_zone_validaton_error(self): + ZerigoMockHttp.type = 'CREATE_ZONE_VALIDATION_ERROR' + + try: + self.driver.create_zone(domain='foo.bar.com', type='master', + ttl=10, extra=None) + except ZerigoError: + e = sys.exc_info()[1] + self.assertEqual(len(e.errors), 2) + else: + self.fail('Exception was not thrown') + + def test_update_zone_success(self): + zone = self.driver.list_zones()[0] + updated_zone = self.driver.update_zone(zone=zone, + ttl=10, + extra={'notes': + 'bar foo'}) + + self.assertEqual(zone.extra['notes'], 'test foo bar') + + self.assertEqual(updated_zone.id, zone.id) + self.assertEqual(updated_zone.domain, 'example.com') + self.assertEqual(updated_zone.type, zone.type) + self.assertEqual(updated_zone.ttl, 10) + self.assertEqual(updated_zone.extra['notes'], 'bar foo') + + def test_update_zone_domain_cannot_be_changed(self): + zone = self.driver.list_zones()[0] + + try: + self.driver.update_zone(zone=zone, domain='libcloud.org') + except LibcloudError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_record_success(self): + zone = self.driver.list_zones()[0] + + ZerigoMockHttp.type = 'CREATE_RECORD' + record = self.driver.create_record(name='www', zone=zone, + type=RecordType.A, data='127.0.0.1') + + self.assertEqual(record.id, '23456780') + self.assertEqual(record.name, 'www') + self.assertEqual(record.zone, zone) + self.assertEqual(record.type, 
RecordType.A) + self.assertEqual(record.data, '127.0.0.1') + + def test_update_record_success(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + updated_record = self.driver.update_record(record=record, name='www', + type=RecordType.AAAA, + data='::1') + + self.assertEqual(record.data, '172.16.16.1') + + self.assertEqual(updated_record.id, record.id) + self.assertEqual(updated_record.name, 'www') + self.assertEqual(updated_record.zone, record.zone) + self.assertEqual(updated_record.type, RecordType.AAAA) + self.assertEqual(updated_record.data, '::1') + + def test_delete_zone_success(self): + zone = self.driver.list_zones()[0] + status = self.driver.delete_zone(zone=zone) + self.assertTrue(status) + + def test_delete_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + ZerigoMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.delete_zone(zone=zone) + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, zone.id) + else: + self.fail('Exception was not thrown') + + def test_delete_record_success(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + status = self.driver.delete_record(record=record) + self.assertTrue(status) + + def test_delete_record_does_not_exist(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + + ZerigoMockHttp.type = 'RECORD_DOES_NOT_EXIST' + + try: + self.driver.delete_record(record=record) + except RecordDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.record_id, record.id) + else: + self.fail('Exception was not thrown') + + +class ZerigoMockHttp(MockHttp): + fixtures = DNSFileFixtures('zerigo') + + def _api_1_1_zones_xml_INVALID_CREDS(self, method, url, body, headers): + body = 'HTTP Basic: Access denied.\n' + return (httplib.UNAUTHORIZED, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_zones_xml(self, method, url, body, headers): + body 
= self.fixtures.load('list_zones.xml') + return (httplib.OK, body, {'x-query-count': 1}, + httplib.responses[httplib.OK]) + + def _api_1_1_zones_xml_NO_RESULTS(self, method, url, body, headers): + body = self.fixtures.load('list_zones_no_results.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_zones_12345678_hosts_xml(self, method, url, body, headers): + body = self.fixtures.load('list_records.xml') + return (httplib.OK, body, {'x-query-count': 1}, + httplib.responses[httplib.OK]) + + def _api_1_1_zones_12345678_hosts_xml_NO_RESULTS(self, method, url, body, + headers): + body = self.fixtures.load('list_records_no_results.xml') + return (httplib.OK, body, {'x-query-count': 0}, + httplib.responses[httplib.OK]) + + def _api_1_1_zones_12345678_hosts_xml_ZONE_DOES_NOT_EXIST(self, method, + url, body, + headers): + body = '' + return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_zones_12345678_xml(self, method, url, body, headers): + body = self.fixtures.load('get_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_zones_4444_xml_DOES_NOT_EXIST(self, method, url, body, + headers): + body = '' + return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_hosts_23456789_xml(self, method, url, body, headers): + body = self.fixtures.load('get_record.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_zones_444_xml_ZONE_DOES_NOT_EXIST(self, method, url, body, + headers): + body = '' + return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_zones_12345678_xml_RECORD_DOES_NOT_EXIST(self, method, url, + body, headers): + body = self.fixtures.load('get_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_hosts_28536_xml_RECORD_DOES_NOT_EXIST(self, method, url, body, + headers): + body = '' + return (httplib.NOT_FOUND, body, {}, 
httplib.responses[httplib.OK]) + + def _api_1_1_zones_xml_CREATE_ZONE(self, method, url, body, headers): + body = self.fixtures.load('create_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_zones_xml_CREATE_ZONE_VALIDATION_ERROR(self, method, url, + body, headers): + body = self.fixtures.load('create_zone_validation_error.xml') + return (httplib.UNPROCESSABLE_ENTITY, body, {}, + httplib.responses[httplib.OK]) + + def _api_1_1_zones_12345678_hosts_xml_CREATE_RECORD(self, method, url, + body, headers): + body = self.fixtures.load('create_record.xml') + return (httplib.CREATED, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_zones_12345678_xml_ZONE_DOES_NOT_EXIST(self, method, url, + body, headers): + body = '' + return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_hosts_23456789_xml_RECORD_DOES_NOT_EXIST(self, method, url, + body, headers): + body = '' + return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK]) + + """ + def (self, method, url, body, headers): + body = self.fixtures.load('.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def (self, method, url, body, headers): + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def (self, method, url, body, headers): + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def (self, method, url, body, headers): + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def (self, method, url, body, headers): + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def (self, method, url, body, headers): + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + """ + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/file_fixtures.py libcloud-0.15.1/libcloud/test/file_fixtures.py --- libcloud-0.5.0/libcloud/test/file_fixtures.py 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/file_fixtures.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,81 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Helper class for loading large fixture data +from __future__ import with_statement + +import os + +from libcloud.utils.py3 import PY3 +from libcloud.utils.py3 import u + +FIXTURES_ROOT = { + 'compute': 'compute/fixtures', + 'storage': 'storage/fixtures', + 'loadbalancer': 'loadbalancer/fixtures', + 'dns': 'dns/fixtures', + 'openstack': 'compute/fixtures/openstack', +} + + +class FileFixtures(object): + def __init__(self, fixtures_type, sub_dir=''): + script_dir = os.path.abspath(os.path.split(__file__)[0]) + self.root = os.path.join(script_dir, FIXTURES_ROOT[fixtures_type], + sub_dir) + + def load(self, file): + path = os.path.join(self.root, file) + if os.path.exists(path): + if PY3: + kwargs = {'encoding': 'utf-8'} + else: + kwargs = {} + + with open(path, 'r', **kwargs) as fh: + content = fh.read() + return u(content) + else: + raise IOError(path) + + +class ComputeFileFixtures(FileFixtures): + def __init__(self, sub_dir=''): + super(ComputeFileFixtures, self).__init__(fixtures_type='compute', + sub_dir=sub_dir) + + +class StorageFileFixtures(FileFixtures): + def __init__(self, 
sub_dir=''): + super(StorageFileFixtures, self).__init__(fixtures_type='storage', + sub_dir=sub_dir) + + +class LoadBalancerFileFixtures(FileFixtures): + def __init__(self, sub_dir=''): + super(LoadBalancerFileFixtures, self).__init__(fixtures_type='loadbalancer', + sub_dir=sub_dir) + + +class DNSFileFixtures(FileFixtures): + def __init__(self, sub_dir=''): + super(DNSFileFixtures, self).__init__(fixtures_type='dns', + sub_dir=sub_dir) + + +class OpenStackFixtures(FileFixtures): + def __init__(self, sub_dir=''): + super(OpenStackFixtures, self).__init__(fixtures_type='openstack', + sub_dir=sub_dir) diff -Nru libcloud-0.5.0/libcloud/test/__init__.py libcloud-0.15.1/libcloud/test/__init__.py --- libcloud-0.5.0/libcloud/test/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/__init__.py 2013-11-29 12:35:04.000000000 +0000 @@ -0,0 +1,345 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import random + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import StringIO +from libcloud.utils.py3 import urlparse +from libcloud.utils.py3 import parse_qs +from libcloud.utils.py3 import parse_qsl +from libcloud.utils.py3 import u +from libcloud.utils.py3 import unittest2_required + +if unittest2_required: + import unittest2 as unittest +else: + import unittest + + +XML_HEADERS = {'content-type': 'application/xml'} + + +class LibcloudTestCase(unittest.TestCase): + def __init__(self, *args, **kwargs): + self._visited_urls = [] + self._executed_mock_methods = [] + super(LibcloudTestCase, self).__init__(*args, **kwargs) + + def setUp(self): + self._visited_urls = [] + self._executed_mock_methods = [] + + def _add_visited_url(self, url): + self._visited_urls.append(url) + + def _add_executed_mock_method(self, method_name): + self._executed_mock_methods.append(method_name) + + def assertExecutedMethodCount(self, expected): + actual = len(self._executed_mock_methods) + self.assertEqual(actual, expected, + 'expected %d, but %d mock methods were executed' + % (expected, actual)) + + +class multipleresponse(object): + """ + A decorator that allows MockHttp objects to return multi responses + """ + count = 0 + func = None + + def __init__(self, f): + self.func = f + + def __call__(self, *args, **kwargs): + ret = self.func(self.func.__class__, *args, **kwargs) + response = ret[self.count] + self.count = self.count + 1 + return response + + +class MockResponse(object): + """ + A mock HTTPResponse + """ + headers = {} + body = StringIO() + status = 0 + reason = '' + version = 11 + + def __init__(self, status, body=None, headers=None, reason=None): + self.status = status + self.body = StringIO(u(body)) if body else StringIO() + self.headers = headers or self.headers + self.reason = reason or self.reason + + def read(self, *args, **kwargs): + return self.body.read(*args, **kwargs) + + def next(self): + if sys.version_info >= (2, 5) and 
sys.version_info <= (2, 6): + return self.body.next() + else: + return next(self.body) + + def __next__(self): + return self.next() + + def getheader(self, name, *args, **kwargs): + return self.headers.get(name, *args, **kwargs) + + def getheaders(self): + return list(self.headers.items()) + + def msg(self): + raise NotImplemented + + +class BaseMockHttpObject(object): + def _get_method_name(self, type, use_param, qs, path): + path = path.split('?')[0] + meth_name = path.replace('/', '_').replace('.', '_').replace('-', '_') + + if type: + meth_name = '%s_%s' % (meth_name, self.type) + + if use_param and use_param in qs: + param = qs[use_param][0].replace('.', '_').replace('-', '_') + meth_name = '%s_%s' % (meth_name, param) + + return meth_name + + +class MockHttp(BaseMockHttpObject): + """ + A mock HTTP client/server suitable for testing purposes. This replaces + `HTTPConnection` by implementing its API and returning a mock response. + + Define methods by request path, replacing slashes (/) with underscores (_). + Each of these mock methods should return a tuple of: + + (int status, str body, dict headers, str reason) + + >>> mock = MockHttp('localhost', 8080) + >>> mock.request('GET', '/example/') + >>> response = mock.getresponse() + >>> response.body.read() + 'Hello World!' + >>> response.status + 200 + >>> response.getheaders() + [('X-Foo', 'libcloud')] + >>> MockHttp.type = 'fail' + >>> mock.request('GET', '/example/') + >>> response = mock.getresponse() + >>> response.body.read() + 'Oh Noes!' 
+ >>> response.status + 403 + >>> response.getheaders() + [('X-Foo', 'fail')] + + """ + responseCls = MockResponse + host = None + port = None + response = None + + type = None + use_param = None # will use this param to namespace the request function + + test = None # TestCase instance which is using this mock + + def __init__(self, host, port, *args, **kwargs): + self.host = host + self.port = port + + def request(self, method, url, body=None, headers=None, raw=False): + # Find a method we can use for this request + parsed = urlparse.urlparse(url) + scheme, netloc, path, params, query, fragment = parsed + qs = parse_qs(query) + if path.endswith('/'): + path = path[:-1] + meth_name = self._get_method_name(type=self.type, + use_param=self.use_param, + qs=qs, path=path) + meth = getattr(self, meth_name.replace('%', '_')) + + if self.test and isinstance(self.test, LibcloudTestCase): + self.test._add_visited_url(url=url) + self.test._add_executed_mock_method(method_name=meth_name) + + status, body, headers, reason = meth(method, url, body, headers) + self.response = self.responseCls(status, body, headers, reason) + + def getresponse(self): + return self.response + + def connect(self): + """ + Can't think of anything to mock here. + """ + pass + + def close(self): + pass + + # Mock request/response example + def _example(self, method, url, body, headers): + """ + Return a simple message and header, regardless of input. + """ + return (httplib.OK, 'Hello World!', {'X-Foo': 'libcloud'}, + httplib.responses[httplib.OK]) + + def _example_fail(self, method, url, body, headers): + return (httplib.FORBIDDEN, 'Oh Noes!', {'X-Foo': 'fail'}, + httplib.responses[httplib.FORBIDDEN]) + + +class MockHttpTestCase(MockHttp, unittest.TestCase): + # Same as the MockHttp class, but you can also use assertions in the + # classes which inherit from this one. 
+ def __init__(self, *args, **kwargs): + unittest.TestCase.__init__(self) + + if kwargs.get('host', None) and kwargs.get('port', None): + MockHttp.__init__(self, *args, **kwargs) + + def runTest(self): + pass + + def assertUrlContainsQueryParams(self, url, expected_params, strict=False): + """ + Assert that provided url contains provided query parameters. + + :param url: URL to assert. + :type url: ``str`` + + :param expected_params: Dictionary of expected query parameters. + :type expected_params: ``dict`` + + :param strict: Assert that provided url contains only expected_params. + (defaults to ``False``) + :type strict: ``bool`` + """ + question_mark_index = url.find('?') + + if question_mark_index != -1: + url = url[question_mark_index + 1:] + + params = dict(parse_qsl(url)) + + if strict: + self.assertDictEqual(params, expected_params) + else: + for key, value in expected_params.items(): + self.assertEqual(params[key], value) + + +class StorageMockHttp(MockHttp): + def putrequest(self, method, action): + pass + + def putheader(self, key, value): + pass + + def endheaders(self): + pass + + def send(self, data): + pass + + +class MockRawResponse(BaseMockHttpObject): + """ + Mock RawResponse object suitable for testing. 
+ """ + + type = None + responseCls = MockResponse + + def __init__(self, connection): + super(MockRawResponse, self).__init__() + self._data = [] + self._current_item = 0 + + self._status = None + self._response = None + self._headers = None + self._reason = None + self.connection = connection + + def next(self): + if self._current_item == len(self._data): + raise StopIteration + + value = self._data[self._current_item] + self._current_item += 1 + return value + + def __next__(self): + return self.next() + + def _generate_random_data(self, size): + data = '' + current_size = 0 + while current_size < size: + value = str(random.randint(0, 9)) + value_size = len(value) + data += value + current_size += value_size + + return data + + @property + def response(self): + return self._get_response_if_not_availale() + + @property + def status(self): + self._get_response_if_not_availale() + return self._status + + @property + def headers(self): + self._get_response_if_not_availale() + return self._headers + + @property + def reason(self): + self._get_response_if_not_availale() + return self._reason + + def _get_response_if_not_availale(self): + if not self._response: + meth_name = self._get_method_name(type=self.type, + use_param=False, qs=None, + path=self.connection.action) + meth = getattr(self, meth_name.replace('%', '_')) + result = meth(self.connection.method, None, None, None) + self._status, self._body, self._headers, self._reason = result + self._response = self.responseCls(self._status, self._body, + self._headers, self._reason) + return self._response + +if __name__ == "__main__": + import doctest + doctest.testmod() diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/brightbox/load_balancers.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/brightbox/load_balancers.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/brightbox/load_balancers.json 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/brightbox/load_balancers.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,22 @@ +[{"id": "lba-1235f", + "resource_type": "load_balancer", + "url": "https://api.gb1.brightbox.com/1.0/load_balancers/lba-1235f", + "name": "lb1", + "created_at": "2011-10-06T14:50:28Z", + "deleted_at": null, + "status": "active", + "listeners": [{"out": 80, "protocol": "http", "in": 80}], + "cloud_ips": + [{"id": "cip-c2v98", + "public_ip": "109.107.37.179", + "resource_type": "cloud_ip", + "reverse_dns": "cip-109-107-37-179.gb1.brightbox.com", + "status": "mapped", + "url": "https://api.gb1.brightbox.com/1.0/cloud_ips/cip-c2v98"}], + "account": + {"id": "acc-43ks4", + "resource_type": "account", + "url": "https://api.gb1.brightbox.com/1.0/account", + "name": "Brightbox", + "status": "active"}, + "nodes": []}] diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/brightbox/load_balancers_lba_1235f.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/brightbox/load_balancers_lba_1235f.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/brightbox/load_balancers_lba_1235f.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/brightbox/load_balancers_lba_1235f.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,38 @@ +{"id": "lba-1235f", + "resource_type": "load_balancer", + "url": "https://api.gb1.brightbox.com/1.0/load_balancers/lba-1235f", + "policy": "least-connections", + "name": "lb1", + "created_at": "2011-10-01T01:00:00Z", + "deleted_at": null, + "healthcheck": + {"threshold_down": 3, + "timeout": 5000, + "port": 80, + "request": "/", + "type": "http", + "interval": 5000, + "threshold_up": 3}, + "listeners": + [{"out": 80, + "protocol": "http", + "in": 80}], + "status": "active", + "cloud_ips": + [], + "account": + {"id": "acc-43ks4", + "resource_type": "account", + "url": "https://api.gb1.brightbox.com/1.0/account", + "name": "Brightbox", + "status": "active"}, + 
"nodes": + [{"id": "srv-lv426", + "resource_type": "server", + "url": "https://api.gb1.brightbox.com/1.0/servers/srv-lv426", + "name": "web1", + "created_at": "2011-10-01T01:00:00Z", + "deleted_at": null, + "hostname": "srv-lv426", + "started_at": "2011-10-01T01:01:00Z", + "status": "active"}]} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/brightbox/load_balancers_post.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/brightbox/load_balancers_post.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/brightbox/load_balancers_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/brightbox/load_balancers_post.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,38 @@ +{"id": "lba-o466u", + "resource_type": "load_balancer", + "url": "https://api.gb1.brightbox.com/1.0/load_balancers/lba-o466u", + "policy": "least-connections", + "name": "lb2", + "created_at": "2011-10-01T01:00:00Z", + "deleted_at": null, + "healthcheck": + {"threshold_down": 3, + "timeout": 5000, + "port": 80, + "request": "/", + "type": "http", + "interval": 5000, + "threshold_up": 3}, + "listeners": + [{"out": 80, + "protocol": "http", + "in": 80}], + "status": "creating", + "cloud_ips": + [], + "account": + {"id": "acc-43ks4", + "resource_type": "account", + "url": "https://api.gb1.brightbox.com/1.0/account", + "name": "Brightbox", + "status": "active"}, + "nodes": + [{"id": "srv-lv426", + "resource_type": "server", + "url": "https://api.gb1.brightbox.com/1.0/servers/srv-lv426", + "name": "web1", + "created_at": "2011-10-01T01:00:00Z", + "deleted_at": null, + "hostname": "srv-lv426", + "started_at": "2011-10-01T01:01:00Z", + "status": "active"}]} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/brightbox/token.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/brightbox/token.json --- 
libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/brightbox/token.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/brightbox/token.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"access_token": "k1bjflpsaj8wnrbrwzad0eqo36nxiha", "expires_in": 3600} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/assignToLoadBalancerRule_default.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/assignToLoadBalancerRule_default.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/assignToLoadBalancerRule_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/assignToLoadBalancerRule_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "assigntoloadbalancerruleresponse" : {"jobid":17341} } diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/associateIpAddress_default.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/associateIpAddress_default.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/associateIpAddress_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/associateIpAddress_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "associateipaddressresponse" : {"jobid":17346,"id":34000} } diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/createLoadBalancerRule_default.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/createLoadBalancerRule_default.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/createLoadBalancerRule_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/createLoadBalancerRule_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "createloadbalancerruleresponse" : { "loadbalancer" : 
{"id":2253,"name":"fake","publicipid":34000,"publicip":"1.1.1.49","publicport":"80","privateport":"80","algorithm":"roundrobin","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","state":"Add"} } } diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/deleteLoadBalancerRule_default.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/deleteLoadBalancerRule_default.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/deleteLoadBalancerRule_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/deleteLoadBalancerRule_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "deleteloadbalancerruleresponse" : {"jobid":17342} } diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/disassociateIpAddress_default.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/disassociateIpAddress_default.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/disassociateIpAddress_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/disassociateIpAddress_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "disassociateipaddressresponse" : {"jobid":17344} } diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRuleInstances_default.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRuleInstances_default.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRuleInstances_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRuleInstances_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listloadbalancerruleinstancesresponse" : { "loadbalancerruleinstance" : [ 
{"id":2614,"name":"test_1308874974","displayname":"test_1308874974","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-24T00:22:56+0000","state":"Running","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"0.14%","networkkbsread":2185,"networkkbswrite":109,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3914,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.3.122","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}, {"id":2615,"name":"test_1308875456","displayname":"test_1308875456","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-24T00:30:57+0000","state":"Running","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"0.14%","networkkbsread":1118,"networkkbswrite":75,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3915,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.2.62","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"} ] } } diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRules_default.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRules_default.json --- 
libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRules_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRules_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listloadbalancerrulesresponse" : { "loadbalancerrule" : [ {"id":2247,"name":"test","publicipid":34000,"publicip":"1.1.1.49","publicport":"80","privateport":"80","algorithm":"roundrobin","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","state":"Active"},{"id":2249,"name":"testmore","publicipid":34001,"publicip":"1.1.2.49","publicport":"80","privateport":"80","algorithm":"leastconn","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","state":"Active"} ] } } diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/listZones_default.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/listZones_default.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/listZones_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/listZones_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "listzonesresponse" : { "zone" : [ {"id":1,"name":"Sydney","networktype":"Advanced","securitygroupsenabled":false} ] } } diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17340.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17340.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17340.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17340.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17340,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true}} } 
diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17341.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17341.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17341.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17341.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17341,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true}} } diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17342.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17342.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17342.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17342.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17342,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true}} } diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17344.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17344.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17344.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17344.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17344,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true}} } diff -Nru 
libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17346.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17346.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17346.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17346.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17346,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"ipaddress":{"id":34000,"ipaddress":"1.1.1.49","allocated":"2011-06-24T05:52:55+0000","zoneid":1,"zonename":"Sydney","issourcenat":false,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocating"}}} } diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/removeFromLoadBalancerRule_default.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/removeFromLoadBalancerRule_default.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/cloudstack/removeFromLoadBalancerRule_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/cloudstack/removeFromLoadBalancerRule_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{ "removefromloadbalancerruleresponse" : {"jobid":17340} } diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/elb/create_load_balancer_policy.xml libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/elb/create_load_balancer_policy.xml --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/elb/create_load_balancer_policy.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/elb/create_load_balancer_policy.xml 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,3 @@ + + tests.example.com + \ No newline at 
end of file diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/elb/create_load_balancer.xml libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/elb/create_load_balancer.xml --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/elb/create_load_balancer.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/elb/create_load_balancer.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,3 @@ + + tests.example.com + diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/elb/deregister_instances_from_load_balancer.xml libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/elb/deregister_instances_from_load_balancer.xml --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/elb/deregister_instances_from_load_balancer.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/elb/deregister_instances_from_load_balancer.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/elb/describe_load_balancer_policies.xml libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/elb/describe_load_balancer_policies.xml --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/elb/describe_load_balancer_policies.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/elb/describe_load_balancer_policies.xml 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,29 @@ + + + + + MyDurationStickyPolicy + LBCookieStickinessPolicyType + + + CookieExpirationPeriod + 60 + + + + + MyAppStickyPolicy + AppCookieStickinessPolicyType + + + CookieName + MyAppCookie + + + + + + + 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/elb/describe_load_balancers_policy_types.xml libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/elb/describe_load_balancers_policy_types.xml --- 
libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/elb/describe_load_balancers_policy_types.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/elb/describe_load_balancers_policy_types.xml 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,21 @@ + + + + + + + ProxyProtocol + Boolean + ONE + + + ProxyProtocolPolicyType + Policy that controls whether to include the IP address and port of the originating request for TCP messages. + This policy operates on TCP/SSL listeners only + + + + + 1549581b-12b7-11e3-895e-1334aEXAMPLE + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/elb/describe_load_balancers.xml libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/elb/describe_load_balancers.xml --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/elb/describe_load_balancers.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/elb/describe_load_balancers.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,61 @@ + + + + + + + tests + 2013-01-01T00:00:00.19000Z + + 30 + TCP:22 + 10 + 5 + 2 + + vpc-56e10e3d + + + + AWSConsolePolicy-1 + + + HTTP + 80 + HTTP + 80 + + + + + + i-64bd081c + + + + + + + + AWSConsolePolicy-1 + 30 + + + + + us-east-1e + + tests.us-east-1.elb.amazonaws.com + Z3ZONEID + internet-facing + tests.us-east-1.elb.amazonaws.com + + + + + + + + f9880f01-7852-629d-a6c3-3ae2-666a409287e6dc0c + + diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/elb/set_load_balancer_policies_for_backend_server.xml libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/elb/set_load_balancer_policies_for_backend_server.xml --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/elb/set_load_balancer_policies_for_backend_server.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/elb/set_load_balancer_policies_for_backend_server.xml 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,6 @@ + + + + 
0eb9b381-dde0-11e2-8d78-6ddbaEXAMPLE + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/elb/set_load_balancer_policies_of_listener.xml libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/elb/set_load_balancer_policies_of_listener.xml --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/elb/set_load_balancer_policies_of_listener.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/elb/set_load_balancer_policies_of_listener.xml 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/gogrid/ip_list.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/gogrid/ip_list.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/gogrid/ip_list.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/gogrid/ip_list.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,316 @@ +{ + "list": [ + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868101, + "ip": "10.0.0.68", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868102, + "ip": "10.0.0.69", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868106, + "ip": "10.0.0.73", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + 
"name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868107, + "ip": "10.0.0.74", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868108, + "ip": "10.0.0.75", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868109, + "ip": "10.0.0.76", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868110, + "ip": "10.0.0.77", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868111, + "ip": "10.0.0.78", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + { + "datacenter": { + "description": "US East 1 Datacenter", + "id": 2, + "name": "US-East-1", + "object": 
"option" + }, + "id": 2277337, + "ip": "10.0.0.244", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.240/255.255.255.240" + }, + { + "datacenter": { + "description": "US East 1 Datacenter", + "id": 2, + "name": "US-East-1", + "object": "option" + }, + "id": 2277338, + "ip": "10.0.0.245", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.240/255.255.255.240" + }, + { + "datacenter": { + "description": "US East 1 Datacenter", + "id": 2, + "name": "US-East-1", + "object": "option" + }, + "id": 2277342, + "ip": "10.0.0.249", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.240/255.255.255.240" + }, + { + "datacenter": { + "description": "US East 1 Datacenter", + "id": 2, + "name": "US-East-1", + "object": "option" + }, + "id": 2277343, + "ip": "10.0.0.250", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.240/255.255.255.240" + }, + { + "datacenter": { + "description": "US East 1 Datacenter", + "id": 2, + "name": "US-East-1", + "object": "option" + }, + "id": 2277344, + "ip": "10.0.0.251", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.240/255.255.255.240" + }, + { + "datacenter": { + "description": "US East 1 Datacenter", + "id": 2, + "name": "US-East-1", + "object": "option" + }, + "id": 2277345, + "ip": "10.0.0.252", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": 
"option" + }, + "subnet": "10.0.0.240/255.255.255.240" + }, + { + "datacenter": { + "description": "US East 1 Datacenter", + "id": 2, + "name": "US-East-1", + "object": "option" + }, + "id": 2277346, + "ip": "10.0.0.253", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.240/255.255.255.240" + }, + { + "datacenter": { + "description": "US East 1 Datacenter", + "id": 2, + "name": "US-East-1", + "object": "option" + }, + "id": 2277347, + "ip": "10.0.0.254", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.240/255.255.255.240" + } + ], + "method": "/grid/ip/list", + "status": "success", + "summary": { + "numpages": 0, + "returned": 16, + "start": 0, + "total": 16 + } +} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_add.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_add.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_add.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_add.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,141 @@ +{ + "list": [ + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "name": "test2", + "id": 123, + "object": "loadbalancer", + "os": { + "description": "The F5 Load Balancer.", + "id": 1, + "name": "F5", + "object": "option" + }, + "persistence": { + "description": "", + "id": 1, + "name": "None", + "object": "option" + }, + "realiplist": [ + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868109, + "ip": "10.1.0.10", + "object": "ip", + "public": true, + "state": { + 
"description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.1.0.10/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + }, + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868110, + "ip": "10.1.0.11", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.1.0.11/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + }, + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868111, + "ip": "10.1.0.12", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.1.0.12/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + } + ], + "state": { + "description": "Loadbalancer is enabled and on.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "", + "id": 1, + "name": "Round Robin", + "object": "option" + }, + "virtualip": { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868101, + "ip": "1.1.1.1", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "1.1.1.1/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + } + } + ], + "method": "/grid/loadbalancer/add", + "status": "success", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_edit.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_edit.json --- 
libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_edit.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_edit.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,164 @@ +{ + "list": [ + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 23530, + "name": "test2", + "object": "loadbalancer", + "os": { + "description": "The F5 Load Balancer.", + "id": 1, + "name": "F5", + "object": "option" + }, + "persistence": { + "description": "", + "id": 1, + "name": "None", + "object": "option" + }, + "realiplist": [ + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868108, + "ip": "10.0.0.75", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + }, + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868109, + "ip": "10.0.0.76", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + }, + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868110, + "ip": "10.0.0.77", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + }, + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + 
"name": "US-West-1", + "object": "option" + }, + "id": 1868111, + "ip": "10.0.0.78", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + } + ], + "state": { + "description": "Loadbalancer is enabled and on.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "", + "id": 1, + "name": "Round Robin", + "object": "option" + }, + "virtualip": { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868101, + "ip": "10.0.0.68", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + } + } + ], + "method": "/grid/loadbalancer/edit", + "status": "success", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_get.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_get.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_get.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_get.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,141 @@ +{ + "list": [ + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 23530, + "name": "test2", + "object": "loadbalancer", + "os": { + "description": "The F5 Load Balancer.", + "id": 1, + "name": "F5", + "object": "option" + }, + "persistence": { + "description": "", + "id": 1, + "name": "None", + "object": "option" + }, + "realiplist": [ + { + "ip": { + 
"datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868109, + "ip": "10.0.0.76", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + }, + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868110, + "ip": "10.0.0.77", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + }, + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868111, + "ip": "10.0.0.78", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + } + ], + "state": { + "description": "Loadbalancer is enabled and on.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "", + "id": 1, + "name": "Round Robin", + "object": "option" + }, + "virtualip": { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868101, + "ip": "10.0.0.68", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + } + } + ], + "method": "/grid/loadbalancer/get", + "status": "success", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + 
"total": 1 + } +} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_list.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_list.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_list.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/gogrid/loadbalancer_list.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,224 @@ +{ + "list": [ + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 23517, + "name": "foo", + "object": "loadbalancer", + "os": { + "description": "The F5 Load Balancer.", + "id": 1, + "name": "F5", + "object": "option" + }, + "persistence": { + "description": "", + "id": 1, + "name": "None", + "object": "option" + }, + "realiplist": [ + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868111, + "ip": "10.0.0.78", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + } + ], + "state": { + "description": "Loadbalancer is enabled and on.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "", + "id": 1, + "name": "Round Robin", + "object": "option" + }, + "virtualip": { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868099, + "ip": "10.0.0.66", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + } + }, + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 
1, + "name": "US-West-1", + "object": "option" + }, + "id": 23526, + "name": "bar", + "object": "loadbalancer", + "os": { + "description": "The F5 Load Balancer.", + "id": 1, + "name": "F5", + "object": "option" + }, + "persistence": { + "description": "", + "id": 1, + "name": "None", + "object": "option" + }, + "realiplist": [ + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868109, + "ip": "10.0.0.76", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + }, + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868110, + "ip": "10.0.0.77", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + }, + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868111, + "ip": "10.0.0.78", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + } + ], + "state": { + "description": "Loadbalancer is enabled and on.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "", + "id": 1, + "name": "Round Robin", + "object": "option" + }, + "virtualip": { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868100, + "ip": "10.0.0.67", + "object": "ip", + "public": 
true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + } + } + ], + "method": "/grid/loadbalancer/list", + "status": "success", + "summary": { + "numpages": 0, + "returned": 2, + "start": 0, + "total": 2 + } +} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/gogrid/unexpected_error.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/gogrid/unexpected_error.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/gogrid/unexpected_error.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/gogrid/unexpected_error.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"summary":{"total":1,"start":0,"returned":1},"status":"failure","method":"/grid/loadbalancer/add","list":[{"message":"An unexpected server error has occured. Please email this error to apisupport@gogrid.com. Error Message : null","object":"error","errorcode":"UnexpectedException"}]} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/auth_2_0.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/auth_2_0.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/auth_2_0.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/auth_2_0.json 2013-12-12 09:01:49.000000000 +0000 @@ -0,0 +1,71 @@ +{ + "access": { + "token": { + "id": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "expires": "2031-03-14T08:10:14.000-05:00" + }, + "serviceCatalog": [ + { + "endpoints": [ + { + "region": "DFW", + "tenantId": "MossoCloudFS_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "publicURL": "https://storage101.dfw1.clouddrive.com/v1/MossoCloudFS_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "internalURL": "https://snet-storage101.dfw1.clouddrive.com/v1/MossoCloudFS_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + } + ], + "name": 
"cloudFiles", + "type": "object-store" + }, + { + "endpoints": [ + { + "region": "DFW", + "tenantId": "11111", + "publicURL": "https://dfw.servers.api.rackspacecloud.com/v2/11111", + "versionInfo": "https://dfw.servers.api.rackspacecloud.com/v2/", + "versionList": "https://dfw.servers.api.rackspacecloud.com/", + "versionId": "2" + } + ], + "name": "cloudServersOpenStack", + "type": "compute" + }, + { + "endpoints": [ + { + "tenantId": "11111", + "publicURL": "https://servers.api.rackspacecloud.com/v1.0/11111", + "versionInfo": "https://servers.api.rackspacecloud.com/v1.0/", + "versionList": "https://servers.api.rackspacecloud.com/", + "versionId": "1.0" + } + ], + "name": "cloudServers", + "type": "compute" + }, + { + "endpoints": [ + { + "region": "DFW", + "tenantId": "MossoCloudFS_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "publicURL": "https://cdn1.clouddrive.com/v1/MossoCloudFS_bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + } + ], + "name": "cloudFilesCDN", + "type": "rax:object-cdn" + } + ], + "user": { + "id": "9586", + "roles": [ + { + "id": "identity:default", + "description": "Default Role.", + "name": "identity:default" + } + ], + "name": "libclouduser" + } + } +} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/error_page_default.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/error_page_default.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/error_page_default.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/error_page_default.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"errorpage":{"content":"Service Unavailable

Service Unavailable

The service is temporarily unavailable. Please try again later.

"}} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_0_slug_loadbalancers_8290_usage_current.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_0_slug_loadbalancers_8290_usage_current.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_0_slug_loadbalancers_8290_usage_current.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_0_slug_loadbalancers_8290_usage_current.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,35 @@ +{ + "links": [], + "loadBalancerUsageRecords": [ + { + "id": 1234, + "startTime": "2013-04-22T22:00:00Z", + "endTime": "2013-04-22T23:00:00Z", + "numVips": 1, + "incomingTransfer": 0, + "outgoingTransfer": 0, + "incomingTransferSsl": 6182163, + "outgoingTransferSsl": 9702071, + "vipType": "PUBLIC", + "averageNumConnections": 0, + "averageNumConnectionsSsl": 14.9166666666666, + "numPolls": 12, + "sslMode": "ON" + }, + { + "id": 12345, + "startTime": "2013-04-22T23:00:00Z", + "endTime": "2013-04-23T00:00:00Z", + "numVips": 1, + "incomingTransfer": 0, + "outgoingTransfer": 0, + "incomingTransferSsl": 6815503, + "outgoingTransferSsl": 10474092, + "vipType": "PUBLIC", + "averageNumConnections": 0, + "averageNumConnectionsSsl": 19.9166666666667, + "numPolls": 12, + "sslMode": "ON" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_accesslist.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_accesslist.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_accesslist.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_accesslist.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ 
+{"accessList":[{"address":"0.0.0.0/0","id":2883,"type":"DENY"},{"address":"2001:4801:7901::6/64","id":2884,"type":"ALLOW"}]} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_errorpage.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_errorpage.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_errorpage.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_errorpage.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"errorpage":{"content":"Service Unavailable

Service Unavailable

The service is temporarily unavailable. Please try again later.

"}} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_ex_public_ips.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_ex_public_ips.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_ex_public_ips.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18940_ex_public_ips.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"loadBalancer":{"name":"new ord balancer","id":18940,"protocol":"HTTP","port":80,"algorithm":"RANDOM","status":"ACTIVE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"ONLINE","condition":"ENABLED"}],"created":{"time":"2011-12-09T13:30:40Z"},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T13:32:05Z"},"connectionLogging":{"enabled":false}}} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18941_ex_private_ips.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18941_ex_private_ips.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18941_ex_private_ips.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18941_ex_private_ips.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ 
+{"loadBalancer":{"name":"servicenet","id":18941,"protocol":"HTTP","port":80,"algorithm":"RANDOM","status":"BUILD","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.183.32.247","id":94693,"port":80,"status":"ONLINE","condition":"ENABLED"}],"created":{"time":"2011-12-09T13:33:28Z"},"virtualIps":[{"address":"10.183.252.175","id":572,"type":"SERVICENET","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T13:33:28Z"},"connectionLogging":{"enabled":false}}} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18945_ex_public_ips.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18945_ex_public_ips.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18945_ex_public_ips.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_18945_ex_public_ips.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"loadBalancer":{"name":"new ord balancer","id":18940,"protocol":"HTTP","port":80,"algorithm":"RANDOM","status":"ACTIVE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"ONLINE","condition":"ENABLED"}],"created":{"time":"2011-12-09T13:30:40Z"},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T13:32:05Z"},"connectionLogging":{"enabled":false}}} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_3xxx.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_3xxx.json --- 
libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_3xxx.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_3xxx.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,46 @@ +{ + "loadBalancer": { + "algorithm": "UUUUUUUUUU", + "cluster": { + "name": "ztm-n05.lbaas.ord1.rackspace.net" + }, + "connectionLogging": { + "enabled": false + }, + "created": { + "time": "2011-04-07T16:27:50Z" + }, + "id": 9999999, + "name": "test2", + "nodes": [ + { + "address": "10.1.0.11", + "condition": "ENABLED", + "id": 30944, + "port": 80, + "status": "ONLINE" + }, + { + "address": "10.1.0.10", + "condition": "ENABLED", + "id": 30945, + "port": 80, + "status": "ONLINE" + } + ], + "port": 88888, + "protocol": "XXXXX", + "status": "ACTIVE", + "updated": { + "time": "2011-04-07T16:28:12Z" + }, + "virtualIps": [ + { + "address": "1.1.1.1", + "id": 1151, + "ipVersion": "IPV4", + "type": "PUBLIC" + } + ] + } +} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_errorpage.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_errorpage.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_errorpage.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_errorpage.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"errorpage":{"content":"Generic Error Page"}} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290.json 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,56 @@ +{ + "loadBalancer": { + "algorithm": "RANDOM", + "cluster": { + "name": "ztm-n05.lbaas.ord1.rackspace.net" + }, + "connectionLogging": { + "enabled": false + }, + "created": { + "time": "2011-04-07T16:27:50Z" + }, + "id": 8290, + "name": "test2", + "nodes": [ + { + "address": "10.1.0.11", + "condition": "ENABLED", + "id": 30944, + "port": 80, + "status": "ONLINE", + "weight": 12 + }, + { + "address": "10.1.0.10", + "condition": "DISABLED", + "id": 30945, + "port": 80, + "status": "OFFLINE", + "weight": 8 + }, + { + "address": "10.1.0.9", + "condition": "DRAINING", + "id": 30946, + "port": 8080, + "status": "DRAINING", + "weight": 20 + } + ], + "port": 80, + "protocol": "HTTP", + "status": "ACTIVE", + "updated": { + "time": "2011-04-07T16:28:12Z" + }, + "virtualIps": [ + { + "address": "1.1.1.1", + "id": 1151, + "ipVersion": "IPV4", + "type": "PUBLIC" + } + ] + } +} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,28 @@ +{ + "nodes": [ + { + "address": "10.1.0.11", + "condition": "ENABLED", + "id": 30944, + "port": 80, + "status": "ONLINE", + "weight": 12 + }, + { + "address": "10.1.0.10", + "condition": "DISABLED", + "id": 30945, + "port": 80, + "status": "OFFLINE", + "weight": 8 + }, + { + "address": "10.1.0.9", + "condition": "DRAINING", + "id": 30946, + "port": 8080, + "status": "DRAINING", + "weight": 20 + } + ] +} diff -Nru 
libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes_post.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes_post.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes_post.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,12 @@ +{ + "nodes": [ + { + "address": "10.1.0.12", + "condition": "ENABLED", + "id": 30972, + "port": 80, + "status": "ONLINE", + "weight": 1 + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8291.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8291.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8291.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8291.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,56 @@ +{ + "loadBalancer": { + "algorithm": "RANDOM", + "cluster": { + "name": "ztm-n05.lbaas.ord1.rackspace.net" + }, + "connectionLogging": { + "enabled": false + }, + "created": { + "time": "2011-04-07T16:27:50Z" + }, + "id": 8291, + "name": "test8291", + "nodes": [ + { + "address": "10.1.0.11", + "condition": "ENABLED", + "id": 30944, + "port": 80, + "status": "ONLINE", + "weight": 12 + }, + { + "address": "10.1.0.10", + "condition": "DISABLED", + "id": 30945, + "port": 80, + "status": "OFFLINE", + "weight": 8 + }, + { + "address": "10.1.0.9", + "condition": "DRAINING", + "id": 30946, + "port": 8080, + "status": "DRAINING", + "weight": 20 + } + ], + "port": 80, + "protocol": "HTTP", + "status": "ACTIVE", + "updated": { + "time": "2011-04-07T16:28:12Z" + }, + "virtualIps": [ + { + "address": "1.1.1.1", + "id": 1151, + "ipVersion": "IPV4", + 
"type": "PUBLIC" + } + ] + } +} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8292.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8292.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8292.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8292.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,56 @@ +{ + "loadBalancer": { + "algorithm": "RANDOM", + "cluster": { + "name": "ztm-n05.lbaas.ord1.rackspace.net" + }, + "connectionLogging": { + "enabled": false + }, + "created": { + "time": "2011-04-07T16:27:50Z" + }, + "id": 8292, + "name": "test8292", + "nodes": [ + { + "address": "10.1.0.11", + "condition": "ENABLED", + "id": 30944, + "port": 80, + "status": "ONLINE", + "weight": 12 + }, + { + "address": "10.1.0.10", + "condition": "DISABLED", + "id": 30945, + "port": 80, + "status": "OFFLINE", + "weight": 8 + }, + { + "address": "10.1.0.9", + "condition": "DRAINING", + "id": 30946, + "port": 8080, + "status": "DRAINING", + "weight": 20 + } + ], + "port": 80, + "protocol": "HTTP", + "status": "ACTIVE", + "updated": { + "time": "2011-04-07T16:28:12Z" + }, + "virtualIps": [ + { + "address": "1.1.1.2", + "id": 1151, + "ipVersion": "IPV4", + "type": "PUBLIC" + } + ] + } +} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8292_nodes_post.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8292_nodes_post.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8292_nodes_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8292_nodes_post.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,20 @@ +{ + "nodes": [ + { + "address": "10.1.0.12", + "condition": "ENABLED", + "id": 
30972, + "port": 80, + "status": "ONLINE", + "weight": 1 + }, + { + "address": "10.1.0.13", + "condition": "ENABLED", + "id": 30973, + "port": 80, + "status": "ONLINE", + "weight": 1 + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94692_weighted_round_robin.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94692_weighted_round_robin.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94692_weighted_round_robin.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94692_weighted_round_robin.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"loadBalancer":{"name":"new ord balancer","id":18940,"protocol":"HTTP","port":80,"algorithm":"WEIGHTED_ROUND_ROBIN","status":"ACTIVE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"ONLINE","condition":"ENABLED","weight":1}],"created":{"time":"2011-12-09T13:30:40Z"},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T14:39:40Z"},"connectionLogging":{"enabled":false}}} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94693_weighted_least_connections.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94693_weighted_least_connections.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94693_weighted_least_connections.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94693_weighted_least_connections.json 2013-08-30 12:21:18.000000000 +0000 @@ 
-0,0 +1 @@ +{"loadBalancer":{"name":"new ord balancer","id":18940,"protocol":"HTTP","port":80,"algorithm":"WEIGHTED_LEAST_CONNECTIONS","status":"PENDING_UPDATE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"ONLINE","condition":"ENABLED","weight":1}],"created":{"time":"2011-12-09T13:30:40Z"},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T15:18:38Z"},"connectionLogging":{"enabled":false}}} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94694_unknown_algorithm.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94694_unknown_algorithm.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94694_unknown_algorithm.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94694_unknown_algorithm.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"loadBalancer":{"name":"new ord balancer","id":18940,"protocol":"HTTP","port":80,"algorithm":"UNSPECIFIED_FUTURE_ALGORITHM","status":"PENDING_UPDATE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"ONLINE","condition":"ENABLED","weight":1}],"created":{"time":"2011-12-09T13:30:40Z"},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T15:18:38Z"},"connectionLogging":{"enabled":false}}} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94695_full_details.json 
libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94695_full_details.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94695_full_details.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94695_full_details.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"loadBalancer":{"name":"new ord balancer","id":94695,"protocol":"HTTP","port":80,"algorithm":"WEIGHTED_LEAST_CONNECTIONS","status":"ACTIVE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"OFFLINE","condition":"ENABLED","weight":1}],"created":{"time":"2011-12-09T13:30:40Z"},"healthMonitor":{"type":"CONNECT","delay":10,"timeout":5,"attemptsBeforeDeactivation":2},"sessionPersistence":{"persistenceType":"HTTP_COOKIE"},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T16:01:36Z"},"connectionThrottle":{"maxConnections":200,"minConnections":50,"maxConnectionRate":50,"rateInterval":10},"connectionLogging":{"enabled":true}}} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94696_http_health_monitor.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94696_http_health_monitor.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94696_http_health_monitor.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94696_http_health_monitor.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"loadBalancer":{"name":"new ord 
balancer","id":94696,"protocol":"HTTP","port":80,"algorithm":"WEIGHTED_LEAST_CONNECTIONS","status":"ACTIVE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"ONLINE","condition":"ENABLED","weight":1}],"created":{"time":"2011-12-09T13:30:40Z"},"healthMonitor":{"type":"HTTP","path":"/","delay":10,"timeout":5,"attemptsBeforeDeactivation":2,"statusRegex":"^[234][0-9][0-9]$","bodyRegex":"Hello World!"},"sessionPersistence":{"persistenceType":"HTTP_COOKIE"},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T16:51:32Z"},"connectionThrottle":{"maxConnections":100,"minConnections":25,"maxConnectionRate":25,"rateInterval":5},"connectionLogging":{"enabled":true}}} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94697_https_health_monitor.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94697_https_health_monitor.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94697_https_health_monitor.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94697_https_health_monitor.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"loadBalancer":{"name":"new ord 
balancer","id":94697,"protocol":"HTTPS","port":443,"algorithm":"WEIGHTED_LEAST_CONNECTIONS","status":"PENDING_UPDATE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"OFFLINE","condition":"ENABLED","weight":1}],"created":{"time":"2011-12-09T13:30:40Z"},"healthMonitor":{"type":"HTTPS","path":"/test","delay":15,"timeout":12,"attemptsBeforeDeactivation":5,"statusRegex":"^[234][0-9][0-9]$","bodyRegex":"abcdef"},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T19:34:34Z"},"accessList":[{"address":"0.0.0.0/0","id":2883,"type":"DENY"},{"address":"2001:4801:7901::6/64","id":2884,"type":"ALLOW"}],"connectionThrottle":{"maxConnections":100,"minConnections":25,"maxConnectionRate":25,"rateInterval":5},"connectionLogging":{"enabled":true}}} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94698_access_list.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94698_access_list.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94698_access_list.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94698_access_list.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"accessList":[{"address":"0.0.0.0/0","id":2883,"type":"DENY"},{"address":"2001:4801:7901::6/64","id":2884,"type":"ALLOW"},{"address":"8.8.8.8/0","id":3006,"type":"DENY"}]} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94698_with_access_list.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94698_with_access_list.json --- 
libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94698_with_access_list.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94698_with_access_list.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"loadBalancer":{"name":"new ord balancer","id":94698,"protocol":"HTTPS","port":443,"algorithm":"WEIGHTED_LEAST_CONNECTIONS","status":"ACTIVE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"OFFLINE","condition":"DRAINING","weight":25},{"address":"10.181.238.11","id":97683,"port":443,"status":"OFFLINE","condition":"ENABLED","weight":1}],"created":{"time":"2011-12-09T13:30:40Z"},"healthMonitor":{"type":"CONNECT","delay":5,"timeout":10,"attemptsBeforeDeactivation":4},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2012-01-05T19:31:38Z"},"accessList":[{"address":"0.0.0.0/0","id":2883,"type":"DENY"},{"address":"2001:4801:7901::6/64","id":2884,"type":"ALLOW"},{"address":"8.8.8.8/0","id":3006,"type":"DENY"}],"connectionThrottle":{"maxConnections":200,"minConnections":50,"maxConnectionRate":50,"rateInterval":10},"connectionLogging":{"enabled":true}}} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94700_http_health_monitor_no_body_regex.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94700_http_health_monitor_no_body_regex.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94700_http_health_monitor_no_body_regex.json 1970-01-01 00:00:00.000000000 +0000 +++ 
libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_94700_http_health_monitor_no_body_regex.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"loadBalancer":{"name":"new ord balancer","id":94700,"protocol":"HTTP","port":80,"algorithm":"WEIGHTED_LEAST_CONNECTIONS","status":"ACTIVE","cluster":{"name":"ztm-n06.lbaas.ord1.rackspace.net"},"nodes":[{"address":"10.181.231.202","id":94692,"port":80,"status":"ONLINE","condition":"ENABLED","weight":1}],"created":{"time":"2011-12-09T13:30:40Z"},"healthMonitor":{"type":"HTTP","path":"/","delay":10,"timeout":5,"attemptsBeforeDeactivation":2,"statusRegex":"^[234][0-9][0-9]$"},"sessionPersistence":{"persistenceType":"HTTP_COOKIE"},"virtualIps":[{"address":"50.56.49.149","id":2359,"type":"PUBLIC","ipVersion":"IPV4"}],"sourceAddresses":{"ipv6Public":"2001:4801:7901::6/64","ipv4Servicenet":"10.183.252.25","ipv4Public":"184.106.100.25"},"updated":{"time":"2011-12-09T16:51:32Z"},"connectionThrottle":{"maxConnections":100,"minConnections":25,"maxConnectionRate":25,"rateInterval":5},"connectionLogging":{"enabled":true}}} \ No newline at end of file diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_algorithms.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_algorithms.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_algorithms.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_algorithms.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"algorithms":[{"name":"LEAST_CONNECTIONS"},{"name":"RANDOM"},{"name":"ROUND_ROBIN"},{"name":"WEIGHTED_LEAST_CONNECTIONS"},{"name":"WEIGHTED_ROUND_ROBIN"}]} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers.json --- 
libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,48 @@ +{ + "loadBalancers": [ + { + "algorithm": "RANDOM", + "created": { + "time": "2011-04-06T21:25:19+0000" + }, + "id": 8155, + "name": "test0", + "port": 80, + "protocol": "HTTP", + "status": "ACTIVE", + "updated": { + "time": "2011-04-06T21:25:31+0000" + }, + "virtualIps": [ + { + "address": "1.1.1.25", + "id": 965, + "ipVersion": "IPV4", + "type": "PUBLIC" + } + ] + }, + { + "algorithm": "RANDOM", + "created": { + "time": "2011-04-06T21:26:22+0000" + }, + "id": 8156, + "name": "test1", + "port": 80, + "protocol": "HTTP", + "status": "ACTIVE", + "updated": { + "time": "2011-04-06T21:26:33+0000" + }, + "virtualIps": [ + { + "address": "1.1.1.83", + "id": 1279, + "ipVersion": "IPV4", + "type": "PUBLIC" + } + ] + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_nodeaddress.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_nodeaddress.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_nodeaddress.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_nodeaddress.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,18 @@ +{"loadBalancers": [ + { + "name": "First Loadbalancer", + "id": 1, + "status": "ACTIVE" + }, + { + "name": "Second Loadbalancer", + "id": 2, + "status": "PENDING_UPDATE" + }, + { + "name": "Third Loadbalancer", + "id": 8, + "status": "ERROR" + } + ] +} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_post.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_post.json --- 
libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_post.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_post.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,48 @@ +{ + "loadBalancer": { + "algorithm": "RANDOM", + "cluster": { + "name": "ztm-n05.lbaas.ord1.rackspace.net" + }, + "connectionLogging": { + "enabled": false + }, + "created": { + "time": "2011-04-07T16:27:50+0000" + }, + "id": 8290, + "name": "test2", + "nodes": [ + { + "address": "10.1.0.11", + "condition": "ENABLED", + "id": 30944, + "port": 80, + "status": "ONLINE", + "weight": 1 + }, + { + "address": "10.1.0.10", + "condition": "ENABLED", + "id": 30945, + "port": 80, + "status": "ONLINE", + "weight": 1 + } + ], + "port": 80, + "protocol": "HTTP", + "status": "BUILD", + "updated": { + "time": "2011-04-07T16:27:50+0000" + }, + "virtualIps": [ + { + "address": "1.1.1.1", + "id": 1151, + "ipVersion": "IPV4", + "type": "PUBLIC" + } + ] + } +} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_protocols.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_protocols.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_protocols.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_protocols.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,43 @@ +{"protocols": [ + { + "name": "HTTP", + "port": "80" + }, + { + "name": "FTP", + "port": "21" + }, + { + "name": "IMAPv4", + "port": "143" + }, + { + "name": "POP3", + "port": "110" + }, + { + "name": "SMTP", + "port": "25" + }, + { + "name": "LDAP", + "port": "389" + }, + { + "name": "HTTPS", + "port": "443" + }, + { + "name": "IMAPS", + "port": "993" + }, + { + "name": "POP3S", + "port": "995" + }, + { + "name": "LDAPS", + "port": "636" + } + ] +} diff -Nru 
libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/_v2_0__auth.json libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/_v2_0__auth.json --- libcloud-0.5.0/libcloud/test/loadbalancer/fixtures/rackspace/_v2_0__auth.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/fixtures/rackspace/_v2_0__auth.json 2013-12-12 09:01:49.000000000 +0000 @@ -0,0 +1,154 @@ +{ + "access": { + "token": { + "id": "aaaaaaaaaaaa-bbb-cccccccccccccc", + "expires": "2031-11-23T21:00:14.000-06:00" + }, + "serviceCatalog": [ + { + "endpoints": [ + { + "region": "ORD", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + "publicURL": "https://cdn.clouddrive.com/v1/MossoCloudFS", + "version": { + "versionInfo": "https://cdn2.clouddrive.com/v1/", + "versionList": "https://cdn2.clouddrive.com/", + "versionId": "1" + } + }, + { + "region": "LON", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + "publicURL": "https://cdn.clouddrive.com/v1/MossoCloudFS", + "version": { + "versionInfo": "https://cdn2.clouddrive.com/v1/", + "versionList": "https://cdn2.clouddrive.com/", + "versionId": "1" + } + } + + ], + "name": "cloudFilesCDN", + "type": "rax:object-cdn" + }, + { + "endpoints": [ + { + "region": "ORD", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + "publicURL": "https://storage4.ord1.clouddrive.com/v1/MossoCloudFS", + "internalURL": "https://snet-storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" + }, + { + "region": "LON", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + "publicURL": "https://storage4.lon1.clouddrive.com/v1/MossoCloudFS", + "internalURL": "https://snet-storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" + } + ], + "name": "cloudFiles", + "type": "object-store" + }, + { + "endpoints": [ + { + "tenantId": "1337", + "publicURL": "https://servers.api.rackspacecloud.com/v1.0/1337", + "version": { + 
"versionInfo": "https://servers.api.rackspacecloud.com/v1.0/", + "versionList": "https://servers.api.rackspacecloud.com/", + "versionId": "1.0" + } + } + ], + "name": "cloudServers", + "type": "compute" + }, + { + "endpoints": [ + { + "region": "RegionOne", + "tenantId": "1337", + "publicURL": "https://127.0.0.1/v2/1337", + "versionInfo": "https://127.0.0.1/v2/", + "versionList": "https://127.0.0.1/", + "versionId": "2" + } + ], + "name": "nova", + "type": "compute" + }, + { + "endpoints": [ + { + "region": "DFW", + "tenantId": "613469", + "publicURL": "https://dfw.servers.api.rackspacecloud.com/v2/1337", + "versionInfo": "https://dfw.servers.api.rackspacecloud.com/v2/", + "versionList": "https://dfw.servers.api.rackspacecloud.com/", + "versionId": "2" + }, + { + "region": "ORD", + "tenantId": "613469", + "publicURL": "https://ord.servers.api.rackspacecloud.com/v2/1337", + "versionInfo": "https://ord.servers.api.rackspacecloud.com/v2/", + "versionList": "https://ord.servers.api.rackspacecloud.com/", + "versionId": "2" + } + ], + "name": "cloudServersOpenStack", + "type": "compute" + }, + { + "endpoints": [ + { + "region": "DFW", + "tenantId": "1337", + "publicURL": "https://preprod.dfw.servers.api.rackspacecloud.com/v2/1337" + } + ], + "name": "cloudServersPreprod", + "type": "compute" + }, + { + "name": "cloudLoadBalancers", + "endpoints": [ + { + "region": "SYD", + "tenantId": "11111", + "publicURL": "https://syd.loadbalancers.api.rackspacecloud.com/v1.0/11111" + }, + { + "region": "DFW", + "tenantId": "11111", + "publicURL": "https://dfw.loadbalancers.api.rackspacecloud.com/v1.0/11111" + }, + { + "region": "ORD", + "tenantId": "11111", + "publicURL": "https://ord.loadbalancers.api.rackspacecloud.com/v1.0/11111" + }, + { + "region": "LON", + "tenantId": "11111", + "publicURL": "https://lon.loadbalancers.api.rackspacecloud.com/v1.0/11111" + } + ], + "type": "rax:load-balancer" + } + ], + "user": { + "id": "7", + "roles": [ + { + "id": "identity:default", + 
"description": "Default Role.", + "name": "identity:default" + } + ], + "name": "testuser" + } + } +} diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/__init__.py libcloud-0.15.1/libcloud/test/loadbalancer/__init__.py --- libcloud-0.5.0/libcloud/test/loadbalancer/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/__init__.py 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,14 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/test_brightbox.py libcloud-0.15.1/libcloud/test/loadbalancer/test_brightbox.py --- libcloud-0.5.0/libcloud/test/loadbalancer/test_brightbox.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/test_brightbox.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,134 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest + +from libcloud.utils.py3 import httplib +from libcloud.loadbalancer.base import Member, Algorithm +from libcloud.loadbalancer.drivers.brightbox import BrightboxLBDriver +from libcloud.loadbalancer.types import State + +from libcloud.test import MockHttpTestCase +from libcloud.test.secrets import LB_BRIGHTBOX_PARAMS +from libcloud.test.file_fixtures import LoadBalancerFileFixtures + + +class BrightboxLBTests(unittest.TestCase): + def setUp(self): + BrightboxLBDriver.connectionCls.conn_classes = (None, + BrightboxLBMockHttp) + BrightboxLBMockHttp.type = None + self.driver = BrightboxLBDriver(*LB_BRIGHTBOX_PARAMS) + + def test_list_protocols(self): + protocols = self.driver.list_protocols() + + self.assertEqual(len(protocols), 2) + self.assertTrue('tcp' in protocols) + self.assertTrue('http' in protocols) + + def test_list_balancers(self): + balancers = self.driver.list_balancers() + + self.assertEqual(len(balancers), 1) + self.assertEqual(balancers[0].id, 'lba-1235f') + self.assertEqual(balancers[0].name, 'lb1') + + def test_get_balancer(self): + balancer = self.driver.get_balancer(balancer_id='lba-1235f') + + self.assertEqual(balancer.id, 'lba-1235f') + self.assertEqual(balancer.name, 'lb1') + self.assertEqual(balancer.state, State.RUNNING) + + def test_destroy_balancer(self): + balancer = self.driver.get_balancer(balancer_id='lba-1235f') + + self.assertTrue(self.driver.destroy_balancer(balancer)) + + def test_create_balancer(self): + members = [Member('srv-lv426', None, None)] + + balancer = 
self.driver.create_balancer(name='lb2', port=80, + protocol='http', + algorithm=Algorithm.ROUND_ROBIN, + members=members) + + self.assertEqual(balancer.name, 'lb2') + self.assertEqual(balancer.port, 80) + self.assertEqual(balancer.state, State.PENDING) + + def test_balancer_list_members(self): + balancer = self.driver.get_balancer(balancer_id='lba-1235f') + members = balancer.list_members() + + self.assertEqual(len(members), 1) + self.assertEqual(members[0].balancer, balancer) + self.assertEqual('srv-lv426', members[0].id) + + def test_balancer_attach_member(self): + balancer = self.driver.get_balancer(balancer_id='lba-1235f') + member = balancer.attach_member(Member('srv-kg983', ip=None, + port=None)) + + self.assertEqual(member.id, 'srv-kg983') + + def test_balancer_detach_member(self): + balancer = self.driver.get_balancer(balancer_id='lba-1235f') + member = Member('srv-lv426', None, None) + + self.assertTrue(balancer.detach_member(member)) + + +class BrightboxLBMockHttp(MockHttpTestCase): + fixtures = LoadBalancerFileFixtures('brightbox') + + def _token(self, method, url, body, headers): + if method == 'POST': + return self.response(httplib.OK, self.fixtures.load('token.json')) + + def _1_0_load_balancers(self, method, url, body, headers): + if method == 'GET': + return self.response(httplib.OK, + self.fixtures.load('load_balancers.json')) + elif method == 'POST': + body = self.fixtures.load('load_balancers_post.json') + return self.response(httplib.ACCEPTED, body) + + def _1_0_load_balancers_lba_1235f(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('load_balancers_lba_1235f.json') + return self.response(httplib.OK, body) + elif method == 'DELETE': + return self.response(httplib.ACCEPTED, '') + + def _1_0_load_balancers_lba_1235f_add_nodes(self, method, url, body, + headers): + if method == 'POST': + return self.response(httplib.ACCEPTED, '') + + def _1_0_load_balancers_lba_1235f_remove_nodes(self, method, url, body, + 
headers): + if method == 'POST': + return self.response(httplib.ACCEPTED, '') + + def response(self, status, body): + return (status, body, {'content-type': 'application/json'}, + httplib.responses[status]) + + +if __name__ == "__main__": + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/test_cloudstack.py libcloud-0.15.1/libcloud/test/loadbalancer/test_cloudstack.py --- libcloud-0.5.0/libcloud/test/loadbalancer/test_cloudstack.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/test_cloudstack.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,125 @@ +import sys + +try: + import simplejson as json +except ImportError: + import json + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlparse +from libcloud.utils.py3 import parse_qsl + +from libcloud.loadbalancer.types import Provider +from libcloud.loadbalancer.providers import get_driver +from libcloud.loadbalancer.base import LoadBalancer, Member, Algorithm +from libcloud.loadbalancer.drivers.cloudstack import CloudStackLBDriver + +from libcloud.test import unittest +from libcloud.test import MockHttpTestCase +from libcloud.test.file_fixtures import LoadBalancerFileFixtures + + +class CloudStackLBTests(unittest.TestCase): + def setUp(self): + CloudStackLBDriver.connectionCls.conn_classes = \ + (None, CloudStackMockHttp) + + CloudStackLBDriver.path = '/test/path' + CloudStackLBDriver.type = -1 + CloudStackLBDriver.name = 'CloudStack' + self.driver = CloudStackLBDriver('apikey', 'secret') + CloudStackMockHttp.fixture_tag = 'default' + self.driver.connection.poll_interval = 0.0 + + def test_user_must_provide_host_and_path(self): + CloudStackLBDriver.path = None + CloudStackLBDriver.type = Provider.CLOUDSTACK + + expected_msg = 'When instantiating CloudStack driver directly ' + \ + 'you also need to provide host and path argument' + cls = get_driver(Provider.CLOUDSTACK) + + self.assertRaisesRegexp(Exception, expected_msg, cls, + 
'key', 'secret') + + try: + cls('key', 'secret', True, 'localhost', '/path') + except Exception: + self.fail('host and path provided but driver raised an exception') + + def test_list_supported_algorithms(self): + algorithms = self.driver.list_supported_algorithms() + + self.assertTrue(Algorithm.ROUND_ROBIN in algorithms) + self.assertTrue(Algorithm.LEAST_CONNECTIONS in algorithms) + + def test_list_balancers(self): + balancers = self.driver.list_balancers() + for balancer in balancers: + self.assertTrue(isinstance(balancer, LoadBalancer)) + + def test_create_balancer(self): + members = [Member(1, '1.1.1.1', 80), Member(2, '1.1.1.2', 80)] + balancer = self.driver.create_balancer('fake', members) + self.assertTrue(isinstance(balancer, LoadBalancer)) + + def test_destroy_balancer(self): + balancer = self.driver.list_balancers()[0] + self.driver.destroy_balancer(balancer) + + def test_balancer_attach_member(self): + balancer = self.driver.list_balancers()[0] + member = Member(id=1234, ip='1.1.1.1', port=80) + balancer.attach_member(member) + + def test_balancer_detach_member(self): + balancer = self.driver.list_balancers()[0] + member = balancer.list_members()[0] + balancer.detach_member(member) + + def test_balancer_list_members(self): + balancer = self.driver.list_balancers()[0] + members = balancer.list_members() + for member in members: + self.assertTrue(isinstance(member, Member)) + self.assertEqual(member.balancer, balancer) + + +class CloudStackMockHttp(MockHttpTestCase): + fixtures = LoadBalancerFileFixtures('cloudstack') + fixture_tag = 'default' + + def _load_fixture(self, fixture): + body = self.fixtures.load(fixture) + return body, json.loads(body) + + def _test_path(self, method, url, body, headers): + url = urlparse.urlparse(url) + query = dict(parse_qsl(url.query)) + + self.assertTrue('apiKey' in query) + self.assertTrue('command' in query) + self.assertTrue('response' in query) + self.assertTrue('signature' in query) + + 
self.assertTrue(query['response'] == 'json') + + del query['apiKey'] + del query['response'] + del query['signature'] + command = query.pop('command') + + if hasattr(self, '_cmd_' + command): + return getattr(self, '_cmd_' + command)(**query) + else: + fixture = command + '_' + self.fixture_tag + '.json' + body, obj = self._load_fixture(fixture) + return (httplib.OK, body, obj, httplib.responses[httplib.OK]) + + def _cmd_queryAsyncJobResult(self, jobid): + fixture = 'queryAsyncJobResult' + '_' + str(jobid) + '.json' + body, obj = self._load_fixture(fixture) + return (httplib.OK, body, obj, httplib.responses[httplib.OK]) + +if __name__ == "__main__": + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/test_elb.py libcloud-0.15.1/libcloud/test/loadbalancer/test_elb.py --- libcloud-0.5.0/libcloud/test/loadbalancer/test_elb.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/test_elb.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,188 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import unittest + +from libcloud.utils.py3 import httplib +from libcloud.loadbalancer.base import Member, Algorithm +from libcloud.loadbalancer.drivers.elb import ElasticLBDriver +from libcloud.loadbalancer.types import State + +from libcloud.test import MockHttpTestCase +from libcloud.test.secrets import LB_ELB_PARAMS +from libcloud.test.file_fixtures import LoadBalancerFileFixtures + + +class ElasticLBTests(unittest.TestCase): + def setUp(self): + ElasticLBMockHttp.test = self + ElasticLBDriver.connectionCls.conn_classes = (None, + ElasticLBMockHttp) + ElasticLBMockHttp.type = None + ElasticLBMockHttp.use_param = 'Action' + + self.driver = ElasticLBDriver(*LB_ELB_PARAMS) + + def test_list_protocols(self): + protocols = self.driver.list_protocols() + + self.assertEqual(len(protocols), 4) + self.assertTrue('tcp' in protocols) + self.assertTrue('http' in protocols) + + def test_list_balancers(self): + balancers = self.driver.list_balancers() + + self.assertEqual(len(balancers), 1) + self.assertEqual(balancers[0].id, 'tests') + self.assertEqual(balancers[0].name, 'tests') + + def test_get_balancer(self): + balancer = self.driver.get_balancer(balancer_id='tests') + + self.assertEqual(balancer.id, 'tests') + self.assertEqual(balancer.name, 'tests') + self.assertEqual(balancer.state, State.UNKNOWN) + + def test_destroy_balancer(self): + balancer = self.driver.get_balancer(balancer_id='tests') + + self.assertTrue(self.driver.destroy_balancer(balancer)) + + def test_create_balancer(self): + members = [Member('srv-lv426', None, None)] + + balancer = self.driver.create_balancer(name='lb2', port=80, + protocol='http', + algorithm=Algorithm.ROUND_ROBIN, + members=members) + + self.assertEqual(balancer.name, 'lb2') + self.assertEqual(balancer.port, 80) + self.assertEqual(balancer.state, State.PENDING) + + def test_balancer_list_members(self): + balancer = self.driver.get_balancer(balancer_id='tests') + members = balancer.list_members() + + 
self.assertEqual(len(members), 1) + self.assertEqual(members[0].balancer, balancer) + self.assertEqual('i-64bd081c', members[0].id) + + def test_balancer_detach_member(self): + balancer = self.driver.get_balancer(balancer_id='lba-1235f') + member = Member('i-64bd081c', None, None) + + self.assertTrue(balancer.detach_member(member)) + + def test_ex_list_balancer_policies(self): + balancer = self.driver.get_balancer(balancer_id='tests') + policies = self.driver.ex_list_balancer_policies(balancer) + + self.assertTrue('MyDurationStickyPolicy' in policies) + + def test_ex_list_balancer_policy_types(self): + policy_types = self.driver.ex_list_balancer_policy_types() + + self.assertTrue('ProxyProtocolPolicyType' in policy_types) + + def test_ex_create_balancer_policy(self): + self.assertTrue( + self.driver.ex_create_balancer_policy( + name='tests', + policy_name='MyDurationProxyPolicy', + policy_type='ProxyProtocolPolicyType')) + + def test_ex_delete_balancer_policy(self): + self.assertTrue( + self.driver.ex_delete_balancer_policy( + name='tests', + policy_name='MyDurationProxyPolicy')) + + def test_ex_set_balancer_policies_listener(self): + self.assertTrue( + self.driver.ex_set_balancer_policies_listener( + name='tests', + port=80, + policies=['MyDurationStickyPolicy'])) + + def test_ex_set_balancer_policies_backend_server(self): + self.assertTrue( + self.driver.ex_set_balancer_policies_backend_server( + name='tests', + instance_port=80, + policies=['MyDurationProxyPolicy'])) + + def text_ex_create_balancer_listeners(self): + self.assertTrue( + self.driver.ex_create_balancer_listeners( + name='tests', + listeners=[[1024, 65533, 'HTTP']])) + + +class ElasticLBMockHttp(MockHttpTestCase): + fixtures = LoadBalancerFileFixtures('elb') + + def _2012_06_01_DescribeLoadBalancers(self, method, url, body, headers): + body = self.fixtures.load('describe_load_balancers.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _2012_06_01_CreateLoadBalancer(self, 
method, url, body, headers): + body = self.fixtures.load('create_load_balancer.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _2012_06_01_DeregisterInstancesFromLoadBalancer(self, method, url, + body, headers): + body = self.fixtures.load( + 'deregister_instances_from_load_balancer.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _2012_06_01_CreateLoadBalancerPolicy(self, method, url, body, headers): + body = self.fixtures.load('create_load_balancer_policy.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _2012_06_01_DeleteLoadBalancer(self, method, url, body, headers): + body = '' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _2012_06_01_DescribeLoadBalancerPolicies(self, method, url, body, + headers): + body = self.fixtures.load('describe_load_balancer_policies.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _2012_06_01_DescribeLoadBalancerPolicyTypes(self, method, url, body, + headers): + body = self.fixtures.load('describe_load_balancers_policy_types.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _2012_06_01_DeleteLoadBalancerPolicy(self, method, url, body, headers): + body = '' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _2012_06_01_SetLoadBalancerPoliciesOfListener(self, method, url, body, + headers): + body = self.fixtures.load('set_load_balancer_policies_of_listener.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _2012_06_01_SetLoadBalancerPoliciesForBackendServer(self, method, url, + body, headers): + body = self.fixtures.load( + 'set_load_balancer_policies_for_backend_server.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +if __name__ == "__main__": + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/test_gce.py libcloud-0.15.1/libcloud/test/loadbalancer/test_gce.py --- 
libcloud-0.5.0/libcloud/test/loadbalancer/test_gce.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/test_gce.py 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,207 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Tests for Google Compute Engine Load Balancer Driver +""" +import sys +import unittest + +from libcloud.common.google import (GoogleBaseAuthConnection, + GoogleInstalledAppAuthConnection, + GoogleBaseConnection) +from libcloud.compute.drivers.gce import (GCENodeDriver) +from libcloud.loadbalancer.drivers.gce import (GCELBDriver) +from libcloud.test.common.test_google import GoogleAuthMockHttp +from libcloud.test.compute.test_gce import GCEMockHttp + +from libcloud.test import LibcloudTestCase + +from libcloud.test.secrets import GCE_PARAMS, GCE_KEYWORD_PARAMS + + +class GCELoadBalancerTest(LibcloudTestCase): + GoogleBaseConnection._get_token_info_from_file = lambda x: None + GoogleBaseConnection._write_token_info_to_file = lambda x: None + GoogleInstalledAppAuthConnection.get_code = lambda x: '1234' + datacenter = 'us-central1-a' + + def setUp(self): + GCEMockHttp.test = self + GCELBDriver.connectionCls.conn_classes = (GCEMockHttp, GCEMockHttp) + GCENodeDriver.connectionCls.conn_classes 
= (GCEMockHttp, GCEMockHttp) + GoogleBaseAuthConnection.conn_classes = (GoogleAuthMockHttp, + GoogleAuthMockHttp) + GCEMockHttp.type = None + kwargs = GCE_KEYWORD_PARAMS.copy() + kwargs['auth_type'] = 'IA' + kwargs['datacenter'] = self.datacenter + self.driver = GCELBDriver(*GCE_PARAMS, **kwargs) + + def test_get_node_from_ip(self): + ip = '23.236.58.15' + expected_name = 'node-name' + node = self.driver._get_node_from_ip(ip) + self.assertEqual(node.name, expected_name) + + dummy_ip = '8.8.8.8' + node = self.driver._get_node_from_ip(dummy_ip) + self.assertTrue(node is None) + + def test_list_protocols(self): + expected_protocols = ['TCP', 'UDP'] + protocols = self.driver.list_protocols() + self.assertEqual(protocols, expected_protocols) + + def test_list_balancers(self): + balancers = self.driver.list_balancers() + balancers_all = self.driver.list_balancers(ex_region='all') + balancer_name = 'lcforwardingrule' + self.assertEqual(len(balancers), 2) + self.assertEqual(len(balancers_all), 2) + self.assertEqual(balancers[0].name, balancer_name) + + def test_create_balancer(self): + balancer_name = 'libcloud-lb-demo-lb' + tp_name = '%s-tp' % (balancer_name) + port = '80' + protocol = 'tcp' + algorithm = None + node0 = self.driver.gce.ex_get_node('libcloud-lb-demo-www-000', + 'us-central1-b') + node1 = self.driver.gce.ex_get_node('libcloud-lb-demo-www-001', + 'us-central1-b') + members = [node0, node1] + balancer = self.driver.create_balancer(balancer_name, port, protocol, + algorithm, members) + self.assertEqual(balancer.name, balancer_name) + self.assertEqual(balancer.extra['targetpool'].name, tp_name) + self.assertEqual(len(balancer.list_members()), 3) + + def test_destory_balancer(self): + balancer_name = 'lcforwardingrule' + balancer = self.driver.get_balancer(balancer_name) + destroyed = balancer.destroy() + self.assertTrue(destroyed) + + def test_get_balancer(self): + balancer_name = 'lcforwardingrule' + tp_name = 'lctargetpool' + balancer_ip = '173.255.119.224' + 
balancer = self.driver.get_balancer(balancer_name) + self.assertEqual(balancer.name, balancer_name) + self.assertEqual(balancer.extra['forwarding_rule'].name, balancer_name) + self.assertEqual(balancer.ip, balancer_ip) + self.assertEqual(balancer.extra['targetpool'].name, tp_name) + + def test_attach_compute_node(self): + node = self.driver.gce.ex_get_node('libcloud-lb-demo-www-001', + 'us-central1-b') + balancer = self.driver.get_balancer('lcforwardingrule') + member = self.driver._node_to_member(node, balancer) + # Detach member first + balancer.detach_member(member) + self.assertEqual(len(balancer.list_members()), 1) + # Attach Node + balancer.attach_compute_node(node) + self.assertEqual(len(balancer.list_members()), 2) + + def test_detach_attach_member(self): + node = self.driver.gce.ex_get_node('libcloud-lb-demo-www-001', + 'us-central1-b') + balancer = self.driver.get_balancer('lcforwardingrule') + member = self.driver._node_to_member(node, balancer) + + # Check that balancer has 2 members + self.assertEqual(len(balancer.list_members()), 2) + + # Remove a member and check that it now has 1 member + balancer.detach_member(member) + self.assertEqual(len(balancer.list_members()), 1) + + # Reattach member and check that it has 2 members again + balancer.attach_member(member) + self.assertEqual(len(balancer.list_members()), 2) + + def test_balancer_list_members(self): + balancer = self.driver.get_balancer('lcforwardingrule') + members = balancer.list_members() + self.assertEqual(len(members), 2) + member_ips = [m.ip for m in members] + self.assertTrue('23.236.58.15' in member_ips) + + def test_ex_create_healthcheck(self): + healthcheck_name = 'lchealthcheck' + kwargs = {'host': 'lchost', + 'path': '/lc', + 'port': 8000, + 'interval': 10, + 'timeout': 10, + 'unhealthy_threshold': 4, + 'healthy_threshold': 3} + hc = self.driver.ex_create_healthcheck(healthcheck_name, **kwargs) + self.assertEqual(hc.name, healthcheck_name) + self.assertEqual(hc.path, '/lc') + 
self.assertEqual(hc.port, 8000) + self.assertEqual(hc.interval, 10) + + def test_ex_list_healthchecks(self): + healthchecks = self.driver.ex_list_healthchecks() + self.assertEqual(len(healthchecks), 3) + self.assertEqual(healthchecks[0].name, 'basic-check') + + def test_ex_balancer_detach_attach_healthcheck(self): + healthcheck = self.driver.gce.ex_get_healthcheck( + 'libcloud-lb-demo-healthcheck') + balancer = self.driver.get_balancer('lcforwardingrule') + + healthchecks = self.driver.ex_balancer_list_healthchecks(balancer) + self.assertEqual(len(healthchecks), 1) + # Detach Healthcheck + detach_healthcheck = self.driver.ex_balancer_detach_healthcheck( + balancer, healthcheck) + self.assertTrue(detach_healthcheck) + healthchecks = self.driver.ex_balancer_list_healthchecks(balancer) + self.assertEqual(len(healthchecks), 0) + + # Reattach Healthcheck + attach_healthcheck = self.driver.ex_balancer_attach_healthcheck( + balancer, healthcheck) + self.assertTrue(attach_healthcheck) + healthchecks = self.driver.ex_balancer_list_healthchecks(balancer) + self.assertEqual(len(healthchecks), 1) + + def test_ex_balancer_list_healthchecks(self): + balancer = self.driver.get_balancer('lcforwardingrule') + healthchecks = self.driver.ex_balancer_list_healthchecks(balancer) + self.assertEqual(healthchecks[0].name, 'libcloud-lb-demo-healthcheck') + + def test_node_to_member(self): + node = self.driver.gce.ex_get_node('libcloud-lb-demo-www-001', + 'us-central1-b') + balancer = self.driver.get_balancer('lcforwardingrule') + member = self.driver._node_to_member(node, balancer) + self.assertEqual(member.ip, node.public_ips[0]) + self.assertEqual(member.id, node.name) + self.assertEqual(member.port, balancer.port) + + def test_forwarding_rule_to_loadbalancer(self): + fwr = self.driver.gce.ex_get_forwarding_rule('lcforwardingrule') + balancer = self.driver._forwarding_rule_to_loadbalancer(fwr) + self.assertEqual(fwr.name, balancer.name) + self.assertEqual(fwr.address, balancer.ip) + 
self.assertEqual(fwr.extra['portRange'], balancer.port) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/test_gogrid.py libcloud-0.15.1/libcloud/test/loadbalancer/test_gogrid.py --- libcloud-0.5.0/libcloud/test/loadbalancer/test_gogrid.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/test_gogrid.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,197 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import unittest + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlparse + +from libcloud.common.types import LibcloudError +from libcloud.compute.base import Node +from libcloud.compute.drivers.dummy import DummyNodeDriver +from libcloud.loadbalancer.base import LoadBalancer, Member, Algorithm +from libcloud.loadbalancer.drivers.gogrid import GoGridLBDriver + +from libcloud.test import MockHttpTestCase +from libcloud.test.file_fixtures import LoadBalancerFileFixtures + + +class GoGridTests(unittest.TestCase): + + def setUp(self): + GoGridLBDriver.connectionCls.conn_classes = (None, + GoGridLBMockHttp) + GoGridLBMockHttp.type = None + self.driver = GoGridLBDriver('user', 'key') + + def test_list_supported_algorithms(self): + algorithms = self.driver.list_supported_algorithms() + + self.assertTrue(Algorithm.ROUND_ROBIN in algorithms) + self.assertTrue(Algorithm.LEAST_CONNECTIONS in algorithms) + + def test_list_protocols(self): + protocols = self.driver.list_protocols() + + self.assertEqual(len(protocols), 1) + self.assertEqual(protocols[0], 'http') + + def test_list_balancers(self): + balancers = self.driver.list_balancers() + + self.assertEqual(len(balancers), 2) + self.assertEqual(balancers[0].name, "foo") + self.assertEqual(balancers[0].id, "23517") + self.assertEqual(balancers[1].name, "bar") + self.assertEqual(balancers[1].id, "23526") + + def test_create_balancer(self): + balancer = self.driver.create_balancer(name='test2', + port=80, + protocol='http', + algorithm=Algorithm.ROUND_ROBIN, + members=( + Member( + None, '10.1.0.10', 80), + Member(None, '10.1.0.11', 80)) + ) + + self.assertEqual(balancer.name, 'test2') + self.assertEqual(balancer.id, '123') + + def test_create_balancer_UNEXPECTED_ERROR(self): + # Try to create new balancer and attach members with an IP address which + # does not belong to this account + GoGridLBMockHttp.type = 'UNEXPECTED_ERROR' + + try: + self.driver.create_balancer(name='test2', + 
port=80, + protocol='http', + algorithm=Algorithm.ROUND_ROBIN, + members=(Member(None, '10.1.0.10', 80), + Member(None, '10.1.0.11', 80)) + ) + except LibcloudError: + e = sys.exc_info()[1] + self.assertTrue( + str(e).find('tried to add a member with an IP address not assigned to your account') != -1) + else: + self.fail('Exception was not thrown') + + def test_destroy_balancer(self): + balancer = self.driver.list_balancers()[0] + + ret1 = self.driver.destroy_balancer(balancer) + ret2 = balancer.destroy() + + self.assertTrue(ret1) + self.assertTrue(ret2) + + def test_get_balancer(self): + balancer = self.driver.get_balancer(balancer_id='23530') + + self.assertEqual(balancer.name, 'test2') + self.assertEqual(balancer.id, '23530') + + def test_balancer_list_members(self): + balancer = self.driver.get_balancer(balancer_id='23530') + members1 = self.driver.balancer_list_members(balancer=balancer) + members2 = balancer.list_members() + + expected_members = set(['10.0.0.78:80', '10.0.0.77:80', + '10.0.0.76:80']) + + self.assertEqual(len(members1), 3) + self.assertEqual(len(members2), 3) + self.assertEqual(expected_members, + set(["%s:%s" % (member.ip, member.port) for member in members1])) + self.assertEquals(members1[0].balancer, balancer) + + def test_balancer_attach_compute_node(self): + balancer = LoadBalancer(23530, None, None, None, None, self.driver) + node = Node(id='1', name='test', state=None, public_ips=['10.0.0.75'], + private_ips=[], driver=DummyNodeDriver) + member1 = self.driver.balancer_attach_compute_node(balancer, node) + member2 = balancer.attach_compute_node(node) + + self.assertEqual(member1.ip, '10.0.0.75') + self.assertEqual(member1.port, 80) + self.assertEqual(member2.ip, '10.0.0.75') + self.assertEqual(member2.port, 80) + + def test_balancer_attach_member(self): + balancer = LoadBalancer(23530, None, None, None, None, self.driver) + member = Member(None, ip='10.0.0.75', port='80') + member1 = self.driver.balancer_attach_member(balancer, 
member=member) + member2 = balancer.attach_member(member=member) + + self.assertEqual(member1.ip, '10.0.0.75') + self.assertEqual(member1.port, 80) + self.assertEqual(member2.ip, '10.0.0.75') + self.assertEqual(member2.port, 80) + + def test_balancer_detach_member(self): + balancer = LoadBalancer(23530, None, None, None, None, self.driver) + member = self.driver.balancer_list_members(balancer)[0] + + ret1 = self.driver.balancer_detach_member(balancer, member) + ret2 = balancer.detach_member(member) + + self.assertTrue(ret1) + self.assertTrue(ret2) + + +class GoGridLBMockHttp(MockHttpTestCase): + fixtures = LoadBalancerFileFixtures('gogrid') + + def _api_grid_loadbalancer_list(self, method, url, body, headers): + body = self.fixtures.load('loadbalancer_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_grid_ip_list(self, method, url, body, headers): + body = self.fixtures.load('ip_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_grid_loadbalancer_add(self, method, url, body, headers): + query = urlparse.urlparse(url).query + self.assertTrue(query.find('loadbalancer.type=round+robin') != -1) + + body = self.fixtures.load('loadbalancer_add.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_grid_ip_list_UNEXPECTED_ERROR(self, method, url, body, headers): + return self._api_grid_ip_list(method, url, body, headers) + + def _api_grid_loadbalancer_add_UNEXPECTED_ERROR(self, method, url, body, headers): + body = self.fixtures.load('unexpected_error.json') + return (httplib.INTERNAL_SERVER_ERROR, body, {}, httplib.responses[httplib.OK]) + + def _api_grid_loadbalancer_delete(self, method, url, body, headers): + body = self.fixtures.load('loadbalancer_add.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_grid_loadbalancer_get(self, method, url, body, headers): + body = self.fixtures.load('loadbalancer_get.json') + return (httplib.OK, 
body, {}, httplib.responses[httplib.OK]) + + def _api_grid_loadbalancer_edit(self, method, url, body, headers): + body = self.fixtures.load('loadbalancer_edit.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +if __name__ == "__main__": + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/test_ninefold.py libcloud-0.15.1/libcloud/test/loadbalancer/test_ninefold.py --- libcloud-0.5.0/libcloud/test/loadbalancer/test_ninefold.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/test_ninefold.py 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,15 @@ +import sys +import unittest + +from libcloud.loadbalancer.types import Provider +from libcloud.loadbalancer.providers import get_driver + + +class NinefoldLbTestCase(unittest.TestCase): + def test_driver_instantiation(self): + cls = get_driver(Provider.NINEFOLD) + cls('username', 'key') + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/loadbalancer/test_rackspace.py libcloud-0.15.1/libcloud/test/loadbalancer/test_rackspace.py --- libcloud-0.5.0/libcloud/test/loadbalancer/test_rackspace.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/loadbalancer/test_rackspace.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,1511 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import datetime + +try: + import simplejson as json +except ImportError: + import json + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlencode + +from libcloud.loadbalancer.base import LoadBalancer, Member, Algorithm +from libcloud.loadbalancer.types import MemberCondition +from libcloud.loadbalancer.drivers.rackspace import RackspaceLBDriver, \ + RackspaceHealthMonitor, RackspaceHTTPHealthMonitor, \ + RackspaceConnectionThrottle, RackspaceAccessRule +from libcloud.loadbalancer.drivers.rackspace import RackspaceUKLBDriver +from libcloud.loadbalancer.drivers.rackspace import RackspaceAccessRuleType +from libcloud.common.types import LibcloudError + +from libcloud.test import unittest +from libcloud.test import MockHttpTestCase +from libcloud.test.file_fixtures import LoadBalancerFileFixtures +from libcloud.test.file_fixtures import OpenStackFixtures + + +class RackspaceLBTests(unittest.TestCase): + + def setUp(self): + RackspaceLBDriver.connectionCls.conn_classes = (None, + RackspaceLBMockHttp) + RackspaceLBMockHttp.type = None + self.driver = RackspaceLBDriver('user', 'key') + self.driver.connection.poll_interval = 0.0 + # normally authentication happens lazily, but we force it here + self.driver.connection._populate_hosts_and_request_paths() + + def test_force_auth_token_kwargs(self): + base_url = 'https://ord.loadbalancer.api.rackspacecloud.com/v1.0/11111' + kwargs = { + 'ex_force_auth_token': 'some-auth-token', + 'ex_force_base_url': base_url + } + driver = RackspaceLBDriver('user', 'key', **kwargs) + 
driver.list_balancers() + + self.assertEqual(kwargs['ex_force_auth_token'], + driver.connection.auth_token) + self.assertEqual('/v1.0/11111', + driver.connection.request_path) + + def test_force_auth_url_kwargs(self): + kwargs = { + 'ex_force_auth_version': '2.0', + 'ex_force_auth_url': 'https://identity.api.rackspace.com' + } + driver = RackspaceLBDriver('user', 'key', **kwargs) + + self.assertEqual(kwargs['ex_force_auth_url'], + driver.connection._ex_force_auth_url) + self.assertEqual(kwargs['ex_force_auth_version'], + driver.connection._auth_version) + + def test_gets_auth_2_0_endpoint_defaults_to_ord_region(self): + driver = RackspaceLBDriver('user', 'key', + ex_force_auth_version='2.0_password' + ) + driver.connection._populate_hosts_and_request_paths() + + self.assertEqual( + 'https://ord.loadbalancers.api.rackspacecloud.com/v1.0/11111', + driver.connection.get_endpoint()) + + def test_gets_auth_2_0_endpoint_for_dfw(self): + driver = RackspaceLBDriver('user', 'key', + ex_force_auth_version='2.0_password', + ex_force_region='dfw' + ) + driver.connection._populate_hosts_and_request_paths() + + self.assertEqual( + 'https://dfw.loadbalancers.api.rackspacecloud.com/v1.0/11111', + driver.connection.get_endpoint()) + + def test_list_protocols(self): + protocols = self.driver.list_protocols() + + self.assertEqual(len(protocols), 10) + self.assertTrue('http' in protocols) + + def test_ex_list_protocols_with_default_ports(self): + protocols = self.driver.ex_list_protocols_with_default_ports() + + self.assertEqual(len(protocols), 10) + self.assertTrue(('http', 80) in protocols) + + def test_list_supported_algorithms(self): + algorithms = self.driver.list_supported_algorithms() + + self.assertTrue(Algorithm.RANDOM in algorithms) + self.assertTrue(Algorithm.ROUND_ROBIN in algorithms) + self.assertTrue(Algorithm.LEAST_CONNECTIONS in algorithms) + self.assertTrue(Algorithm.WEIGHTED_ROUND_ROBIN in algorithms) + self.assertTrue(Algorithm.WEIGHTED_LEAST_CONNECTIONS in 
algorithms) + + def test_ex_list_algorithms(self): + algorithms = self.driver.ex_list_algorithm_names() + + self.assertTrue("RANDOM" in algorithms) + self.assertTrue("ROUND_ROBIN" in algorithms) + self.assertTrue("LEAST_CONNECTIONS" in algorithms) + self.assertTrue("WEIGHTED_ROUND_ROBIN" in algorithms) + self.assertTrue("WEIGHTED_LEAST_CONNECTIONS" in algorithms) + + def test_list_balancers(self): + balancers = self.driver.list_balancers() + + self.assertEqual(len(balancers), 2) + self.assertEqual(balancers[0].name, "test0") + self.assertEqual(balancers[0].id, "8155") + self.assertEqual(balancers[0].port, 80) + self.assertEqual(balancers[0].ip, "1.1.1.25") + self.assertEqual(balancers[1].name, "test1") + self.assertEqual(balancers[1].id, "8156") + + def test_list_balancers_ex_member_address(self): + RackspaceLBMockHttp.type = 'EX_MEMBER_ADDRESS' + balancers = self.driver.list_balancers(ex_member_address='127.0.0.1') + + self.assertEqual(len(balancers), 3) + self.assertEqual(balancers[0].name, "First Loadbalancer") + self.assertEqual(balancers[0].id, "1") + self.assertEqual(balancers[1].name, "Second Loadbalancer") + self.assertEqual(balancers[1].id, "2") + self.assertEqual(balancers[2].name, "Third Loadbalancer") + self.assertEqual(balancers[2].id, "8") + + def test_create_balancer(self): + balancer = self.driver.create_balancer(name='test2', + port=80, + algorithm=Algorithm.ROUND_ROBIN, + members=( + Member( + None, '10.1.0.10', 80, + extra={'condition': MemberCondition.DISABLED, + 'weight': 10}), + Member(None, '10.1.0.11', 80)) + ) + + self.assertEqual(balancer.name, 'test2') + self.assertEqual(balancer.id, '8290') + + def test_ex_create_balancer(self): + RackspaceLBDriver.connectionCls.conn_classes = (None, + RackspaceLBWithVIPMockHttp) + RackspaceLBMockHttp.type = None + driver = RackspaceLBDriver('user', 'key') + balancer = driver.ex_create_balancer(name='test2', + port=80, + algorithm=Algorithm.ROUND_ROBIN, + members=( + Member( + None, '10.1.0.11', 80),), + 
vip='12af' + ) + + self.assertEqual(balancer.name, 'test2') + self.assertEqual(balancer.id, '8290') + + def test_destroy_balancer(self): + balancer = self.driver.list_balancers()[0] + + ret = self.driver.destroy_balancer(balancer) + self.assertTrue(ret) + + def test_ex_destroy_balancers(self): + balancers = self.driver.list_balancers() + ret = self.driver.ex_destroy_balancers(balancers) + self.assertTrue(ret) + + def test_get_balancer(self): + balancer = self.driver.get_balancer(balancer_id='8290') + + self.assertEqual(balancer.name, 'test2') + self.assertEqual(balancer.id, '8290') + + def test_get_balancer_extra_vips(self): + balancer = self.driver.get_balancer(balancer_id='18940') + self.assertEqual(balancer.extra["virtualIps"], + [{"address": "50.56.49.149", + "id": 2359, + "type": "PUBLIC", + "ipVersion": "IPV4"}]) + + def test_get_balancer_extra_public_source_ipv4(self): + balancer = self.driver.get_balancer(balancer_id='18940') + self.assertEqual(balancer.extra["ipv4PublicSource"], '184.106.100.25') + + def test_get_balancer_extra_public_source_ipv6(self): + balancer = self.driver.get_balancer(balancer_id='18940') + self.assertEqual(balancer.extra["ipv6PublicSource"], + '2001:4801:7901::6/64') + + def test_get_balancer_extra_private_source_ipv4(self): + balancer = self.driver.get_balancer(balancer_id='18940') + self.assertEqual(balancer.extra["ipv4PrivateSource"], '10.183.252.25') + + def test_get_balancer_extra_members(self): + balancer = self.driver.get_balancer(balancer_id='8290') + members = balancer.extra['members'] + self.assertEqual(3, len(members)) + self.assertEqual('10.1.0.11', members[0].ip) + self.assertEqual('10.1.0.10', members[1].ip) + self.assertEqual('10.1.0.9', members[2].ip) + + def test_get_balancer_extra_created(self): + balancer = self.driver.get_balancer(balancer_id='8290') + + created_8290 = datetime.datetime(2011, 4, 7, 16, 27, 50) + self.assertEqual(created_8290, balancer.extra['created']) + + def 
test_get_balancer_extra_updated(self): + balancer = self.driver.get_balancer(balancer_id='8290') + + updated_8290 = datetime.datetime(2011, 4, 7, 16, 28, 12) + self.assertEqual(updated_8290, balancer.extra['updated']) + + def test_get_balancer_extra_access_list(self): + balancer = self.driver.get_balancer(balancer_id='94698') + + access_list = balancer.extra['accessList'] + + self.assertEqual(3, len(access_list)) + self.assertEqual(2883, access_list[0].id) + self.assertEqual("0.0.0.0/0", access_list[0].address) + self.assertEqual(RackspaceAccessRuleType.DENY, + access_list[0].rule_type) + + self.assertEqual(2884, access_list[1].id) + self.assertEqual("2001:4801:7901::6/64", + access_list[1].address) + self.assertEqual(RackspaceAccessRuleType.ALLOW, + access_list[1].rule_type) + + self.assertEqual(3006, access_list[2].id) + self.assertEqual("8.8.8.8/0", access_list[2].address) + self.assertEqual(RackspaceAccessRuleType.DENY, + access_list[2].rule_type) + + def test_get_balancer_algorithm(self): + balancer = self.driver.get_balancer(balancer_id='8290') + self.assertEqual(balancer.extra["algorithm"], Algorithm.RANDOM) + + def test_get_balancer_protocol(self): + balancer = self.driver.get_balancer(balancer_id='94695') + self.assertEqual(balancer.extra['protocol'], 'HTTP') + + def test_get_balancer_weighted_round_robin_algorithm(self): + balancer = self.driver.get_balancer(balancer_id='94692') + self.assertEqual(balancer.extra["algorithm"], + Algorithm.WEIGHTED_ROUND_ROBIN) + + def test_get_balancer_weighted_least_connections_algorithm(self): + balancer = self.driver.get_balancer(balancer_id='94693') + self.assertEqual(balancer.extra["algorithm"], + Algorithm.WEIGHTED_LEAST_CONNECTIONS) + + def test_get_balancer_unknown_algorithm(self): + balancer = self.driver.get_balancer(balancer_id='94694') + self.assertFalse('algorithm' in balancer.extra) + + def test_get_balancer_connect_health_monitor(self): + balancer = self.driver.get_balancer(balancer_id='94695') + 
balancer_health_monitor = balancer.extra["healthMonitor"] + + self.assertEqual(balancer_health_monitor.type, "CONNECT") + self.assertEqual(balancer_health_monitor.delay, 10) + self.assertEqual(balancer_health_monitor.timeout, 5) + self.assertEqual(balancer_health_monitor.attempts_before_deactivation, + 2) + + def test_get_balancer_http_health_monitor(self): + balancer = self.driver.get_balancer(balancer_id='94696') + balancer_health_monitor = balancer.extra["healthMonitor"] + + self.assertEqual(balancer_health_monitor.type, "HTTP") + self.assertEqual(balancer_health_monitor.delay, 10) + self.assertEqual(balancer_health_monitor.timeout, 5) + self.assertEqual(balancer_health_monitor.attempts_before_deactivation, + 2) + self.assertEqual(balancer_health_monitor.path, "/") + self.assertEqual(balancer_health_monitor.status_regex, + "^[234][0-9][0-9]$") + self.assertEqual(balancer_health_monitor.body_regex, + "Hello World!") + + def test_get_balancer_https_health_monitor(self): + balancer = self.driver.get_balancer(balancer_id='94697') + balancer_health_monitor = balancer.extra["healthMonitor"] + + self.assertEqual(balancer_health_monitor.type, "HTTPS") + self.assertEqual(balancer_health_monitor.delay, 15) + self.assertEqual(balancer_health_monitor.timeout, 12) + self.assertEqual(balancer_health_monitor.attempts_before_deactivation, + 5) + self.assertEqual(balancer_health_monitor.path, "/test") + self.assertEqual(balancer_health_monitor.status_regex, + "^[234][0-9][0-9]$") + self.assertEqual(balancer_health_monitor.body_regex, "abcdef") + + def test_get_balancer_connection_throttle(self): + balancer = self.driver.get_balancer(balancer_id='94695') + balancer_connection_throttle = balancer.extra["connectionThrottle"] + + self.assertEqual(balancer_connection_throttle.min_connections, 50) + self.assertEqual(balancer_connection_throttle.max_connections, 200) + self.assertEqual(balancer_connection_throttle.max_connection_rate, 50) + 
self.assertEqual(balancer_connection_throttle.rate_interval_seconds, + 10) + + def test_get_session_persistence(self): + balancer = self.driver.get_balancer(balancer_id='94695') + self.assertEqual(balancer.extra["sessionPersistenceType"], + "HTTP_COOKIE") + + def test_get_connection_logging(self): + balancer = self.driver.get_balancer(balancer_id='94695') + self.assertEqual(balancer.extra["connectionLoggingEnabled"], True) + + def test_get_error_page(self): + balancer = self.driver.get_balancer(balancer_id='18940') + error_page = self.driver.ex_get_balancer_error_page(balancer) + self.assertTrue("The service is temporarily unavailable" in error_page) + + def test_get_access_list(self): + balancer = self.driver.get_balancer(balancer_id='18940') + deny_rule, allow_rule = self.driver.ex_balancer_access_list(balancer) + + self.assertEqual(deny_rule.id, 2883) + self.assertEqual(deny_rule.rule_type, RackspaceAccessRuleType.DENY) + self.assertEqual(deny_rule.address, "0.0.0.0/0") + + self.assertEqual(allow_rule.id, 2884) + self.assertEqual(allow_rule.address, "2001:4801:7901::6/64") + self.assertEqual(allow_rule.rule_type, RackspaceAccessRuleType.ALLOW) + + def test_ex_create_balancer_access_rule(self): + balancer = self.driver.get_balancer(balancer_id='94698') + + rule = RackspaceAccessRule(rule_type=RackspaceAccessRuleType.DENY, + address='0.0.0.0/0') + + rule = self.driver.ex_create_balancer_access_rule(balancer, rule) + + self.assertEqual(2883, rule.id) + + def test_ex_create_balancer_access_rule_no_poll(self): + balancer = self.driver.get_balancer(balancer_id='94698') + + rule = RackspaceAccessRule(rule_type=RackspaceAccessRuleType.DENY, + address='0.0.0.0/0') + + resp = self.driver.ex_create_balancer_access_rule_no_poll(balancer, + rule) + + self.assertTrue(resp) + + def test_ex_create_balancer_access_rules(self): + balancer = self.driver.get_balancer(balancer_id='94699') + + rules = [RackspaceAccessRule(rule_type=RackspaceAccessRuleType.ALLOW, + 
address='2001:4801:7901::6/64'), + RackspaceAccessRule(rule_type=RackspaceAccessRuleType.DENY, + address='8.8.8.8/0')] + + rules = self.driver.ex_create_balancer_access_rules(balancer, rules) + + self.assertEqual(2, len(rules)) + self.assertEqual(2884, rules[0].id) + self.assertEqual(3006, rules[1].id) + + def test_ex_create_balancer_access_rules_no_poll(self): + balancer = self.driver.get_balancer(balancer_id='94699') + + rules = [RackspaceAccessRule(rule_type=RackspaceAccessRuleType.ALLOW, + address='2001:4801:7901::6/64'), + RackspaceAccessRule(rule_type=RackspaceAccessRuleType.DENY, + address='8.8.8.8/0')] + + resp = self.driver.ex_create_balancer_access_rules_no_poll(balancer, + rules) + + self.assertTrue(resp) + + def test_ex_destroy_balancer_access_rule(self): + balancer = self.driver.get_balancer(balancer_id='94698') + + rule = RackspaceAccessRule(id='1007', + rule_type=RackspaceAccessRuleType.ALLOW, + address="10.45.13.5/12" + ) + + balancer = self.driver.ex_destroy_balancer_access_rule(balancer, rule) + + rule_ids = [r.id for r in balancer.extra['accessList']] + + self.assertTrue(1007 not in rule_ids) + + def test_ex_destroy_balancer_access_rule_no_poll(self): + balancer = self.driver.get_balancer(balancer_id='94698') + + rule = RackspaceAccessRule(id=1007, + rule_type=RackspaceAccessRuleType.ALLOW, + address="10.45.13.5/12" + ) + + resp = self.driver.ex_destroy_balancer_access_rule_no_poll(balancer, + rule) + + self.assertTrue(resp) + + def test_ex_destroy_balancer_access_rules(self): + balancer = self.driver.get_balancer(balancer_id='94699') + balancer = self.driver.ex_destroy_balancer_access_rules(balancer, + balancer.extra['accessList']) + + self.assertEqual('94699', balancer.id) + + def test_ex_destroy_balancer_access_rules_no_poll(self): + balancer = self.driver.get_balancer(balancer_id='94699') + + resp = self.driver.ex_destroy_balancer_access_rules_no_poll(balancer, + balancer.extra['accessList']) + + self.assertTrue(resp) + + def 
test_ex_update_balancer_health_monitor(self): + balancer = self.driver.get_balancer(balancer_id='94695') + monitor = RackspaceHealthMonitor(type='CONNECT', delay=10, timeout=5, + attempts_before_deactivation=2) + + balancer = self.driver.ex_update_balancer_health_monitor( + balancer, monitor) + updated_monitor = balancer.extra['healthMonitor'] + + self.assertEqual('CONNECT', updated_monitor.type) + self.assertEqual(10, updated_monitor.delay) + self.assertEqual(5, updated_monitor.timeout) + self.assertEqual(2, updated_monitor.attempts_before_deactivation) + + def test_ex_update_balancer_http_health_monitor(self): + balancer = self.driver.get_balancer(balancer_id='94696') + monitor = RackspaceHTTPHealthMonitor(type='HTTP', delay=10, timeout=5, + attempts_before_deactivation=2, + path='/', + status_regex='^[234][0-9][0-9]$', + body_regex='Hello World!') + + balancer = self.driver.ex_update_balancer_health_monitor( + balancer, monitor) + updated_monitor = balancer.extra['healthMonitor'] + + self.assertEqual('HTTP', updated_monitor.type) + self.assertEqual(10, updated_monitor.delay) + self.assertEqual(5, updated_monitor.timeout) + self.assertEqual(2, updated_monitor.attempts_before_deactivation) + self.assertEqual('/', updated_monitor.path) + self.assertEqual('^[234][0-9][0-9]$', updated_monitor.status_regex) + self.assertEqual('Hello World!', updated_monitor.body_regex) + + def test_ex_update_balancer_health_monitor_no_poll(self): + balancer = self.driver.get_balancer(balancer_id='94695') + monitor = RackspaceHealthMonitor(type='CONNECT', delay=10, timeout=5, + attempts_before_deactivation=2) + + resp = self.driver.ex_update_balancer_health_monitor_no_poll(balancer, + monitor) + + self.assertTrue(resp) + + def test_ex_update_balancer_http_health_monitor_no_poll(self): + balancer = self.driver.get_balancer(balancer_id='94696') + monitor = RackspaceHTTPHealthMonitor(type='HTTP', delay=10, timeout=5, + attempts_before_deactivation=2, + path='/', + 
status_regex='^[234][0-9][0-9]$', + body_regex='Hello World!') + + resp = self.driver.ex_update_balancer_health_monitor_no_poll(balancer, + monitor) + + self.assertTrue(resp) + + def test_ex_update_balancer_http_health_monitor_with_no_option_body_regex(self): + balancer = self.driver.get_balancer(balancer_id='94700') + monitor = RackspaceHTTPHealthMonitor(type='HTTP', delay=10, timeout=5, + attempts_before_deactivation=2, + path='/', + status_regex='^[234][0-9][0-9]$', + body_regex='') + + balancer = self.driver.ex_update_balancer_health_monitor( + balancer, monitor) + updated_monitor = balancer.extra['healthMonitor'] + + self.assertEqual('HTTP', updated_monitor.type) + self.assertEqual(10, updated_monitor.delay) + self.assertEqual(5, updated_monitor.timeout) + self.assertEqual(2, updated_monitor.attempts_before_deactivation) + self.assertEqual('/', updated_monitor.path) + self.assertEqual('^[234][0-9][0-9]$', updated_monitor.status_regex) + self.assertEqual('', updated_monitor.body_regex) + + def test_ex_disable_balancer_health_monitor(self): + balancer = self.driver.get_balancer(balancer_id='8290') + balancer = self.driver.ex_disable_balancer_health_monitor(balancer) + + self.assertTrue('healthMonitor' not in balancer.extra) + + def test_ex_disable_balancer_health_monitor_no_poll(self): + balancer = self.driver.get_balancer(balancer_id='8290') + resp = self.driver.ex_disable_balancer_health_monitor_no_poll(balancer) + + self.assertTrue(resp) + + def test_ex_update_balancer_connection_throttle(self): + balancer = self.driver.get_balancer(balancer_id='94695') + connection_throttle = RackspaceConnectionThrottle(max_connections=200, + min_connections=50, + max_connection_rate=50, + rate_interval_seconds=10) + + balancer = self.driver.ex_update_balancer_connection_throttle(balancer, + connection_throttle) + updated_throttle = balancer.extra['connectionThrottle'] + + self.assertEqual(200, updated_throttle.max_connections) + self.assertEqual(50, 
updated_throttle.min_connections) + self.assertEqual(50, updated_throttle.max_connection_rate) + self.assertEqual(10, updated_throttle.rate_interval_seconds) + + def test_ex_update_balancer_connection_throttle_no_poll(self): + balancer = self.driver.get_balancer(balancer_id='94695') + connection_throttle = RackspaceConnectionThrottle(max_connections=200, + min_connections=50, + max_connection_rate=50, + rate_interval_seconds=10) + + resp = self.driver.ex_update_balancer_connection_throttle_no_poll( + balancer, connection_throttle) + + self.assertTrue(resp) + + def test_ex_disable_balancer_connection_throttle(self): + balancer = self.driver.get_balancer(balancer_id='8290') + balancer = self.driver.ex_disable_balancer_connection_throttle( + balancer) + + self.assertTrue('connectionThrottle' not in balancer.extra) + + def test_ex_disable_balancer_connection_throttle_no_poll(self): + balancer = self.driver.get_balancer(balancer_id='8290') + resp = self.driver.ex_disable_balancer_connection_throttle_no_poll( + balancer) + + self.assertTrue(resp) + + def test_ex_enable_balancer_connection_logging(self): + balancer = self.driver.get_balancer(balancer_id='94695') + balancer = self.driver.ex_enable_balancer_connection_logging( + balancer) + + self.assertTrue(balancer.extra["connectionLoggingEnabled"]) + + def test_ex_enable_balancer_connection_logging_no_poll(self): + balancer = self.driver.get_balancer(balancer_id='94695') + resp = self.driver.ex_enable_balancer_connection_logging_no_poll( + balancer) + + self.assertTrue(resp) + + def test_ex_disable_balancer_connection_logging(self): + balancer = self.driver.get_balancer(balancer_id='8290') + balancer = self.driver.ex_disable_balancer_connection_logging( + balancer + ) + + self.assertFalse(balancer.extra["connectionLoggingEnabled"]) + + def test_ex_disable_balancer_connection_logging_no_poll(self): + balancer = self.driver.get_balancer(balancer_id='8290') + resp = 
self.driver.ex_disable_balancer_connection_logging_no_poll( + balancer + ) + + self.assertTrue(resp) + + def test_ex_enable_balancer_session_persistence(self): + balancer = self.driver.get_balancer(balancer_id='94695') + balancer = self.driver.ex_enable_balancer_session_persistence(balancer) + + persistence_type = balancer.extra['sessionPersistenceType'] + self.assertEqual('HTTP_COOKIE', persistence_type) + + def test_ex_enable_balancer_session_persistence_no_poll(self): + balancer = self.driver.get_balancer(balancer_id='94695') + resp = self.driver.ex_enable_balancer_session_persistence_no_poll( + balancer) + + self.assertTrue(resp) + + def test_disable_balancer_session_persistence(self): + balancer = self.driver.get_balancer(balancer_id='8290') + balancer = self.driver.ex_disable_balancer_session_persistence( + balancer) + + self.assertTrue('sessionPersistenceType' not in balancer.extra) + + def test_disable_balancer_session_persistence_no_poll(self): + balancer = self.driver.get_balancer(balancer_id='8290') + resp = self.driver.ex_disable_balancer_session_persistence_no_poll( + balancer) + + self.assertTrue(resp) + + def test_ex_update_balancer_error_page(self): + balancer = self.driver.get_balancer(balancer_id='8290') + content = "Generic Error Page" + balancer = self.driver.ex_update_balancer_error_page( + balancer, content) + + error_page_content = self.driver.ex_get_balancer_error_page(balancer) + self.assertEqual(content, error_page_content) + + def test_ex_update_balancer_error_page_no_poll(self): + balancer = self.driver.get_balancer(balancer_id='8290') + content = "Generic Error Page" + resp = self.driver.ex_update_balancer_error_page_no_poll( + balancer, content) + + self.assertTrue(resp) + + def test_ex_disable_balancer_custom_error_page_no_poll(self): + balancer = self.driver.get_balancer(balancer_id='94695') + resp = self.driver.ex_disable_balancer_custom_error_page_no_poll( + balancer) + + self.assertTrue(resp) + + def 
test_ex_disable_balancer_custom_error_page(self): + fixtures = LoadBalancerFileFixtures('rackspace') + error_page_fixture = json.loads( + fixtures.load('error_page_default.json')) + + default_error_page = error_page_fixture['errorpage']['content'] + + balancer = self.driver.get_balancer(balancer_id='94695') + balancer = self.driver.ex_disable_balancer_custom_error_page(balancer) + + error_page_content = self.driver.ex_get_balancer_error_page(balancer) + self.assertEqual(default_error_page, error_page_content) + + def test_balancer_list_members(self): + expected = set(['10.1.0.10:80', '10.1.0.11:80', '10.1.0.9:8080']) + balancer = self.driver.get_balancer(balancer_id='8290') + members = balancer.list_members() + + self.assertEqual(len(members), 3) + self.assertEqual(members[0].balancer, balancer) + self.assertEqual(expected, set(["%s:%s" % (member.ip, member.port) for + member in members])) + + def test_balancer_members_extra_weight(self): + balancer = self.driver.get_balancer(balancer_id='8290') + members = balancer.list_members() + + self.assertEqual(12, members[0].extra['weight']) + self.assertEqual(8, members[1].extra['weight']) + + def test_balancer_members_extra_condition(self): + balancer = self.driver.get_balancer(balancer_id='8290') + members = balancer.list_members() + + self.assertEqual(MemberCondition.ENABLED, + members[0].extra['condition']) + self.assertEqual(MemberCondition.DISABLED, + members[1].extra['condition']) + self.assertEqual(MemberCondition.DRAINING, + members[2].extra['condition']) + + def test_balancer_members_extra_status(self): + balancer = self.driver.get_balancer(balancer_id='8290') + members = balancer.list_members() + + self.assertEqual('ONLINE', members[0].extra['status']) + self.assertEqual('OFFLINE', members[1].extra['status']) + self.assertEqual('DRAINING', members[2].extra['status']) + + def test_balancer_attach_member(self): + balancer = self.driver.get_balancer(balancer_id='8290') + extra = {'condition': 
MemberCondition.DISABLED, + 'weight': 10} + member = balancer.attach_member(Member(None, ip='10.1.0.12', + port='80', extra=extra)) + + self.assertEqual(member.ip, '10.1.0.12') + self.assertEqual(member.port, 80) + + def test_balancer_attach_member_with_no_condition_specified(self): + balancer = self.driver.get_balancer(balancer_id='8291') + member = balancer.attach_member(Member(None, ip='10.1.0.12', + port='80')) + + self.assertEqual(member.ip, '10.1.0.12') + self.assertEqual(member.port, 80) + + def test_balancer_attach_members(self): + balancer = self.driver.get_balancer(balancer_id='8292') + members = [Member(None, ip='10.1.0.12', port='80'), + Member(None, ip='10.1.0.13', port='80')] + + attached_members = self.driver.ex_balancer_attach_members(balancer, + members) + + first_member = attached_members[0] + second_member = attached_members[1] + self.assertEqual(first_member.ip, '10.1.0.12') + self.assertEqual(first_member.port, 80) + self.assertEqual(second_member.ip, '10.1.0.13') + self.assertEqual(second_member.port, 80) + + def test_balancer_detach_member(self): + balancer = self.driver.get_balancer(balancer_id='8290') + member = balancer.list_members()[0] + + ret = balancer.detach_member(member) + self.assertTrue(ret) + + def test_ex_detach_members(self): + balancer = self.driver.get_balancer(balancer_id='8290') + members = balancer.list_members() + + balancer = self.driver.ex_balancer_detach_members(balancer, members) + + self.assertEqual('8290', balancer.id) + + def test_ex_detach_members_no_poll(self): + balancer = self.driver.get_balancer(balancer_id='8290') + members = balancer.list_members() + + ret = self.driver.ex_balancer_detach_members_no_poll(balancer, members) + self.assertTrue(ret) + + def test_update_balancer_protocol(self): + balancer = LoadBalancer(id='3130', name='LB_update', + state='PENDING_UPDATE', ip='10.34.4.3', + port=80, driver=self.driver) + updated_balancer = self.driver.update_balancer( + balancer, protocol='HTTPS') + 
self.assertEqual('HTTPS', updated_balancer.extra['protocol']) + + def test_update_balancer_protocol_to_imapv2(self): + balancer = LoadBalancer(id='3135', name='LB_update', + state='PENDING_UPDATE', ip='10.34.4.3', + port=80, driver=self.driver) + updated_balancer = self.driver.update_balancer( + balancer, protocol='imapv2') + self.assertEqual('IMAPv2', updated_balancer.extra['protocol']) + + def test_update_balancer_protocol_to_imapv3(self): + balancer = LoadBalancer(id='3136', name='LB_update', + state='PENDING_UPDATE', ip='10.34.4.3', + port=80, driver=self.driver) + updated_balancer = self.driver.update_balancer( + balancer, protocol='IMAPV3') + self.assertEqual('IMAPv3', updated_balancer.extra['protocol']) + + def test_update_balancer_protocol_to_imapv4(self): + balancer = LoadBalancer(id='3137', name='LB_update', + state='PENDING_UPDATE', ip='10.34.4.3', + port=80, driver=self.driver) + updated_balancer = self.driver.update_balancer( + balancer, protocol='IMAPv4') + self.assertEqual('IMAPv4', updated_balancer.extra['protocol']) + + def test_update_balancer_port(self): + balancer = LoadBalancer(id='3131', name='LB_update', + state='PENDING_UPDATE', ip='10.34.4.3', + port=80, driver=self.driver) + updated_balancer = self.driver.update_balancer(balancer, port=1337) + self.assertEqual(1337, updated_balancer.port) + + def test_update_balancer_name(self): + balancer = LoadBalancer(id='3132', name='LB_update', + state='PENDING_UPDATE', ip='10.34.4.3', + port=80, driver=self.driver) + updated_balancer = self.driver.update_balancer( + balancer, name='new_lb_name') + self.assertEqual('new_lb_name', updated_balancer.name) + + def test_update_balancer_algorithm(self): + balancer = LoadBalancer(id='3133', name='LB_update', + state='PENDING_UPDATE', ip='10.34.4.3', + port=80, driver=self.driver) + updated_balancer = self.driver.update_balancer(balancer, + algorithm=Algorithm.ROUND_ROBIN) + self.assertEqual( + Algorithm.ROUND_ROBIN, updated_balancer.extra['algorithm']) + + 
def test_update_balancer_bad_algorithm_exception(self): + balancer = LoadBalancer(id='3134', name='LB_update', + state='PENDING_UPDATE', ip='10.34.4.3', + port=80, driver=self.driver) + try: + self.driver.update_balancer(balancer, + algorithm='HAVE_MERCY_ON_OUR_SERVERS') + except LibcloudError: + pass + else: + self.fail( + 'Should have thrown an exception with bad algorithm value') + + def test_ex_update_balancer_no_poll_protocol(self): + balancer = LoadBalancer(id='3130', name='LB_update', + state='PENDING_UPDATE', ip='10.34.4.3', + port=80, driver=self.driver) + action_succeeded = self.driver.ex_update_balancer_no_poll( + balancer, + protocol='HTTPS') + self.assertTrue(action_succeeded) + + def test_ex_update_balancer_no_poll_port(self): + balancer = LoadBalancer(id='3131', name='LB_update', + state='PENDING_UPDATE', ip='10.34.4.3', + port=80, driver=self.driver) + action_succeeded = self.driver.ex_update_balancer_no_poll( + balancer, + port=1337) + self.assertTrue(action_succeeded) + + def test_ex_update_balancer_no_poll_name(self): + balancer = LoadBalancer(id='3132', name='LB_update', + state='PENDING_UPDATE', ip='10.34.4.3', + port=80, driver=self.driver) + + action_succeeded = self.driver.ex_update_balancer_no_poll( + balancer, + name='new_lb_name') + self.assertTrue(action_succeeded) + + def test_ex_update_balancer_no_poll_algorithm(self): + balancer = LoadBalancer(id='3133', name='LB_update', + state='PENDING_UPDATE', ip='10.34.4.3', + port=80, driver=self.driver) + action_succeeded = self.driver.ex_update_balancer_no_poll(balancer, + algorithm=Algorithm.ROUND_ROBIN) + self.assertTrue(action_succeeded) + + def test_ex_update_balancer_no_poll_bad_algorithm_exception(self): + balancer = LoadBalancer(id='3134', name='LB_update', + state='PENDING_UPDATE', ip='10.34.4.3', + port=80, driver=self.driver) + try: + self.driver.update_balancer(balancer, + algorithm='HAVE_MERCY_ON_OUR_SERVERS') + except LibcloudError: + pass + else: + self.fail('Should have thrown 
exception with bad algorithm value') + + def test_ex_update_balancer_member_extra_attributes(self): + balancer = self.driver.get_balancer(balancer_id='8290') + members = self.driver.balancer_list_members(balancer) + + first_member = members[0] + + member = self.driver.ex_balancer_update_member(balancer, first_member, + condition=MemberCondition.ENABLED, weight=12) + + self.assertEqual(MemberCondition.ENABLED, member.extra['condition']) + self.assertEqual(12, member.extra['weight']) + + def test_ex_update_balancer_member_no_poll_extra_attributes(self): + balancer = self.driver.get_balancer(balancer_id='8290') + members = self.driver.balancer_list_members(balancer) + + first_member = members[0] + + resp = self.driver.ex_balancer_update_member_no_poll( + balancer, first_member, + condition=MemberCondition.ENABLED, weight=12) + self.assertTrue(resp) + + def test_ex_list_current_usage(self): + balancer = self.driver.get_balancer(balancer_id='8290') + usage = self.driver.ex_list_current_usage(balancer=balancer) + self.assertEqual( + usage['loadBalancerUsageRecords'][0]['incomingTransferSsl'], + 6182163) + + +class RackspaceUKLBTests(RackspaceLBTests): + + def setUp(self): + RackspaceLBDriver.connectionCls.conn_classes = (None, + RackspaceLBMockHttp) + RackspaceLBMockHttp.type = None + self.driver = RackspaceUKLBDriver('user', 'key') + # normally authentication happens lazily, but we force it here + self.driver.connection._populate_hosts_and_request_paths() + + +class RackspaceLBMockHttp(MockHttpTestCase): + fixtures = LoadBalancerFileFixtures('rackspace') + auth_fixtures = OpenStackFixtures() + + def _v2_0_tokens(self, method, url, body, headers): + body = self.fixtures.load('_v2_0__auth.json') + return (httplib.OK, body, headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_loadbalancers_protocols(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_loadbalancers_protocols.json') + return (httplib.ACCEPTED, body, {}, + 
httplib.responses[httplib.ACCEPTED]) + + def _v1_0_11111_loadbalancers_algorithms(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('v1_slug_loadbalancers_algorithms.json') + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('v1_slug_loadbalancers.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + elif method == "POST": + json_body = json.loads(body) + + loadbalancer_json = json_body['loadBalancer'] + member_1_json, member_2_json = loadbalancer_json['nodes'] + + self.assertEqual(loadbalancer_json['protocol'], 'HTTP') + self.assertEqual(loadbalancer_json['algorithm'], 'ROUND_ROBIN') + self.assertEqual(loadbalancer_json['virtualIps'][0]['type'], + 'PUBLIC') + self.assertEqual(member_1_json['condition'], 'DISABLED') + self.assertEqual(member_1_json['weight'], 10) + self.assertEqual(member_2_json['condition'], 'ENABLED') + + body = self.fixtures.load('v1_slug_loadbalancers_post.json') + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + elif method == 'DELETE': + balancers = self.fixtures.load('v1_slug_loadbalancers.json') + balancers_json = json.loads(balancers) + + for balancer in balancers_json['loadBalancers']: + id = balancer['id'] + self.assertTrue(urlencode([('id', id)]) in url, + msg='Did not delete balancer with id %d' % id) + + return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_EX_MEMBER_ADDRESS(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_loadbalancers_nodeaddress.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _v1_0_11111_loadbalancers_8155(self, method, url, body, headers): + if method == "DELETE": + return (httplib.ACCEPTED, "", {}, 
httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_8290(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('v1_slug_loadbalancers_8290.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_8290_nodes(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('v1_slug_loadbalancers_8290_nodes.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + elif method == "POST": + json_body = json.loads(body) + json_node = json_body['nodes'][0] + self.assertEqual('DISABLED', json_node['condition']) + self.assertEqual(10, json_node['weight']) + response_body = self.fixtures.load( + 'v1_slug_loadbalancers_8290_nodes_post.json') + return (httplib.ACCEPTED, response_body, {}, + httplib.responses[httplib.ACCEPTED]) + elif method == "DELETE": + nodes = self.fixtures.load('v1_slug_loadbalancers_8290_nodes.json') + json_nodes = json.loads(nodes) + + for node in json_nodes['nodes']: + id = node['id'] + self.assertTrue(urlencode([('id', id)]) in url, + msg='Did not delete member with id %d' % id) + + return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_8291(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('v1_slug_loadbalancers_8291.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_8291_nodes(self, method, url, body, headers): + if method == "POST": + json_body = json.loads(body) + json_node = json_body['nodes'][0] + self.assertEqual('ENABLED', json_node['condition']) + response_body = self.fixtures.load( + 'v1_slug_loadbalancers_8290_nodes_post.json') + return (httplib.ACCEPTED, response_body, {}, + httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def 
_v1_0_11111_loadbalancers_8292(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('v1_slug_loadbalancers_8292.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_8292_nodes(self, method, url, body, headers): + if method == "POST": + json_body = json.loads(body) + json_node_1 = json_body['nodes'][0] + json_node_2 = json_body['nodes'][1] + self.assertEqual('10.1.0.12', json_node_1['address']) + self.assertEqual('10.1.0.13', json_node_2['address']) + response_body = self.fixtures.load( + 'v1_slug_loadbalancers_8292_nodes_post.json') + return (httplib.ACCEPTED, response_body, {}, + httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_8290_nodes_30944(self, method, url, body, headers): + if method == "PUT": + json_body = json.loads(body) + self.assertEqual('ENABLED', json_body['condition']) + self.assertEqual(12, json_body['weight']) + return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) + elif method == "DELETE": + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_8290_healthmonitor(self, method, url, body, headers): + if method == "DELETE": + return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_8290_connectionthrottle(self, method, url, body, headers): + if method == 'DELETE': + return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_8290_connectionlogging(self, method, url, body, headers): + # Connection Logging uses a PUT to disable connection logging + if method == 'PUT': + json_body = json.loads(body) + self.assertFalse(json_body["connectionLogging"]["enabled"]) + + return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) + + 
raise NotImplementedError + + def _v1_0_11111_loadbalancers_8290_sessionpersistence(self, method, url, body, headers): + if method == 'DELETE': + return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_8290_errorpage(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load( + 'v1_slug_loadbalancers_8290_errorpage.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + elif method == 'PUT': + json_body = json.loads(body) + + self.assertEqual('Generic Error Page', + json_body['errorpage']['content']) + return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_18940(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load( + "v1_slug_loadbalancers_18940_ex_public_ips.json") + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_18945(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load( + "v1_slug_loadbalancers_18945_ex_public_ips.json") + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_18940_errorpage(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load( + "v1_slug_loadbalancers_18940_errorpage.json") + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_18940_accesslist(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load( + 'v1_slug_loadbalancers_18940_accesslist.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_18941(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load( + 
"v1_slug_loadbalancers_18941_ex_private_ips.json") + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_94692(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load( + "v1_slug_loadbalancers_94692_weighted_round_robin.json") + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_94693(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load( + "v1_slug_loadbalancers_94693_weighted_least_connections.json") + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_94694(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load( + "v1_slug_loadbalancers_94694_unknown_algorithm.json") + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_94695(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load( + "v1_slug_loadbalancers_94695_full_details.json") + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_94695_healthmonitor(self, method, url, body, headers): + if method == 'PUT': + json_body = json.loads(body) + + self.assertEqual('CONNECT', json_body['type']) + self.assertEqual(10, json_body['delay']) + self.assertEqual(5, json_body['timeout']) + self.assertEqual(2, json_body['attemptsBeforeDeactivation']) + + return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_94695_connectionthrottle(self, method, url, body, headers): + if method == 'PUT': + json_body = json.loads(body) + + self.assertEqual(50, json_body['minConnections']) + self.assertEqual(200, json_body['maxConnections']) + self.assertEqual(50, 
json_body['maxConnectionRate']) + self.assertEqual(10, json_body['rateInterval']) + + return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_94695_connectionlogging(self, method, url, body, headers): + if method == 'PUT': + json_body = json.loads(body) + + self.assertTrue(json_body["connectionLogging"]["enabled"]) + + return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_94695_sessionpersistence(self, method, url, body, headers): + if method == 'PUT': + json_body = json.loads(body) + + persistence_type = json_body[ + 'sessionPersistence']['persistenceType'] + self.assertEqual('HTTP_COOKIE', persistence_type) + + return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_94695_errorpage(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load("error_page_default.json") + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + elif method == 'DELETE': + return (httplib.OK, '', {}, httplib.responses[httplib.OK]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_94696(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load( + "v1_slug_loadbalancers_94696_http_health_monitor.json") + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_94696_healthmonitor(self, method, url, body, headers): + if method == 'PUT': + json_body = json.loads(body) + + self.assertEqual('HTTP', json_body['type']) + self.assertEqual(10, json_body['delay']) + self.assertEqual(5, json_body['timeout']) + self.assertEqual(2, json_body['attemptsBeforeDeactivation']) + self.assertEqual('/', json_body['path']) + self.assertEqual('^[234][0-9][0-9]$', json_body['statusRegex']) + self.assertEqual('Hello World!', 
json_body['bodyRegex']) + + return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_94697(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load( + "v1_slug_loadbalancers_94697_https_health_monitor.json") + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_94698(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load( + "v1_slug_loadbalancers_94698_with_access_list.json") + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_94698_accesslist(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load( + 'v1_slug_loadbalancers_94698_accesslist.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + elif method == 'POST': + json_body = json.loads(body) + + self.assertEqual('0.0.0.0/0', json_body['networkItem']['address']) + self.assertEqual('DENY', json_body['networkItem']['type']) + + return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_94699(self, method, url, body, headers): + if method == 'GET': + # Use the same fixture for batch deletes as for single deletes + body = self.fixtures.load( + 'v1_slug_loadbalancers_94698_with_access_list.json') + json_body = json.loads(body) + json_body['loadBalancer']['id'] = 94699 + + updated_body = json.dumps(json_body) + return (httplib.OK, updated_body, {}, httplib.responses[httplib.OK]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_94699_accesslist(self, method, url, body, headers): + if method == 'DELETE': + fixture = 'v1_slug_loadbalancers_94698_with_access_list.json' + fixture_json = json.loads(self.fixtures.load(fixture)) + access_list_json = fixture_json['loadBalancer']['accessList'] + + for access_rule 
in access_list_json: + id = access_rule['id'] + self.assertTrue(urlencode([('id', id)]) in url, + msg='Did not delete access rule with id %d' % id) + + return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) + elif method == 'POST': + + json_body = json.loads(body) + access_list = json_body['accessList'] + self.assertEqual('ALLOW', access_list[0]['type']) + self.assertEqual('2001:4801:7901::6/64', access_list[0]['address']) + self.assertEqual('DENY', access_list[1]['type']) + self.assertEqual('8.8.8.8/0', access_list[1]['address']) + + return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_94698_accesslist_1007(self, method, url, body, headers): + if method == 'DELETE': + return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_94700(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load( + "v1_slug_loadbalancers_94700_http_health_monitor_no_body_regex.json") + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_94700_healthmonitor(self, method, url, body, headers): + if method == 'PUT': + json_body = json.loads(body) + + self.assertEqual('HTTP', json_body['type']) + self.assertEqual(10, json_body['delay']) + self.assertEqual(5, json_body['timeout']) + self.assertEqual(2, json_body['attemptsBeforeDeactivation']) + self.assertEqual('/', json_body['path']) + self.assertEqual('^[234][0-9][0-9]$', json_body['statusRegex']) + self.assertFalse('bodyRegex' in json_body) + + return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_11111_loadbalancers_3130(self, method, url, body, headers): + """ update_balancer(b, protocol='HTTPS'), then get_balancer('3130') """ + if method == "PUT": + json_body = json.loads(body) + 
self.assertDictEqual(json_body, {'protocol': 'HTTPS'}) + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + elif method == "GET": + response_body = json.loads( + self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) + response_body['loadBalancer']['id'] = 3130 + response_body['loadBalancer']['protocol'] = 'HTTPS' + return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK]) + raise NotImplementedError + + def _v1_0_11111_loadbalancers_3131(self, method, url, body, headers): + """ update_balancer(b, port=443), then get_balancer('3131') """ + if method == "PUT": + json_body = json.loads(body) + self.assertDictEqual(json_body, {'port': 1337}) + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + elif method == "GET": + response_body = json.loads( + self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) + response_body['loadBalancer']['id'] = 3131 + response_body['loadBalancer']['port'] = 1337 + return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK]) + raise NotImplementedError + + def _v1_0_11111_loadbalancers_3132(self, method, url, body, headers): + """ update_balancer(b, name='new_lb_name'), then get_balancer('3132') """ + if method == "PUT": + json_body = json.loads(body) + self.assertDictEqual(json_body, {'name': 'new_lb_name'}) + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + elif method == "GET": + response_body = json.loads( + self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) + response_body['loadBalancer']['id'] = 3132 + response_body['loadBalancer']['name'] = 'new_lb_name' + return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK]) + raise NotImplementedError + + def _v1_0_11111_loadbalancers_3133(self, method, url, body, headers): + """ update_balancer(b, algorithm='ROUND_ROBIN'), then get_balancer('3133') """ + if method == "PUT": + json_body = json.loads(body) + self.assertDictEqual(json_body, {'algorithm': 
'ROUND_ROBIN'}) + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + elif method == "GET": + response_body = json.loads( + self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) + response_body['loadBalancer']['id'] = 3133 + response_body['loadBalancer']['algorithm'] = 'ROUND_ROBIN' + return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK]) + raise NotImplementedError + + def _v1_0_11111_loadbalancers_3134(self, method, url, body, headers): + """ update.balancer(b, algorithm='HAVE_MERCY_ON_OUR_SERVERS') """ + if method == "PUT": + return (httplib.BAD_REQUEST, "", {}, httplib.responses[httplib.BAD_REQUEST]) + raise NotImplementedError + + def _v1_0_11111_loadbalancers_3135(self, method, url, body, headers): + """ update_balancer(b, protocol='IMAPv3'), then get_balancer('3135') """ + if method == "PUT": + json_body = json.loads(body) + self.assertDictEqual(json_body, {'protocol': 'IMAPv2'}) + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + elif method == "GET": + response_body = json.loads( + self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) + response_body['loadBalancer']['id'] = 3135 + response_body['loadBalancer']['protocol'] = 'IMAPv2' + return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK]) + raise NotImplementedError + + def _v1_0_11111_loadbalancers_3136(self, method, url, body, headers): + """ update_balancer(b, protocol='IMAPv3'), then get_balancer('3136') """ + if method == "PUT": + json_body = json.loads(body) + self.assertDictEqual(json_body, {'protocol': 'IMAPv3'}) + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + elif method == "GET": + response_body = json.loads( + self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) + response_body['loadBalancer']['id'] = 3136 + response_body['loadBalancer']['protocol'] = 'IMAPv3' + return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK]) + raise 
NotImplementedError + + def _v1_0_11111_loadbalancers_3137(self, method, url, body, headers): + """ update_balancer(b, protocol='IMAPv3'), then get_balancer('3137') """ + if method == "PUT": + json_body = json.loads(body) + self.assertDictEqual(json_body, {'protocol': 'IMAPv4'}) + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + elif method == "GET": + response_body = json.loads( + self.fixtures.load("v1_slug_loadbalancers_3xxx.json")) + response_body['loadBalancer']['id'] = 3137 + response_body['loadBalancer']['protocol'] = 'IMAPv4' + return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK]) + raise NotImplementedError + + def _v1_0_11111_loadbalancers_8290_usage_current(self, method, url, body, + headers): + if method == 'GET': + body = self.fixtures.load( + 'v1_0_slug_loadbalancers_8290_usage_current.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + raise NotImplementedError + + +class RackspaceLBWithVIPMockHttp(MockHttpTestCase): + fixtures = LoadBalancerFileFixtures('rackspace') + auth_fixtures = OpenStackFixtures() + + def _v2_0_tokens(self, method, url, body, headers): + body = self.fixtures.load('_v2_0__auth.json') + return (httplib.OK, body, headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_loadbalancers(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('v1_slug_loadbalancers.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + elif method == "POST": + json_body = json.loads(body) + loadbalancer_json = json_body['loadBalancer'] + + self.assertEqual(loadbalancer_json['virtualIps'][0]['id'], '12af') + + body = self.fixtures.load('v1_slug_loadbalancers_post.json') + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + +if __name__ == "__main__": + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/pricing_test.json 
libcloud-0.15.1/libcloud/test/pricing_test.json --- libcloud-0.5.0/libcloud/test/pricing_test.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/pricing_test.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "compute": { + "foo": { + "1": 1.00, + "2": 2.00 + } + }, + + "updated": 1309019791 +} diff -Nru libcloud-0.5.0/libcloud/test/secrets.py-dist libcloud-0.15.1/libcloud/test/secrets.py-dist --- libcloud-0.5.0/libcloud/test/secrets.py-dist 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/secrets.py-dist 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,67 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Make a copy of this file named 'secrets.py' and add your credentials there. +# Note you can run unit tests without setting your credentials. 
+ +BLUEBOX_PARAMS = ('customer_id', 'api_key') +BRIGHTBOX_PARAMS = ('client_id', 'client_secret') +DREAMHOST_PARAMS = ('key',) +EC2_PARAMS = ('access_id', 'secret') +ECP_PARAMS = ('user_name', 'password') +GANDI_PARAMS = ('user',) +GCE_PARAMS = ('email_address', 'key') # Service Account Authentication +# GCE_PARAMS = ('client_id', 'client_secret') # Installed App Authentication +GCE_KEYWORD_PARAMS = {'project': 'project_name'} +HOSTINGCOM_PARAMS = ('user', 'secret') +IBM_PARAMS = ('user', 'secret') +# OPENSTACK_PARAMS = ('user_name', 'api_key', secure_bool, 'host', port_int) +OPENSTACK_PARAMS = ('user_name', 'api_key', False, 'host', 8774) +OPENNEBULA_PARAMS = ('user', 'key') +OPSOURCE_PARAMS = ('user', 'password') +RACKSPACE_PARAMS = ('user', 'key') +RACKSPACE_NOVA_PARAMS = ('user_name', 'api_key', False, 'host', 8774) +SLICEHOST_PARAMS = ('key',) +SOFTLAYER_PARAMS = ('user', 'api_key') +VCLOUD_PARAMS = ('user', 'secret') +VOXEL_PARAMS = ('key', 'secret') +VPSNET_PARAMS = ('user', 'key') +JOYENT_PARAMS = ('user', 'key') +VCL_PARAMS = ('user', 'pass', True, 'foo.bar.com') +GRIDSPOT_PARAMS = ('key',) +HOSTVIRTUAL_PARAMS = ('key',) +DIGITAL_OCEAN_PARAMS = ('user', 'key') +CLOUDFRAMES_PARAMS = ('key', 'secret', False, 'host', 8888) + +# Storage +STORAGE_S3_PARAMS = ('key', 'secret') +STORAGE_GOOGLE_STORAGE_PARAMS = ('key', 'secret') + +# Azure key is b64 encoded and must be decoded before signing requests +STORAGE_AZURE_BLOBS_PARAMS = ('account', 'cGFzc3dvcmQ=') + +# Loadbalancer +LB_BRIGHTBOX_PARAMS = ('user', 'key') +LB_ELB_PARAMS = ('access_id', 'secret', 'region') + +# DNS +DNS_PARAMS_LINODE = ('user', 'key') +DNS_PARAMS_ZERIGO = ('email', 'api token') +DNS_PARAMS_RACKSPACE = ('user', 'key') +DNS_PARAMS_HOSTVIRTUAL = ('key',) +DNS_PARAMS_ROUTE53 = ('access_id', 'secret') +DNS_GANDI = ('user', ) +DNS_PARAMS_GOOGLE = ('email_address', 'key') +DNS_KEYWORD_PARAMS_GOOGLE = {'project': 'project_name'} diff -Nru 
libcloud-0.5.0/libcloud/test/storage/fixtures/atmos/already_exists.xml libcloud-0.15.1/libcloud/test/storage/fixtures/atmos/already_exists.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/atmos/already_exists.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/atmos/already_exists.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,5 @@ + + + 1016 + The resource you are trying to create already exists. + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/atmos/empty_directory_listing.xml libcloud-0.15.1/libcloud/test/storage/fixtures/atmos/empty_directory_listing.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/atmos/empty_directory_listing.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/atmos/empty_directory_listing.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,4 @@ + + + + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/atmos/list_containers.xml libcloud-0.15.1/libcloud/test/storage/fixtures/atmos/list_containers.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/atmos/list_containers.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/atmos/list_containers.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,45 @@ + + + + + b21cb59a2ba339d1afdd4810010b0a5aba2ab6b9 + directory + container1 + + + 860855a4a445b5e45eeef4024371fd5c73ee3ada + directory + container2 + + + 651eae32634bf84529c74eabd555fda48c7cead6 + regular + not-a-container1 + + + 089293be672782a255498b0b05c4877acf23ef9e + directory + container3 + + + bd804e9f356b51844f93273ec8c94b2e274711d0 + directory + container4 + + + b40b0f3a17fad1d8c8b2085f668f8107bb400fa5 + regular + not-a-container-2 + + + 10bd74388b55a3c8c329ff5dd6d21bd55bfb7370 + directory + container5 + + + c04fa4aa3d0adcdf104baa0cef7b6279680a23c3 + directory + container6 + + + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/atmos/not_empty.xml 
libcloud-0.15.1/libcloud/test/storage/fixtures/atmos/not_empty.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/atmos/not_empty.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/atmos/not_empty.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,5 @@ + + + 1023 + The directory you are trying to delete is not empty. + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/atmos/not_found.xml libcloud-0.15.1/libcloud/test/storage/fixtures/atmos/not_found.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/atmos/not_found.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/atmos/not_found.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,5 @@ + + + 1003 + The requested object was not found. + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/azure_blobs/list_containers_1.xml libcloud-0.15.1/libcloud/test/storage/fixtures/azure_blobs/list_containers_1.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/azure_blobs/list_containers_1.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/azure_blobs/list_containers_1.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,29 @@ +ļ»æ + + 2 + + + container1 + https://account.blob.core.windows.net/container1 + + Mon, 07 Jan 2013 06:31:06 GMT + "0x8CFBAB7B4F23346" + unlocked + available + + + + + container2 + https://account.blob.core.windows.net/container2 + + Mon, 07 Jan 2013 06:31:07 GMT + "0x8CFBAB7B5B82D8E" + unlocked + available + + + + + /account/container3 + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/azure_blobs/list_containers_2.xml libcloud-0.15.1/libcloud/test/storage/fixtures/azure_blobs/list_containers_2.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/azure_blobs/list_containers_2.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/azure_blobs/list_containers_2.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,30 @@ +ļ»æ + + 
/account/container3 + 2 + + + container3 + https://account.blob.core.windows.net/container3 + + Mon, 07 Jan 2013 06:31:08 GMT + "0x8CFBAB7B6452A71" + unlocked + available + + + + + container4 + https://account.blob.core.windows.net/container4 + + Fri, 04 Jan 2013 08:32:41 GMT + "0x8CFB86D32305484" + unlocked + available + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/azure_blobs/list_containers_empty.xml libcloud-0.15.1/libcloud/test/storage/fixtures/azure_blobs/list_containers_empty.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/azure_blobs/list_containers_empty.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/azure_blobs/list_containers_empty.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,8 @@ + + + + + 100 + + + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/azure_blobs/list_objects_1.xml libcloud-0.15.1/libcloud/test/storage/fixtures/azure_blobs/list_objects_1.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/azure_blobs/list_objects_1.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/azure_blobs/list_objects_1.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,49 @@ +ļ»æ + + 2 + + + object1.txt + https://account.blob.core.windows.net/test_container/object1.txt + + Fri, 04 Jan 2013 09:48:06 GMT + 0x8CFB877BB56A6FB + 0 + application/octet-stream + + + 1B2M2Y8AsgTpgAmY7PhCfg== + + BlockBlob + unlocked + available + + + value1 + value2 + + + + object2.txt + https://account.blob.core.windows.net/test_container/object2.txt + + Sat, 05 Jan 2013 03:51:42 GMT + 0x8CFB90F1BA8CD8F + 1048576 + application/octet-stream + + + ttgbNgpWctgMJ0MPORU+LA== + + BlockBlob + unlocked + available + + + value1 + value2 + + + + 2!76!MDAwMDExIXNvbWUxMTcudHh0ITAwMDAyOCE5OTk5LTEyLTMxVDIzOjU5OjU5Ljk5OTk5OTlaIQ-- + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/azure_blobs/list_objects_2.xml 
libcloud-0.15.1/libcloud/test/storage/fixtures/azure_blobs/list_objects_2.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/azure_blobs/list_objects_2.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/azure_blobs/list_objects_2.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,43 @@ +ļ»æ + + object3.txt + 2 + + + object3.txt + https://account.blob.core.windows.net/test_container/object3.txt + + Sat, 05 Jan 2013 03:52:08 GMT + 0x8CFB90F2B6FC022 + 1048576 + application/octet-stream + + + ttgbNgpWctgMJ0MPORU+LA== + + BlockBlob + unlocked + available + + + + + object4.txt + https://account.blob.core.windows.net/test_container/object4.txt + + Fri, 04 Jan 2013 10:20:14 GMT + 0x8CFB87C38717450 + 0 + application/octet-stream + + 1B2M2Y8AsgTpgAmY7PhCfg== + + BlockBlob + unlocked + available + + + + + + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/azure_blobs/list_objects_empty.xml libcloud-0.15.1/libcloud/test/storage/fixtures/azure_blobs/list_objects_empty.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/azure_blobs/list_objects_empty.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/azure_blobs/list_objects_empty.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ +ļ»æ + + 2 + + + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/cloudfiles/list_container_objects_empty.json libcloud-0.15.1/libcloud/test/storage/fixtures/cloudfiles/list_container_objects_empty.json --- libcloud-0.5.0/libcloud/test/storage/fixtures/cloudfiles/list_container_objects_empty.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/cloudfiles/list_container_objects_empty.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{} diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/cloudfiles/list_container_objects.json libcloud-0.15.1/libcloud/test/storage/fixtures/cloudfiles/list_container_objects.json --- 
libcloud-0.5.0/libcloud/test/storage/fixtures/cloudfiles/list_container_objects.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/cloudfiles/list_container_objects.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,14 @@ +[ + {"name":"foo test 1","hash":"16265549b5bda64ecdaa5156de4c97cc", + "bytes":1160520,"content_type":"application/zip", + "last_modified":"2011-01-25T22:01:50.351810"}, + {"name":"foo test 2","hash":"16265549b5bda64ecdaa5156de4c97bb", + "bytes":1160520,"content_type":"application/zip", + "last_modified":"2011-01-25T22:01:50.351810"}, + {"name":"foo tes 3","hash":"16265549b5bda64ecdaa5156de4c97ee", + "bytes":1160520,"content_type":"application/zip", + "last_modified":"2011-01-25T22:01:46.549890"}, + {"name":"foo test 3","hash":"16265549b5bda64ecdaa5156de4c97ff", + "bytes":1160520,"content_type":"application/text", + "last_modified":"2011-01-25T22:01:50.351810"} +] diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted1.json libcloud-0.15.1/libcloud/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted1.json --- libcloud-0.5.0/libcloud/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted1.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted1.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,11 @@ +[ + {"name":"foo-test-1","hash":"16265549b5bda64ecdaa5156de4c97cc", + "bytes":1160520,"content_type":"application/zip", + "last_modified":"2011-01-25T22:01:50.351810"}, + {"name":"foo-test-2","hash":"16265549b5bda64ecdaa5156de4c97bb", + "bytes":1160520,"content_type":"application/zip", + "last_modified":"2011-01-25T22:01:50.351810"}, + {"name":"foo-test-3","hash":"16265549b5bda64ecdaa5156de4c97ee", + "bytes":1160520,"content_type":"application/zip", + "last_modified":"2011-01-25T22:01:46.549890"} +] diff -Nru 
libcloud-0.5.0/libcloud/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted2.json libcloud-0.15.1/libcloud/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted2.json --- libcloud-0.5.0/libcloud/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted2.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted2.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,8 @@ +[ + {"name":"foo-test-4","hash":"16265549b5bda64ecdaa5156de4c97cc", + "bytes":1160520,"content_type":"application/zip", + "last_modified":"2011-01-25T22:01:50.351810"}, + {"name":"foo-test-5","hash":"16265549b5bda64ecdaa5156de4c97bb", + "bytes":1160520,"content_type":"application/zip", + "last_modified":"2011-01-25T22:01:50.351810"} +] diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/cloudfiles/list_containers_empty.json libcloud-0.15.1/libcloud/test/storage/fixtures/cloudfiles/list_containers_empty.json --- libcloud-0.5.0/libcloud/test/storage/fixtures/cloudfiles/list_containers_empty.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/cloudfiles/list_containers_empty.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{} diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/cloudfiles/list_containers.json libcloud-0.15.1/libcloud/test/storage/fixtures/cloudfiles/list_containers.json --- libcloud-0.5.0/libcloud/test/storage/fixtures/cloudfiles/list_containers.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/cloudfiles/list_containers.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,5 @@ +[ + {"name":"container1","count":4,"bytes":3484450}, + {"name":"container2","count":120,"bytes":340084450}, + {"name":"container3","count":0,"bytes":0} +] diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/cloudfiles/meta_data.json 
libcloud-0.15.1/libcloud/test/storage/fixtures/cloudfiles/meta_data.json --- libcloud-0.5.0/libcloud/test/storage/fixtures/cloudfiles/meta_data.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/cloudfiles/meta_data.json 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1 @@ +{"bytes_used": 1234567, "container_count": 10, "object_count": 400} diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/cloudfiles/_v2_0__auth.json libcloud-0.15.1/libcloud/test/storage/fixtures/cloudfiles/_v2_0__auth.json --- libcloud-0.5.0/libcloud/test/storage/fixtures/cloudfiles/_v2_0__auth.json 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/cloudfiles/_v2_0__auth.json 2013-12-12 09:01:49.000000000 +0000 @@ -0,0 +1,128 @@ +{ + "access": { + "token": { + "id": "aaaaaaaaaaaa-bbb-cccccccccccccc", + "expires": "2031-11-23T21:00:14.000-06:00" + }, + "serviceCatalog": [ + { + "endpoints": [ + { + "region": "ORD", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + "publicURL": "https://cdn.clouddrive.com/v1/MossoCloudFS", + "version": { + "versionInfo": "https://cdn2.clouddrive.com/v1/", + "versionList": "https://cdn2.clouddrive.com/", + "versionId": "1" + } + }, + { + "region": "LON", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + "publicURL": "https://cdn.clouddrive.com/v1/MossoCloudFS", + "version": { + "versionInfo": "https://cdn2.clouddrive.com/v1/", + "versionList": "https://cdn2.clouddrive.com/", + "versionId": "1" + } + } + + ], + "name": "cloudFilesCDN", + "type": "rax:object-cdn" + }, + { + "endpoints": [ + { + "region": "ORD", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + "publicURL": "https://storage4.ord1.clouddrive.com/v1/MossoCloudFS", + "internalURL": "https://snet-storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" + }, + { + "region": "LON", + "tenantId": "MossoCloudFS_11111-111111111-1111111111-1111111", + 
"publicURL": "https://storage4.lon1.clouddrive.com/v1/MossoCloudFS", + "internalURL": "https://snet-storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111" + } + ], + "name": "cloudFiles", + "type": "object-store" + }, + { + "endpoints": [ + { + "tenantId": "1337", + "publicURL": "https://servers.api.rackspacecloud.com/v1.0/1337", + "version": { + "versionInfo": "https://servers.api.rackspacecloud.com/v1.0/", + "versionList": "https://servers.api.rackspacecloud.com/", + "versionId": "1.0" + } + } + ], + "name": "cloudServers", + "type": "compute" + }, + { + "endpoints": [ + { + "region": "RegionOne", + "tenantId": "1337", + "publicURL": "https://127.0.0.1/v2/1337", + "versionInfo": "https://127.0.0.1/v2/", + "versionList": "https://127.0.0.1/", + "versionId": "2" + } + ], + "name": "nova", + "type": "compute" + }, + { + "endpoints": [ + { + "region": "DFW", + "tenantId": "613469", + "publicURL": "https://dfw.servers.api.rackspacecloud.com/v2/1337", + "versionInfo": "https://dfw.servers.api.rackspacecloud.com/v2/", + "versionList": "https://dfw.servers.api.rackspacecloud.com/", + "versionId": "2" + }, + { + "region": "ORD", + "tenantId": "613469", + "publicURL": "https://ord.servers.api.rackspacecloud.com/v2/1337", + "versionInfo": "https://ord.servers.api.rackspacecloud.com/v2/", + "versionList": "https://ord.servers.api.rackspacecloud.com/", + "versionId": "2" + } + ], + "name": "cloudServersOpenStack", + "type": "compute" + }, + { + "endpoints": [ + { + "region": "DFW", + "tenantId": "1337", + "publicURL": "https://preprod.dfw.servers.api.rackspacecloud.com/v2/1337" + } + ], + "name": "cloudServersPreprod", + "type": "compute" + } + ], + "user": { + "id": "7", + "roles": [ + { + "id": "identity:default", + "description": "Default Role.", + "name": "identity:default" + } + ], + "name": "testuser" + } + } +} diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/google_storage/list_container_objects_empty.xml 
libcloud-0.15.1/libcloud/test/storage/fixtures/google_storage/list_container_objects_empty.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/google_storage/list_container_objects_empty.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/google_storage/list_container_objects_empty.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,8 @@ + + + test_container + + + 1000 + false + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/google_storage/list_container_objects_not_exhausted1.xml libcloud-0.15.1/libcloud/test/storage/fixtures/google_storage/list_container_objects_not_exhausted1.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/google_storage/list_container_objects_not_exhausted1.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/google_storage/list_container_objects_not_exhausted1.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,38 @@ + + + test_container + + + 1000 + true + + 1.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + + 2.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + + 3.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/google_storage/list_container_objects_not_exhausted2.xml libcloud-0.15.1/libcloud/test/storage/fixtures/google_storage/list_container_objects_not_exhausted2.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/google_storage/list_container_objects_not_exhausted2.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/google_storage/list_container_objects_not_exhausted2.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,28 @@ + + + test_container + + + 3 + false + + 4.zip + 2011-04-09T19:05:18.000Z 
+ "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + + 5.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/google_storage/list_container_objects.xml libcloud-0.15.1/libcloud/test/storage/fixtures/google_storage/list_container_objects.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/google_storage/list_container_objects.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/google_storage/list_container_objects.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,18 @@ + + + test_container + + + 1000 + false + + 1.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/google_storage/list_containers_empty.xml libcloud-0.15.1/libcloud/test/storage/fixtures/google_storage/list_containers_empty.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/google_storage/list_containers_empty.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/google_storage/list_containers_empty.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,9 @@ + + + + af4rf45db0927637c66fb848dfc972b8b5126e1237bde6fe02862b11481fdfd9 + foobar + + + + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/google_storage/list_containers.xml libcloud-0.15.1/libcloud/test/storage/fixtures/google_storage/list_containers.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/google_storage/list_containers.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/google_storage/list_containers.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,16 @@ + + + af4rf45db0927637c66fb848dfc972b8b5126e1237bde6fe02862b11481fdfd9 + foobar + + + + test1 + 2011-04-09T12:34:49.000Z + + + test2 
+ 2011-02-09T12:34:49.000Z + + + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/s3/complete_multipart.xml libcloud-0.15.1/libcloud/test/storage/fixtures/s3/complete_multipart.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/s3/complete_multipart.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/s3/complete_multipart.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + http://Example-Bucket.s3.amazonaws.com/Example-Object + Example-Bucket + Example-Object + "3858f62230ac3c915f300c664312c11f-9" + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/s3/initiate_multipart.xml libcloud-0.15.1/libcloud/test/storage/fixtures/s3/initiate_multipart.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/s3/initiate_multipart.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/s3/initiate_multipart.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,6 @@ + + + example-bucket + example-object + VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/s3/list_container_objects_empty.xml libcloud-0.15.1/libcloud/test/storage/fixtures/s3/list_container_objects_empty.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/s3/list_container_objects_empty.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/s3/list_container_objects_empty.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,8 @@ + + + test_container + + + 1000 + false + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/s3/list_container_objects_not_exhausted1.xml libcloud-0.15.1/libcloud/test/storage/fixtures/s3/list_container_objects_not_exhausted1.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/s3/list_container_objects_not_exhausted1.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/s3/list_container_objects_not_exhausted1.xml 2013-08-30 12:21:18.000000000 +0000 @@ 
-0,0 +1,38 @@ + + + test_container + + + 1000 + true + + 1.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + + 2.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + + 3.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/s3/list_container_objects_not_exhausted2.xml libcloud-0.15.1/libcloud/test/storage/fixtures/s3/list_container_objects_not_exhausted2.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/s3/list_container_objects_not_exhausted2.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/s3/list_container_objects_not_exhausted2.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,28 @@ + + + test_container + + + 3 + false + + 4.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + + 5.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/s3/list_container_objects.xml libcloud-0.15.1/libcloud/test/storage/fixtures/s3/list_container_objects.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/s3/list_container_objects.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/s3/list_container_objects.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,18 @@ + + + test_container + + + 1000 + false + + 1.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/s3/list_containers_empty.xml 
libcloud-0.15.1/libcloud/test/storage/fixtures/s3/list_containers_empty.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/s3/list_containers_empty.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/s3/list_containers_empty.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,9 @@ + + + + af4rf45db0927637c66fb848dfc972b8b5126e1237bde6fe02862b11481fdfd9 + foobar + + + + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/s3/list_containers.xml libcloud-0.15.1/libcloud/test/storage/fixtures/s3/list_containers.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/s3/list_containers.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/s3/list_containers.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,16 @@ + + + af4rf45db0927637c66fb848dfc972b8b5126e1237bde6fe02862b11481fdfd9 + foobar + + + + test1 + 2011-04-09T12:34:49.000Z + + + test2 + 2011-02-09T12:34:49.000Z + + + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/s3/list_multipart_1.xml libcloud-0.15.1/libcloud/test/storage/fixtures/s3/list_multipart_1.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/s3/list_multipart_1.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/s3/list_multipart_1.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,52 @@ + + + foo_bar_container + + + my-movie.m2ts + YW55IGlkZWEgd2h5IGVsdmluZydzIHVwbG9hZCBmYWlsZWQ + 3 + true + + my-divisor + XMgbGlrZSBlbHZpbmcncyBub3QgaGF2aW5nIG11Y2ggbHVjaw + + arn:aws:iam::111122223333:user/user1-11111a31-17b5-4fb7-9df5-b111111f13de + user1-11111a31-17b5-4fb7-9df5-b111111f13de + + + 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a + OwnerDisplayName + + STANDARD + 2010-11-10T20:48:33.000Z + + + my-movie.m2ts + VXBsb2FkIElEIGZvciBlbHZpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA + + b1d16700c70b0b05597d7acd6a3f92be + InitiatorDisplayName + + + b1d16700c70b0b05597d7acd6a3f92be + OwnerDisplayName + + STANDARD + 
2010-11-10T20:48:33.000Z + + + my-movie.m2ts + YW55IGlkZWEgd2h5IGVsdmluZydzIHVwbG9hZCBmYWlsZWQ + + arn:aws:iam::444455556666:user/user1-22222a31-17b5-4fb7-9df5-b222222f13de + user1-22222a31-17b5-4fb7-9df5-b222222f13de + + + b1d16700c70b0b05597d7acd6a3f92be + OwnerDisplayName + + STANDARD + 2010-11-10T20:49:33.000Z + + diff -Nru libcloud-0.5.0/libcloud/test/storage/fixtures/s3/list_multipart_2.xml libcloud-0.15.1/libcloud/test/storage/fixtures/s3/list_multipart_2.xml --- libcloud-0.5.0/libcloud/test/storage/fixtures/s3/list_multipart_2.xml 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/fixtures/s3/list_multipart_2.xml 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,52 @@ + + + foo_bar_container + + + + + 3 + false + + my-divisor + XMgbGlrZSBlbHZpbmcncyBub3QgaGF2aW5nIG11Y2ggbHVjaw + + arn:aws:iam::111122223333:user/user1-11111a31-17b5-4fb7-9df5-b111111f13de + user1-11111a31-17b5-4fb7-9df5-b111111f13de + + + 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a + OwnerDisplayName + + STANDARD + 2010-11-10T20:48:33.000Z + + + my-movie.m2ts + VXBsb2FkIElEIGZvciBlbHZpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA + + b1d16700c70b0b05597d7acd6a3f92be + InitiatorDisplayName + + + b1d16700c70b0b05597d7acd6a3f92be + OwnerDisplayName + + STANDARD + 2010-11-10T20:48:33.000Z + + + my-movie.m2ts + YW55IGlkZWEgd2h5IGVsdmluZydzIHVwbG9hZCBmYWlsZWQ + + arn:aws:iam::444455556666:user/user1-22222a31-17b5-4fb7-9df5-b222222f13de + user1-22222a31-17b5-4fb7-9df5-b222222f13de + + + b1d16700c70b0b05597d7acd6a3f92be + OwnerDisplayName + + STANDARD + 2010-11-10T20:49:33.000Z + + diff -Nru libcloud-0.5.0/libcloud/test/storage/test_atmos.py libcloud-0.15.1/libcloud/test/storage/test_atmos.py --- libcloud-0.5.0/libcloud/test/storage/test_atmos.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/test_atmos.py 2013-12-12 09:01:49.000000000 +0000 @@ -0,0 +1,772 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# 
contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import base64 +import os.path +import sys +import unittest + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlparse +from libcloud.utils.py3 import b + +import libcloud.utils.files + +from libcloud.common.types import LibcloudError +from libcloud.storage.base import Container, Object +from libcloud.storage.types import ContainerAlreadyExistsError, \ + ContainerDoesNotExistError, \ + ContainerIsNotEmptyError, \ + ObjectDoesNotExistError +from libcloud.storage.drivers.atmos import AtmosConnection, AtmosDriver +from libcloud.storage.drivers.dummy import DummyIterator + +from libcloud.test import StorageMockHttp, MockRawResponse +from libcloud.test.file_fixtures import StorageFileFixtures + + +class AtmosTests(unittest.TestCase): + + def setUp(self): + AtmosDriver.connectionCls.conn_classes = (None, AtmosMockHttp) + AtmosDriver.connectionCls.rawResponseCls = AtmosMockRawResponse + AtmosDriver.path = '' + AtmosMockHttp.type = None + AtmosMockHttp.upload_created = False + AtmosMockRawResponse.type = None + self.driver = AtmosDriver('dummy', base64.b64encode(b('dummy'))) + self._remove_test_file() + + def tearDown(self): + self._remove_test_file() + + def _remove_test_file(self): + file_path = os.path.abspath(__file__) + '.temp' + + try: + 
os.unlink(file_path) + except OSError: + pass + + def test_list_containers(self): + AtmosMockHttp.type = 'EMPTY' + containers = self.driver.list_containers() + self.assertEqual(len(containers), 0) + + AtmosMockHttp.type = None + containers = self.driver.list_containers() + self.assertEqual(len(containers), 6) + + def test_list_container_objects(self): + container = Container(name='test_container', extra={}, + driver=self.driver) + + AtmosMockHttp.type = 'EMPTY' + objects = self.driver.list_container_objects(container=container) + self.assertEqual(len(objects), 0) + + AtmosMockHttp.type = None + objects = self.driver.list_container_objects(container=container) + self.assertEqual(len(objects), 2) + + obj = [o for o in objects if o.name == 'not-a-container1'][0] + self.assertEqual(obj.meta_data['object_id'], + '651eae32634bf84529c74eabd555fda48c7cead6') + self.assertEqual(obj.container.name, 'test_container') + + def test_get_container(self): + container = self.driver.get_container(container_name='test_container') + self.assertEqual(container.name, 'test_container') + self.assertEqual(container.extra['object_id'], + 'b21cb59a2ba339d1afdd4810010b0a5aba2ab6b9') + + def test_get_container_escaped(self): + container = self.driver.get_container( + container_name='test & container') + self.assertEqual(container.name, 'test & container') + self.assertEqual(container.extra['object_id'], + 'b21cb59a2ba339d1afdd4810010b0a5aba2ab6b9') + + def test_get_container_not_found(self): + try: + self.driver.get_container(container_name='not_found') + except ContainerDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_container_success(self): + container = self.driver.create_container( + container_name='test_create_container') + self.assertTrue(isinstance(container, Container)) + self.assertEqual(container.name, 'test_create_container') + self.assertEqual(container.extra['object_id'], + '31a27b593629a3fe59f887fd973fd953e80062ce') + + def 
test_create_container_already_exists(self): + AtmosMockHttp.type = 'ALREADY_EXISTS' + + try: + self.driver.create_container( + container_name='test_create_container') + except ContainerAlreadyExistsError: + pass + else: + self.fail( + 'Container already exists but an exception was not thrown') + + def test_delete_container_success(self): + container = Container(name='foo_bar_container', extra={}, driver=self) + result = self.driver.delete_container(container=container) + self.assertTrue(result) + + def test_delete_container_not_found(self): + AtmosMockHttp.type = 'NOT_FOUND' + container = Container(name='foo_bar_container', extra={}, driver=self) + try: + self.driver.delete_container(container=container) + except ContainerDoesNotExistError: + pass + else: + self.fail( + 'Container does not exist but an exception was not thrown') + + def test_delete_container_not_empty(self): + AtmosMockHttp.type = 'NOT_EMPTY' + container = Container(name='foo_bar_container', extra={}, driver=self) + try: + self.driver.delete_container(container=container) + except ContainerIsNotEmptyError: + pass + else: + self.fail('Container is not empty but an exception was not thrown') + + def test_get_object_success(self): + obj = self.driver.get_object(container_name='test_container', + object_name='test_object') + self.assertEqual(obj.container.name, 'test_container') + self.assertEqual(obj.size, 555) + self.assertEqual(obj.hash, '6b21c4a111ac178feacf9ec9d0c71f17') + self.assertEqual(obj.extra['object_id'], + '322dce3763aadc41acc55ef47867b8d74e45c31d6643') + self.assertEqual( + obj.extra['last_modified'], 'Tue, 25 Jan 2011 22:01:49 GMT') + self.assertEqual(obj.meta_data['foo-bar'], 'test 1') + self.assertEqual(obj.meta_data['bar-foo'], 'test 2') + + def test_get_object_escaped(self): + obj = self.driver.get_object(container_name='test & container', + object_name='test & object') + self.assertEqual(obj.container.name, 'test & container') + self.assertEqual(obj.size, 555) + 
self.assertEqual(obj.hash, '6b21c4a111ac178feacf9ec9d0c71f17') + self.assertEqual(obj.extra['object_id'], + '322dce3763aadc41acc55ef47867b8d74e45c31d6643') + self.assertEqual( + obj.extra['last_modified'], 'Tue, 25 Jan 2011 22:01:49 GMT') + self.assertEqual(obj.meta_data['foo-bar'], 'test 1') + self.assertEqual(obj.meta_data['bar-foo'], 'test 2') + + def test_get_object_not_found(self): + try: + self.driver.get_object(container_name='test_container', + object_name='not_found') + except ObjectDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_delete_object_success(self): + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self.driver) + status = self.driver.delete_object(obj=obj) + self.assertTrue(status) + + def test_delete_object_escaped_success(self): + container = Container(name='foo & bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo & bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self.driver) + status = self.driver.delete_object(obj=obj) + self.assertTrue(status) + + def test_delete_object_not_found(self): + AtmosMockHttp.type = 'NOT_FOUND' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self.driver) + try: + self.driver.delete_object(obj=obj) + except ObjectDoesNotExistError: + pass + else: + self.fail('Object does not exist but an exception was not thrown') + + def test_download_object_success(self): + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self.driver) + destination_path = os.path.abspath(__file__) + 
'.temp' + result = self.driver.download_object(obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + self.assertTrue(result) + + def test_download_object_escaped_success(self): + container = Container(name='foo & bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo & bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self.driver) + destination_path = os.path.abspath(__file__) + '.temp' + result = self.driver.download_object(obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + self.assertTrue(result) + + def test_download_object_success_not_found(self): + AtmosMockRawResponse.type = 'NOT_FOUND' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, + meta_data=None, + driver=self.driver) + destination_path = os.path.abspath(__file__) + '.temp' + try: + self.driver.download_object( + obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + except ObjectDoesNotExistError: + pass + else: + self.fail('Object does not exist but an exception was not thrown') + + def test_download_object_as_stream(self): + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self.driver) + + stream = self.driver.download_object_as_stream( + obj=obj, chunk_size=None) + self.assertTrue(hasattr(stream, '__iter__')) + + def test_download_object_as_stream_escaped(self): + container = Container(name='foo & bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo & bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self.driver) + + stream = 
self.driver.download_object_as_stream( + obj=obj, chunk_size=None) + self.assertTrue(hasattr(stream, '__iter__')) + + def test_upload_object_success(self): + def upload_file(self, response, file_path, chunked=False, + calculate_hash=True): + return True, 'hash343hhash89h932439jsaa89', 1000 + + old_func = AtmosDriver._upload_file + AtmosDriver._upload_file = upload_file + path = os.path.abspath(__file__) + container = Container(name='fbc', extra={}, driver=self) + object_name = 'ftu' + extra = {'meta_data': {'some-value': 'foobar'}} + obj = self.driver.upload_object(file_path=path, container=container, + extra=extra, object_name=object_name) + self.assertEqual(obj.name, 'ftu') + self.assertEqual(obj.size, 1000) + self.assertTrue('some-value' in obj.meta_data) + AtmosDriver._upload_file = old_func + + def test_upload_object_no_content_type(self): + def no_content_type(name): + return None, None + + old_func = libcloud.utils.files.guess_file_mime_type + libcloud.utils.files.guess_file_mime_type = no_content_type + file_path = os.path.abspath(__file__) + container = Container(name='fbc', extra={}, driver=self) + object_name = 'ftu' + obj = self.driver.upload_object(file_path=file_path, + container=container, + object_name=object_name) + + # Just check that the file was uploaded OK, as the fallback + # Content-Type header should be set (application/octet-stream). 
+ self.assertEqual(obj.name, object_name) + libcloud.utils.files.guess_file_mime_type = old_func + + def test_upload_object_error(self): + def dummy_content_type(name): + return 'application/zip', None + + def send(instance): + raise Exception('') + + old_func1 = libcloud.utils.files.guess_file_mime_type + libcloud.utils.files.guess_file_mime_type = dummy_content_type + old_func2 = AtmosMockHttp.send + AtmosMockHttp.send = send + + file_path = os.path.abspath(__file__) + container = Container(name='fbc', extra={}, driver=self) + object_name = 'ftu' + try: + self.driver.upload_object( + file_path=file_path, + container=container, + object_name=object_name) + except LibcloudError: + pass + else: + self.fail( + 'Timeout while uploading but an exception was not thrown') + finally: + libcloud.utils.files.guess_file_mime_type = old_func1 + AtmosMockHttp.send = old_func2 + + def test_upload_object_nonexistent_file(self): + def dummy_content_type(name): + return 'application/zip', None + + old_func = libcloud.utils.files.guess_file_mime_type + libcloud.utils.files.guess_file_mime_type = dummy_content_type + + file_path = os.path.abspath(__file__ + '.inexistent') + container = Container(name='fbc', extra={}, driver=self) + object_name = 'ftu' + try: + self.driver.upload_object( + file_path=file_path, + container=container, + object_name=object_name) + except OSError: + pass + else: + self.fail('Inesitent but an exception was not thrown') + finally: + libcloud.utils.files.guess_file_mime_type = old_func + + def test_upload_object_via_stream_new_object(self): + def dummy_content_type(name): + return 'application/zip', None + + old_func = libcloud.storage.drivers.atmos.guess_file_mime_type + libcloud.storage.drivers.atmos.guess_file_mime_type = dummy_content_type + + container = Container(name='fbc', extra={}, driver=self) + object_name = 'ftsdn' + iterator = DummyIterator(data=['2', '3', '5']) + try: + self.driver.upload_object_via_stream(container=container, + 
object_name=object_name, + iterator=iterator) + finally: + libcloud.storage.drivers.atmos.guess_file_mime_type = old_func + + def test_upload_object_via_stream_existing_object(self): + def dummy_content_type(name): + return 'application/zip', None + + old_func = libcloud.storage.drivers.atmos.guess_file_mime_type + libcloud.storage.drivers.atmos.guess_file_mime_type = dummy_content_type + + container = Container(name='fbc', extra={}, driver=self) + object_name = 'ftsde' + iterator = DummyIterator(data=['2', '3', '5']) + try: + self.driver.upload_object_via_stream(container=container, + object_name=object_name, + iterator=iterator) + finally: + libcloud.storage.drivers.atmos.guess_file_mime_type = old_func + + def test_upload_object_via_stream_no_content_type(self): + def no_content_type(name): + return None, None + + old_func = libcloud.storage.drivers.atmos.guess_file_mime_type + libcloud.storage.drivers.atmos.guess_file_mime_type = no_content_type + + container = Container(name='fbc', extra={}, driver=self) + object_name = 'ftsdct' + iterator = DummyIterator(data=['2', '3', '5']) + try: + self.driver.upload_object_via_stream(container=container, + object_name=object_name, + iterator=iterator) + except AttributeError: + pass + else: + self.fail( + 'File content type not provided' + ' but an exception was not thrown') + finally: + libcloud.storage.drivers.atmos.guess_file_mime_type = old_func + + def test_signature_algorithm(self): + test_uid = 'fredsmagicuid' + test_key = base64.b64encode(b('ssssshhhhhmysecretkey')) + test_date = 'Mon, 04 Jul 2011 07:39:19 GMT' + test_values = [ + ('GET', '/rest/namespace/foo', '', {}, + 'WfSASIA25TuqO2n0aO9k/dtg6S0='), + ('GET', '/rest/namespace/foo%20%26%20bar', '', {}, + 'vmlqXqcInxxoP4YX5mR09BonjX4='), + ('POST', '/rest/namespace/foo', '', {}, + 'oYKdsF+1DOuUT7iX5CJCDym2EQk='), + ('PUT', '/rest/namespace/foo', '', {}, + 'JleF9dpSWhaT3B2swZI3s41qqs4='), + ('DELETE', '/rest/namespace/foo', '', {}, + 
'2IX+Bd5XZF5YY+g4P59qXV1uLpo='), + ('GET', '/rest/namespace/foo?metata/system', '', {}, + 'zuHDEAgKM1winGnWn3WBsqnz4ks='), + ('POST', '/rest/namespace/foo?metadata/user', '', { + 'x-emc-meta': 'fakemeta=fake, othermeta=faketoo' + }, '7sLx1nxPIRAtocfv02jz9h1BjbU='), + ] + + class FakeDriver(object): + path = '' + + for method, action, api_path, headers, expected in test_values: + c = AtmosConnection(test_uid, test_key) + c.method = method + c.action = action + d = FakeDriver() + d.path = api_path + c.driver = d + headers = c.add_default_headers(headers) + headers['Date'] = headers['x-emc-date'] = test_date + self.assertEqual(c._calculate_signature({}, headers), + b(expected).decode('utf-8')) + + +class AtmosMockHttp(StorageMockHttp, unittest.TestCase): + fixtures = StorageFileFixtures('atmos') + upload_created = False + upload_stream_created = False + + def __init__(self, *args, **kwargs): + unittest.TestCase.__init__(self) + + if kwargs.get('host', None) and kwargs.get('port', None): + StorageMockHttp.__init__(self, *args, **kwargs) + + self._upload_object_via_stream_first_request = True + + def runTest(self): + pass + + def request(self, method, url, body=None, headers=None, raw=False): + headers = headers or {} + parsed = urlparse.urlparse(url) + if parsed.query.startswith('metadata/'): + parsed = list(parsed) + parsed[2] = parsed[2] + '/' + parsed[4] + parsed[4] = '' + url = urlparse.urlunparse(parsed) + return super(AtmosMockHttp, self).request(method, url, body, headers, + raw) + + def _rest_namespace_EMPTY(self, method, url, body, headers): + body = self.fixtures.load('empty_directory_listing.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _rest_namespace(self, method, url, body, headers): + body = self.fixtures.load('list_containers.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _rest_namespace_test_container_EMPTY(self, method, url, body, headers): + body = 
self.fixtures.load('empty_directory_listing.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _rest_namespace_test_container(self, method, url, body, headers): + body = self.fixtures.load('list_containers.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _rest_namespace_test_container__metadata_system( + self, method, url, body, + headers): + headers = { + 'x-emc-meta': 'objectid=b21cb59a2ba339d1afdd4810010b0a5aba2ab6b9' + } + return (httplib.OK, '', headers, httplib.responses[httplib.OK]) + + def _rest_namespace_test_20_26_20container__metadata_system( + self, method, url, body, + headers): + headers = { + 'x-emc-meta': 'objectid=b21cb59a2ba339d1afdd4810010b0a5aba2ab6b9' + } + return (httplib.OK, '', headers, httplib.responses[httplib.OK]) + + def _rest_namespace_not_found__metadata_system(self, method, url, body, + headers): + body = self.fixtures.load('not_found.xml') + return (httplib.NOT_FOUND, body, {}, + httplib.responses[httplib.NOT_FOUND]) + + def _rest_namespace_test_create_container(self, method, url, body, headers): + return (httplib.OK, '', {}, httplib.responses[httplib.OK]) + + def _rest_namespace_test_create_container__metadata_system(self, method, + url, body, + headers): + headers = { + 'x-emc-meta': 'objectid=31a27b593629a3fe59f887fd973fd953e80062ce' + } + return (httplib.OK, '', headers, httplib.responses[httplib.OK]) + + def _rest_namespace_test_create_container_ALREADY_EXISTS(self, method, url, + body, headers): + body = self.fixtures.load('already_exists.xml') + return (httplib.BAD_REQUEST, body, {}, + httplib.responses[httplib.BAD_REQUEST]) + + def _rest_namespace_foo_bar_container(self, method, url, body, headers): + return (httplib.OK, '', {}, httplib.responses[httplib.OK]) + + def _rest_namespace_foo_bar_container_NOT_FOUND(self, method, url, body, + headers): + body = self.fixtures.load('not_found.xml') + return (httplib.NOT_FOUND, body, {}, + httplib.responses[httplib.NOT_FOUND]) + + 
def _rest_namespace_foo_bar_container_NOT_EMPTY(self, method, url, body, + headers): + body = self.fixtures.load('not_empty.xml') + return (httplib.BAD_REQUEST, body, {}, + httplib.responses[httplib.BAD_REQUEST]) + + def _rest_namespace_test_container_test_object_metadata_system( + self, method, + url, body, + headers): + meta = { + 'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643', + 'size': '555', + 'mtime': '2011-01-25T22:01:49Z' + } + headers = { + 'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())]) + } + return (httplib.OK, '', headers, httplib.responses[httplib.OK]) + + def _rest_namespace_test_20_26_20container_test_20_26_20object_metadata_system( + self, method, + url, body, + headers): + meta = { + 'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643', + 'size': '555', + 'mtime': '2011-01-25T22:01:49Z' + } + headers = { + 'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())]) + } + return (httplib.OK, '', headers, httplib.responses[httplib.OK]) + + def _rest_namespace_test_container_test_object_metadata_user(self, method, + url, body, + headers): + meta = { + 'md5': '6b21c4a111ac178feacf9ec9d0c71f17', + 'foo-bar': 'test 1', + 'bar-foo': 'test 2', + } + headers = { + 'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())]) + } + return (httplib.OK, '', headers, httplib.responses[httplib.OK]) + + def _rest_namespace_test_20_26_20container_test_20_26_20object_metadata_user( + self, method, + url, body, + headers): + meta = { + 'md5': '6b21c4a111ac178feacf9ec9d0c71f17', + 'foo-bar': 'test 1', + 'bar-foo': 'test 2', + } + headers = { + 'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())]) + } + return (httplib.OK, '', headers, httplib.responses[httplib.OK]) + + def _rest_namespace_test_container_not_found_metadata_system(self, method, + url, body, + headers): + body = self.fixtures.load('not_found.xml') + return (httplib.NOT_FOUND, body, {}, + httplib.responses[httplib.NOT_FOUND]) + + 
def _rest_namespace_foo_bar_container_foo_bar_object(self, method, url, + body, headers): + return (httplib.OK, '', {}, httplib.responses[httplib.OK]) + + def _rest_namespace_foo_20_26_20bar_container_foo_20_26_20bar_object( + self, method, url, + body, headers): + return (httplib.OK, '', {}, httplib.responses[httplib.OK]) + + def _rest_namespace_foo_bar_container_foo_bar_object_NOT_FOUND( + self, method, + url, body, + headers): + body = self.fixtures.load('not_found.xml') + return (httplib.NOT_FOUND, body, {}, + httplib.responses[httplib.NOT_FOUND]) + + def _rest_namespace_fbc_ftu_metadata_system(self, method, url, body, + headers): + if not self.upload_created: + self.__class__.upload_created = True + body = self.fixtures.load('not_found.xml') + return (httplib.NOT_FOUND, body, {}, + httplib.responses[httplib.NOT_FOUND]) + + self.__class__.upload_created = False + meta = { + 'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643', + 'size': '555', + 'mtime': '2011-01-25T22:01:49Z' + } + headers = { + 'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())]) + } + return (httplib.OK, '', headers, httplib.responses[httplib.OK]) + + def _rest_namespace_fbc_ftu_metadata_user(self, method, url, body, headers): + self.assertTrue('x-emc-meta' in headers) + return (httplib.OK, '', {}, httplib.responses[httplib.OK]) + + def _rest_namespace_fbc_ftsdn_metadata_system(self, method, url, body, + headers): + if not self.upload_stream_created: + self.__class__.upload_stream_created = True + body = self.fixtures.load('not_found.xml') + return (httplib.NOT_FOUND, body, {}, + httplib.responses[httplib.NOT_FOUND]) + + self.__class__.upload_stream_created = False + meta = { + 'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643', + 'size': '555', + 'mtime': '2011-01-25T22:01:49Z' + } + headers = { + 'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())]) + } + return (httplib.OK, '', headers, httplib.responses[httplib.OK]) + + def 
_rest_namespace_fbc_ftsdn(self, method, url, body, headers): + if self._upload_object_via_stream_first_request: + self.assertTrue('Range' not in headers) + self.assertEqual(method, 'POST') + self._upload_object_via_stream_first_request = False + else: + self.assertTrue('Range' in headers) + self.assertEqual(method, 'PUT') + return (httplib.OK, '', {}, httplib.responses[httplib.OK]) + + def _rest_namespace_fbc_ftsdn_metadata_user(self, method, url, body, + headers): + self.assertTrue('x-emc-meta' in headers) + return (httplib.OK, '', {}, httplib.responses[httplib.OK]) + + def _rest_namespace_fbc_ftsde_metadata_system(self, method, url, body, + headers): + meta = { + 'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643', + 'size': '555', + 'mtime': '2011-01-25T22:01:49Z' + } + headers = { + 'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())]) + } + return (httplib.OK, '', headers, httplib.responses[httplib.OK]) + + def _rest_namespace_fbc_ftsde(self, method, url, body, headers): + if self._upload_object_via_stream_first_request: + self.assertTrue('Range' not in headers) + self._upload_object_via_stream_first_request = False + else: + self.assertTrue('Range' in headers) + self.assertEqual(method, 'PUT') + return (httplib.OK, '', {}, httplib.responses[httplib.OK]) + + def _rest_namespace_fbc_ftsde_metadata_user(self, method, url, body, + headers): + self.assertTrue('x-emc-meta' in headers) + return (httplib.OK, '', {}, httplib.responses[httplib.OK]) + + def _rest_namespace_fbc_ftsd_metadata_system(self, method, url, body, + headers): + meta = { + 'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643', + 'size': '555', + 'mtime': '2011-01-25T22:01:49Z' + } + headers = { + 'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())]) + } + return (httplib.OK, '', headers, httplib.responses[httplib.OK]) + + +class AtmosMockRawResponse(MockRawResponse): + fixtures = StorageFileFixtures('atmos') + + def 
_rest_namespace_foo_bar_container_foo_bar_object(self, method, url, + body, headers): + body = self._generate_random_data(1000) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _rest_namespace_foo_20_26_20bar_container_foo_20_26_20bar_object( + self, method, url, + body, headers): + body = self._generate_random_data(1000) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _rest_namespace_foo_bar_container_foo_bar_object_NOT_FOUND( + self, method, + url, body, + headers): + body = self.fixtures.load('not_found.xml') + return (httplib.NOT_FOUND, body, {}, + httplib.responses[httplib.NOT_FOUND]) + + def _rest_namespace_fbc_ftu(self, method, url, body, headers): + return (httplib.CREATED, '', {}, httplib.responses[httplib.CREATED]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/storage/test_azure_blobs.py libcloud-0.15.1/libcloud/test/storage/test_azure_blobs.py --- libcloud-0.5.0/libcloud/test/storage/test_azure_blobs.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/test_azure_blobs.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,954 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import with_statement + +import os +import sys +import unittest +import tempfile + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlparse +from libcloud.utils.py3 import parse_qs + +from libcloud.common.types import InvalidCredsError +from libcloud.common.types import LibcloudError +from libcloud.storage.base import Container, Object +from libcloud.storage.types import ContainerDoesNotExistError +from libcloud.storage.types import ContainerIsNotEmptyError +from libcloud.storage.types import ContainerAlreadyExistsError +from libcloud.storage.types import InvalidContainerNameError +from libcloud.storage.types import ObjectDoesNotExistError +from libcloud.storage.types import ObjectHashMismatchError +from libcloud.storage.drivers.azure_blobs import AzureBlobsStorageDriver +from libcloud.storage.drivers.azure_blobs import AZURE_BLOCK_MAX_SIZE +from libcloud.storage.drivers.azure_blobs import AZURE_PAGE_CHUNK_SIZE +from libcloud.storage.drivers.dummy import DummyIterator + +from libcloud.test import StorageMockHttp, MockRawResponse # pylint: disable-msg=E0611 +from libcloud.test import MockHttpTestCase # pylint: disable-msg=E0611 +from libcloud.test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611 +from libcloud.test.secrets import STORAGE_AZURE_BLOBS_PARAMS + + +class AzureBlobsMockHttp(StorageMockHttp, MockHttpTestCase): + + fixtures = StorageFileFixtures('azure_blobs') + base_headers = {} + + def _UNAUTHORIZED(self, method, url, body, headers): + return (httplib.UNAUTHORIZED, + '', + self.base_headers, + httplib.responses[httplib.UNAUTHORIZED]) + + def _list_containers_EMPTY(self, method, url, body, headers): + body = self.fixtures.load('list_containers_empty.xml') + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _list_containers(self, method, url, body, headers): + query_string = urlparse.urlsplit(url).query + query = parse_qs(query_string) + + if 'marker' not 
in query: + body = self.fixtures.load('list_containers_1.xml') + else: + body = self.fixtures.load('list_containers_2.xml') + + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _test_container_EMPTY(self, method, url, body, headers): + if method == 'DELETE': + body = '' + return (httplib.ACCEPTED, + body, + self.base_headers, + httplib.responses[httplib.ACCEPTED]) + + else: + body = self.fixtures.load('list_objects_empty.xml') + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _new__container_INVALID_NAME(self, method, url, body, headers): + return (httplib.BAD_REQUEST, + body, + self.base_headers, + httplib.responses[httplib.BAD_REQUEST]) + + def _test_container(self, method, url, body, headers): + query_string = urlparse.urlsplit(url).query + query = parse_qs(query_string) + + if 'marker' not in query: + body = self.fixtures.load('list_objects_1.xml') + else: + body = self.fixtures.load('list_objects_2.xml') + + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _test_container100(self, method, url, body, headers): + body = '' + + if method != 'HEAD': + return (httplib.BAD_REQUEST, + body, + self.base_headers, + httplib.responses[httplib.BAD_REQUEST]) + + return (httplib.NOT_FOUND, + body, + self.base_headers, + httplib.responses[httplib.NOT_FOUND]) + + def _test_container200(self, method, url, body, headers): + body = '' + + if method != 'HEAD': + return (httplib.BAD_REQUEST, + body, + self.base_headers, + httplib.responses[httplib.BAD_REQUEST]) + + headers = {} + + headers['etag'] = '0x8CFB877BB56A6FB' + headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT' + headers['x-ms-lease-status'] = 'unlocked' + headers['x-ms-lease-state'] = 'available' + headers['x-ms-meta-meta1'] = 'value1' + + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _test_container200_test(self, method, url, body, headers): + body = '' + 
+ if method != 'HEAD': + return (httplib.BAD_REQUEST, + body, + self.base_headers, + httplib.responses[httplib.BAD_REQUEST]) + + headers = {} + + headers['etag'] = '0x8CFB877BB56A6FB' + headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT' + headers['content-length'] = 12345 + headers['content-type'] = 'application/zip' + headers['x-ms-blob-type'] = 'Block' + headers['x-ms-lease-status'] = 'unlocked' + headers['x-ms-lease-state'] = 'available' + headers['x-ms-meta-rabbits'] = 'monkeys' + + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _test2_test_list_containers(self, method, url, body, headers): + # test_get_object + body = self.fixtures.load('list_containers.xml') + headers = {'content-type': 'application/zip', + 'etag': '"e31208wqsdoj329jd"', + 'x-amz-meta-rabbits': 'monkeys', + 'content-length': 12345, + 'last-modified': 'Thu, 13 Sep 2012 07:13:22 GMT' + } + + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _new_container_ALREADY_EXISTS(self, method, url, body, headers): + # test_create_container + return (httplib.CONFLICT, + body, + headers, + httplib.responses[httplib.CONFLICT]) + + def _new_container(self, method, url, body, headers): + # test_create_container, test_delete_container + + headers = {} + + if method == 'PUT': + status = httplib.CREATED + + headers['etag'] = '0x8CFB877BB56A6FB' + headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT' + headers['x-ms-lease-status'] = 'unlocked' + headers['x-ms-lease-state'] = 'available' + headers['x-ms-meta-meta1'] = 'value1' + + elif method == 'DELETE': + status = httplib.NO_CONTENT + + return (status, + body, + headers, + httplib.responses[status]) + + def _new_container_DOESNT_EXIST(self, method, url, body, headers): + # test_delete_container + return (httplib.NOT_FOUND, + body, + headers, + httplib.responses[httplib.NOT_FOUND]) + + def _foo_bar_container_NOT_FOUND(self, method, url, body, headers): + # test_delete_container_not_found 
+ return (httplib.NOT_FOUND, + body, + headers, + httplib.responses[httplib.NOT_FOUND]) + + def _foo_bar_container_foo_bar_object_NOT_FOUND(self, method, url, body, + headers): + # test_delete_object_not_found + return (httplib.NOT_FOUND, + body, + headers, + httplib.responses[httplib.NOT_FOUND]) + + def _foo_bar_container_foo_bar_object(self, method, url, body, headers): + # test_delete_object + return (httplib.ACCEPTED, + body, + headers, + httplib.responses[httplib.ACCEPTED]) + + def _foo_bar_container_foo_test_upload(self, method, url, body, headers): + # test_upload_object_success + body = '' + headers = {} + headers['etag'] = '0x8CFB877BB56A6FB' + headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd' + return (httplib.CREATED, + body, + headers, + httplib.responses[httplib.CREATED]) + + def _foo_bar_container_foo_test_upload_block(self, method, url, + body, headers): + # test_upload_object_success + body = '' + headers = {} + headers['etag'] = '0x8CFB877BB56A6FB' + return (httplib.CREATED, + body, + headers, + httplib.responses[httplib.CREATED]) + + def _foo_bar_container_foo_test_upload_page(self, method, url, + body, headers): + # test_upload_object_success + body = '' + headers = {} + headers['etag'] = '0x8CFB877BB56A6FB' + return (httplib.CREATED, + body, + headers, + httplib.responses[httplib.CREATED]) + + def _foo_bar_container_foo_test_upload_blocklist(self, method, url, + body, headers): + # test_upload_object_success + body = '' + headers = {} + headers['etag'] = '0x8CFB877BB56A6FB' + headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd' + + return (httplib.CREATED, + body, + headers, + httplib.responses[httplib.CREATED]) + + def _foo_bar_container_foo_test_upload_lease(self, method, url, + body, headers): + # test_upload_object_success + action = headers['x-ms-lease-action'] + rheaders = {'x-ms-lease-id': 'someleaseid'} + body = '' + + if action == 'acquire': + return (httplib.CREATED, + body, + rheaders, + 
httplib.responses[httplib.CREATED]) + + else: + if headers.get('x-ms-lease-id', None) != 'someleaseid': + return (httplib.BAD_REQUEST, + body, + rheaders, + httplib.responses[httplib.BAD_REQUEST]) + + return (httplib.OK, + body, + headers, + httplib.responses[httplib.CREATED]) + + +class AzureBlobsMockRawResponse(MockRawResponse): + + fixtures = StorageFileFixtures('azure_blobs') + + def _foo_bar_container_foo_test_upload_INVALID_HASH(self, method, url, + body, headers): + body = '' + headers = {} + headers['etag'] = '0x8CFB877BB56A6FB' + headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd' + + # test_upload_object_invalid_hash1 + return (httplib.CREATED, + body, + headers, + httplib.responses[httplib.CREATED]) + + def _foo_bar_container_foo_test_upload(self, method, url, body, headers): + # test_upload_object_success + body = '' + headers = {} + headers['etag'] = '0x8CFB877BB56A6FB' + headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd' + return (httplib.CREATED, + body, + headers, + httplib.responses[httplib.CREATED]) + + def _foo_bar_container_foo_bar_object(self, method, url, body, headers): + # test_upload_object_invalid_file_size + body = self._generate_random_data(1000) + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_foo_bar_object_INVALID_SIZE(self, method, url, + body, headers): + # test_upload_object_invalid_file_size + body = '' + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + +class AzureBlobsTests(unittest.TestCase): + driver_type = AzureBlobsStorageDriver + driver_args = STORAGE_AZURE_BLOBS_PARAMS + mock_response_klass = AzureBlobsMockHttp + mock_raw_response_klass = AzureBlobsMockRawResponse + + @classmethod + def create_driver(self): + return self.driver_type(*self.driver_args) + + def setUp(self): + self.driver_type.connectionCls.conn_classes = (None, + self.mock_response_klass) + self.driver_type.connectionCls.rawResponseCls = \ + 
self.mock_raw_response_klass + self.mock_response_klass.type = None + self.mock_raw_response_klass.type = None + self.driver = self.create_driver() + + def tearDown(self): + self._remove_test_file() + + def _remove_test_file(self): + file_path = os.path.abspath(__file__) + '.temp' + + try: + os.unlink(file_path) + except OSError: + pass + + def test_invalid_credentials(self): + self.mock_response_klass.type = 'UNAUTHORIZED' + try: + self.driver.list_containers() + except InvalidCredsError: + e = sys.exc_info()[1] + self.assertEqual(True, isinstance(e, InvalidCredsError)) + else: + self.fail('Exception was not thrown') + + def test_list_containers_empty(self): + self.mock_response_klass.type = 'list_containers_EMPTY' + containers = self.driver.list_containers() + self.assertEqual(len(containers), 0) + + def test_list_containers_success(self): + self.mock_response_klass.type = 'list_containers' + AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2 + containers = self.driver.list_containers() + self.assertEqual(len(containers), 4) + + self.assertTrue('last_modified' in containers[1].extra) + self.assertTrue('url' in containers[1].extra) + self.assertTrue('etag' in containers[1].extra) + self.assertTrue('lease' in containers[1].extra) + self.assertTrue('meta_data' in containers[1].extra) + + def test_list_container_objects_empty(self): + self.mock_response_klass.type = 'EMPTY' + container = Container(name='test_container', extra={}, + driver=self.driver) + objects = self.driver.list_container_objects(container=container) + self.assertEqual(len(objects), 0) + + def test_list_container_objects_success(self): + self.mock_response_klass.type = None + AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2 + + container = Container(name='test_container', extra={}, + driver=self.driver) + + objects = self.driver.list_container_objects(container=container) + self.assertEqual(len(objects), 4) + + obj = objects[1] + self.assertEqual(obj.name, 'object2.txt') + self.assertEqual(obj.hash, 
'0x8CFB90F1BA8CD8F') + self.assertEqual(obj.size, 1048576) + self.assertEqual(obj.container.name, 'test_container') + self.assertTrue('meta1' in obj.meta_data) + self.assertTrue('meta2' in obj.meta_data) + self.assertTrue('last_modified' in obj.extra) + self.assertTrue('content_type' in obj.extra) + self.assertTrue('content_encoding' in obj.extra) + self.assertTrue('content_language' in obj.extra) + + def test_get_container_doesnt_exist(self): + self.mock_response_klass.type = None + try: + self.driver.get_container(container_name='test_container100') + except ContainerDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_get_container_success(self): + self.mock_response_klass.type = None + container = self.driver.get_container( + container_name='test_container200') + + self.assertTrue(container.name, 'test_container200') + self.assertTrue(container.extra['etag'], '0x8CFB877BB56A6FB') + self.assertTrue(container.extra['last_modified'], + 'Fri, 04 Jan 2013 09:48:06 GMT') + self.assertTrue(container.extra['lease']['status'], 'unlocked') + self.assertTrue(container.extra['lease']['state'], 'available') + self.assertTrue(container.extra['meta_data']['meta1'], 'value1') + + def test_get_object_container_doesnt_exist(self): + # This method makes two requests which makes mocking the response a bit + # trickier + self.mock_response_klass.type = None + try: + self.driver.get_object(container_name='test_container100', + object_name='test') + except ContainerDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_get_object_success(self): + # This method makes two requests which makes mocking the response a bit + # trickier + self.mock_response_klass.type = None + obj = self.driver.get_object(container_name='test_container200', + object_name='test') + + self.assertEqual(obj.name, 'test') + self.assertEqual(obj.container.name, 'test_container200') + self.assertEqual(obj.size, 12345) + self.assertEqual(obj.hash, 
'0x8CFB877BB56A6FB') + self.assertEqual(obj.extra['last_modified'], + 'Fri, 04 Jan 2013 09:48:06 GMT') + self.assertEqual(obj.extra['content_type'], 'application/zip') + self.assertEqual(obj.meta_data['rabbits'], 'monkeys') + + def test_create_container_invalid_name(self): + # invalid container name + self.mock_response_klass.type = 'INVALID_NAME' + try: + self.driver.create_container(container_name='new--container') + except InvalidContainerNameError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_container_already_exists(self): + # container with this name already exists + self.mock_response_klass.type = 'ALREADY_EXISTS' + try: + self.driver.create_container(container_name='new-container') + except ContainerAlreadyExistsError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_container_success(self): + # success + self.mock_response_klass.type = None + name = 'new-container' + container = self.driver.create_container(container_name=name) + self.assertEqual(container.name, name) + + def test_delete_container_doesnt_exist(self): + container = Container(name='new_container', extra=None, + driver=self.driver) + self.mock_response_klass.type = 'DOESNT_EXIST' + try: + self.driver.delete_container(container=container) + except ContainerDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_delete_container_not_empty(self): + self.mock_response_klass.type = None + AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2 + + container = Container(name='test_container', extra={}, + driver=self.driver) + + try: + self.driver.delete_container(container=container) + except ContainerIsNotEmptyError: + pass + else: + self.fail('Exception was not thrown') + + def test_delete_container_success(self): + self.mock_response_klass.type = 'EMPTY' + AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2 + + container = Container(name='test_container', extra={}, + driver=self.driver) + + 
self.assertTrue(self.driver.delete_container(container=container)) + + def test_delete_container_not_found(self): + self.mock_response_klass.type = 'NOT_FOUND' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + try: + self.driver.delete_container(container=container) + except ContainerDoesNotExistError: + pass + else: + self.fail('Container does not exist but an exception was not' + + 'thrown') + + def test_download_object_success(self): + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self.driver_type) + destination_path = os.path.abspath(__file__) + '.temp' + result = self.driver.download_object(obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + self.assertTrue(result) + + def test_download_object_invalid_file_size(self): + self.mock_raw_response_klass.type = 'INVALID_SIZE' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self.driver_type) + destination_path = os.path.abspath(__file__) + '.temp' + result = self.driver.download_object(obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + self.assertFalse(result) + + def test_download_object_invalid_file_already_exists(self): + self.mock_raw_response_klass.type = 'INVALID_SIZE' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self.driver_type) + destination_path = os.path.abspath(__file__) + try: + self.driver.download_object(obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + except 
LibcloudError: + pass + else: + self.fail('Exception was not thrown') + + def test_download_object_as_stream_success(self): + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self.driver_type) + + stream = self.driver.download_object_as_stream(obj=obj, + chunk_size=None) + self.assertTrue(hasattr(stream, '__iter__')) + + def test_upload_object_invalid_ex_blob_type(self): + # Invalid hash is detected on the amazon side and BAD_REQUEST is + # returned + file_path = os.path.abspath(__file__) + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + try: + self.driver.upload_object(file_path=file_path, container=container, + object_name=object_name, + verify_hash=True, + ex_blob_type='invalid-blob') + except LibcloudError: + e = sys.exc_info()[1] + self.assertTrue(str(e).lower().find('invalid blob type') != -1) + else: + self.fail('Exception was not thrown') + + def test_upload_object_invalid_md5(self): + # Invalid md5 is returned by azure + self.mock_raw_response_klass.type = 'INVALID_HASH' + + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + file_path = os.path.abspath(__file__) + try: + self.driver.upload_object(file_path=file_path, container=container, + object_name=object_name, + verify_hash=True) + except ObjectHashMismatchError: + pass + else: + self.fail( + 'Invalid hash was returned but an exception was not thrown') + + def test_upload_small_block_object_success(self): + file_path = os.path.abspath(__file__) + file_size = os.stat(file_path).st_size + + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + extra = {'meta_data': {'some-value': 'foobar'}} + obj = self.driver.upload_object(file_path=file_path, + 
container=container, + object_name=object_name, + extra=extra, + verify_hash=False, + ex_blob_type='BlockBlob') + + self.assertEqual(obj.name, 'foo_test_upload') + self.assertEqual(obj.size, file_size) + self.assertTrue('some-value' in obj.meta_data) + + def test_upload_big_block_object_success(self): + file_path = tempfile.mktemp(suffix='.jpg') + file_size = AZURE_BLOCK_MAX_SIZE + 1 + + with open(file_path, 'w') as file_hdl: + file_hdl.write('0' * file_size) + + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + extra = {'meta_data': {'some-value': 'foobar'}} + obj = self.driver.upload_object(file_path=file_path, + container=container, + object_name=object_name, + extra=extra, + verify_hash=False, + ex_blob_type='BlockBlob') + + self.assertEqual(obj.name, 'foo_test_upload') + self.assertEqual(obj.size, file_size) + self.assertTrue('some-value' in obj.meta_data) + + os.remove(file_path) + + def test_upload_page_object_success(self): + self.mock_response_klass.use_param = None + file_path = tempfile.mktemp(suffix='.jpg') + file_size = AZURE_PAGE_CHUNK_SIZE * 4 + + with open(file_path, 'w') as file_hdl: + file_hdl.write('0' * file_size) + + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + extra = {'meta_data': {'some-value': 'foobar'}} + obj = self.driver.upload_object(file_path=file_path, + container=container, + object_name=object_name, + extra=extra, + verify_hash=False, + ex_blob_type='PageBlob') + + self.assertEqual(obj.name, 'foo_test_upload') + self.assertEqual(obj.size, file_size) + self.assertTrue('some-value' in obj.meta_data) + + os.remove(file_path) + + def test_upload_page_object_failure(self): + file_path = tempfile.mktemp(suffix='.jpg') + file_size = AZURE_PAGE_CHUNK_SIZE * 2 + 1 + + with open(file_path, 'w') as file_hdl: + file_hdl.write('0' * file_size) + + container = Container(name='foo_bar_container', extra={}, + 
driver=self.driver) + object_name = 'foo_test_upload' + extra = {'meta_data': {'some-value': 'foobar'}} + + try: + self.driver.upload_object(file_path=file_path, + container=container, + object_name=object_name, + extra=extra, + verify_hash=False, + ex_blob_type='PageBlob') + except LibcloudError: + e = sys.exc_info()[1] + self.assertTrue(str(e).lower().find('not aligned') != -1) + + os.remove(file_path) + + def test_upload_small_block_object_success_with_lease(self): + self.mock_response_klass.use_param = 'comp' + file_path = os.path.abspath(__file__) + file_size = os.stat(file_path).st_size + + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + extra = {'meta_data': {'some-value': 'foobar'}} + obj = self.driver.upload_object(file_path=file_path, + container=container, + object_name=object_name, + extra=extra, + verify_hash=False, + ex_blob_type='BlockBlob', + ex_use_lease=True) + + self.assertEqual(obj.name, 'foo_test_upload') + self.assertEqual(obj.size, file_size) + self.assertTrue('some-value' in obj.meta_data) + self.mock_response_klass.use_param = None + + def test_upload_big_block_object_success_with_lease(self): + self.mock_response_klass.use_param = 'comp' + file_path = tempfile.mktemp(suffix='.jpg') + file_size = AZURE_BLOCK_MAX_SIZE * 2 + + with open(file_path, 'w') as file_hdl: + file_hdl.write('0' * file_size) + + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + extra = {'meta_data': {'some-value': 'foobar'}} + obj = self.driver.upload_object(file_path=file_path, + container=container, + object_name=object_name, + extra=extra, + verify_hash=False, + ex_blob_type='BlockBlob', + ex_use_lease=False) + + self.assertEqual(obj.name, 'foo_test_upload') + self.assertEqual(obj.size, file_size) + self.assertTrue('some-value' in obj.meta_data) + + os.remove(file_path) + self.mock_response_klass.use_param = None + + def 
test_upload_page_object_success_with_lease(self): + self.mock_response_klass.use_param = 'comp' + file_path = tempfile.mktemp(suffix='.jpg') + file_size = AZURE_PAGE_CHUNK_SIZE * 4 + + with open(file_path, 'w') as file_hdl: + file_hdl.write('0' * file_size) + + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + extra = {'meta_data': {'some-value': 'foobar'}} + obj = self.driver.upload_object(file_path=file_path, + container=container, + object_name=object_name, + extra=extra, + verify_hash=False, + ex_blob_type='PageBlob', + ex_use_lease=True) + + self.assertEqual(obj.name, 'foo_test_upload') + self.assertEqual(obj.size, file_size) + self.assertTrue('some-value' in obj.meta_data) + + os.remove(file_path) + self.mock_response_klass.use_param = None + + def test_upload_blob_object_via_stream(self): + self.mock_response_klass.use_param = 'comp' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + + object_name = 'foo_test_upload' + iterator = DummyIterator(data=['2', '3', '5']) + extra = {'content_type': 'text/plain'} + obj = self.driver.upload_object_via_stream(container=container, + object_name=object_name, + iterator=iterator, + extra=extra, + ex_blob_type='BlockBlob') + + self.assertEqual(obj.name, object_name) + self.assertEqual(obj.size, 3) + self.mock_response_klass.use_param = None + + def test_upload_blob_object_via_stream_with_lease(self): + self.mock_response_klass.use_param = 'comp' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + + object_name = 'foo_test_upload' + iterator = DummyIterator(data=['2', '3', '5']) + extra = {'content_type': 'text/plain'} + obj = self.driver.upload_object_via_stream(container=container, + object_name=object_name, + iterator=iterator, + extra=extra, + ex_blob_type='BlockBlob', + ex_use_lease=True) + + self.assertEqual(obj.name, object_name) + self.assertEqual(obj.size, 3) + 
self.mock_response_klass.use_param = None + + def test_upload_page_object_via_stream(self): + self.mock_response_klass.use_param = 'comp' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + + object_name = 'foo_test_upload' + blob_size = AZURE_PAGE_CHUNK_SIZE + iterator = DummyIterator(data=['1'] * blob_size) + extra = {'content_type': 'text/plain'} + obj = self.driver.upload_object_via_stream(container=container, + object_name=object_name, + iterator=iterator, + extra=extra, + ex_blob_type='PageBlob', + ex_page_blob_size=blob_size) + + self.assertEqual(obj.name, object_name) + self.assertEqual(obj.size, blob_size) + self.mock_response_klass.use_param = None + + def test_upload_page_object_via_stream_with_lease(self): + self.mock_response_klass.use_param = 'comp' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + + object_name = 'foo_test_upload' + blob_size = AZURE_PAGE_CHUNK_SIZE + iterator = DummyIterator(data=['1'] * blob_size) + extra = {'content_type': 'text/plain'} + obj = self.driver.upload_object_via_stream(container=container, + object_name=object_name, + iterator=iterator, + extra=extra, + ex_blob_type='PageBlob', + ex_page_blob_size=blob_size, + ex_use_lease=True) + + self.assertEqual(obj.name, object_name) + self.assertEqual(obj.size, blob_size) + + def test_delete_object_not_found(self): + self.mock_response_klass.type = 'NOT_FOUND' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None, + meta_data=None, container=container, driver=self.driver) + try: + self.driver.delete_object(obj=obj) + except ObjectDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_delete_object_success(self): + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None, + meta_data=None, 
container=container, driver=self.driver) + + result = self.driver.delete_object(obj=obj) + self.assertTrue(result) + + def test_storage_driver_host(self): + # Non regression tests for issue LIBCLOUD-399 dealing with the bad + # management of the connectionCls.host class attribute + driver1 = self.driver_type('fakeaccount1', 'deadbeafcafebabe==') + driver2 = self.driver_type('fakeaccount2', 'deadbeafcafebabe==') + driver3 = self.driver_type('fakeaccount3', 'deadbeafcafebabe==', + host='test.foo.bar.com') + + host1 = driver1.connection.host + host2 = driver2.connection.host + host3 = driver3.connection.host + + self.assertEquals(host1, 'fakeaccount1.blob.core.windows.net') + self.assertEquals(host2, 'fakeaccount2.blob.core.windows.net') + self.assertEquals(host3, 'test.foo.bar.com') + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/storage/test_base.py libcloud-0.15.1/libcloud/test/storage/test_base.py --- libcloud-0.5.0/libcloud/test/storage/test_base.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/test_base.py 2013-12-12 09:01:49.000000000 +0000 @@ -0,0 +1,193 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import hashlib + +from mock import Mock + +from libcloud.utils.py3 import StringIO +from libcloud.utils.py3 import PY3 +from libcloud.utils.py3 import b + +if PY3: + from io import FileIO as file + +from libcloud.storage.base import StorageDriver +from libcloud.storage.base import DEFAULT_CONTENT_TYPE + +from libcloud.test import unittest +from libcloud.test import StorageMockHttp + + +class BaseStorageTests(unittest.TestCase): + + def setUp(self): + self.send_called = 0 + StorageDriver.connectionCls.conn_classes = (None, StorageMockHttp) + + self.driver1 = StorageDriver('username', 'key', host='localhost') + self.driver1.supports_chunked_encoding = True + self.driver2 = StorageDriver('username', 'key', host='localhost') + self.driver2.supports_chunked_encoding = False + + self.driver1.strict_mode = False + self.driver1.strict_mode = False + + def test__upload_object_iterator_must_have_next_method(self): + class Iterator(object): + + def next(self): + pass + + class Iterator2(file): + + def __init__(self): + pass + + class SomeClass(object): + pass + + valid_iterators = [Iterator(), Iterator2(), StringIO('bar')] + invalid_iterators = ['foobar', '', False, True, 1, object()] + + def upload_func(*args, **kwargs): + return True, 'barfoo', 100 + + kwargs = {'object_name': 'foo', 'content_type': 'foo/bar', + 'upload_func': upload_func, 'upload_func_kwargs': {}, + 'request_path': '/', 'headers': {}} + + for value in valid_iterators: + kwargs['iterator'] = value + self.driver1._upload_object(**kwargs) + + for value in invalid_iterators: + kwargs['iterator'] = value + + try: + self.driver1._upload_object(**kwargs) + except AttributeError: + pass + else: + self.fail('Exception was not thrown') + + def test_upload_zero_bytes_long_object_via_stream(self): + iterator = Mock() + + if PY3: + iterator.__next__ = Mock() + iterator.__next__.side_effect = StopIteration() + else: + iterator.next.side_effect = StopIteration() + + def mock_send(data): + self.send_called 
+= 1 + + response = Mock() + response.connection.connection.send = mock_send + + # Normal + success, data_hash, bytes_transferred = \ + self.driver1._stream_data(response=response, + iterator=iterator, + chunked=False, calculate_hash=True) + + self.assertTrue(success) + self.assertEqual(data_hash, hashlib.md5(b('')).hexdigest()) + self.assertEqual(bytes_transferred, 0) + self.assertEqual(self.send_called, 1) + + # Chunked + success, data_hash, bytes_transferred = \ + self.driver1._stream_data(response=response, + iterator=iterator, + chunked=True, calculate_hash=True) + + self.assertTrue(success) + self.assertEqual(data_hash, hashlib.md5(b('')).hexdigest()) + self.assertEqual(bytes_transferred, 0) + self.assertEqual(self.send_called, 5) + + def test__upload_data(self): + def mock_send(data): + self.send_called += 1 + + response = Mock() + response.connection.connection.send = mock_send + + data = '123456789901234567' + success, data_hash, bytes_transferred = \ + self.driver1._upload_data(response=response, data=data, + calculate_hash=True) + + self.assertTrue(success) + self.assertEqual(data_hash, hashlib.md5(b(data)).hexdigest()) + self.assertEqual(bytes_transferred, (len(data))) + self.assertEqual(self.send_called, 1) + + def test__get_hash_function(self): + self.driver1.hash_type = 'md5' + func = self.driver1._get_hash_function() + self.assertTrue(func) + + self.driver1.hash_type = 'sha1' + func = self.driver1._get_hash_function() + self.assertTrue(func) + + try: + self.driver1.hash_type = 'invalid-hash-function' + func = self.driver1._get_hash_function() + except RuntimeError: + pass + else: + self.fail('Invalid hash type but exception was not thrown') + + def test_upload_no_content_type_supplied_or_detected(self): + iterator = StringIO() + + upload_func = Mock() + upload_func.return_value = True, '', 0 + + # strict_mode is disabled, default content type should be used + self.driver1.connection = Mock() + + self.driver1._upload_object(object_name='test', + 
content_type=None, + upload_func=upload_func, + upload_func_kwargs={}, + request_path='/', + iterator=iterator) + + headers = self.driver1.connection.request.call_args[-1]['headers'] + self.assertEqual(headers['Content-Type'], DEFAULT_CONTENT_TYPE) + + # strict_mode is enabled, exception should be thrown + + self.driver1.strict_mode = True + expected_msg = ('File content-type could not be guessed and no' + ' content_type value is provided') + self.assertRaisesRegexp(AttributeError, expected_msg, + self.driver1._upload_object, + object_name='test', + content_type=None, + upload_func=upload_func, + upload_func_kwargs={}, + request_path='/', + iterator=iterator) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/storage/test_cloudfiles.py libcloud-0.15.1/libcloud/test/storage/test_cloudfiles.py --- libcloud-0.5.0/libcloud/test/storage/test_cloudfiles.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/test_cloudfiles.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,1133 @@ +# -*- coding: utf-8 -*- +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from hashlib import sha1 +import hmac +import os +import os.path # pylint: disable-msg=W0404 +import math +import sys +import copy +import unittest + +import mock + +import libcloud.utils.files + +from libcloud.utils.py3 import b +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlquote + +from libcloud.common.types import LibcloudError, MalformedResponseError +from libcloud.storage.base import Container, Object +from libcloud.storage.types import ContainerAlreadyExistsError +from libcloud.storage.types import ContainerDoesNotExistError +from libcloud.storage.types import ContainerIsNotEmptyError +from libcloud.storage.types import ObjectDoesNotExistError +from libcloud.storage.types import ObjectHashMismatchError +from libcloud.storage.types import InvalidContainerNameError +from libcloud.storage.drivers.cloudfiles import CloudFilesStorageDriver +from libcloud.storage.drivers.cloudfiles import CloudFilesUSStorageDriver +from libcloud.storage.drivers.cloudfiles import CloudFilesUKStorageDriver +from libcloud.storage.drivers.dummy import DummyIterator + +from libcloud.test import StorageMockHttp, MockRawResponse # pylint: disable-msg=E0611 +from libcloud.test import MockHttpTestCase # pylint: disable-msg=E0611 +from libcloud.test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611 + + +class CloudFilesTests(unittest.TestCase): + driver_klass = CloudFilesStorageDriver + driver_args = ('dummy', 'dummy') + driver_kwargs = {} + region = 'ord' + + def setUp(self): + self.driver_klass.connectionCls.conn_classes = ( + None, CloudFilesMockHttp) + self.driver_klass.connectionCls.rawResponseCls = \ + CloudFilesMockRawResponse + CloudFilesMockHttp.type = None + CloudFilesMockRawResponse.type = None + self.driver = self.driver_klass(*self.driver_args, + **self.driver_kwargs) + + # normally authentication happens lazily, but we force it here + self.driver.connection._populate_hosts_and_request_paths() + self._remove_test_file() + + def 
tearDown(self): + self._remove_test_file() + + def test_invalid_ex_force_service_region(self): + driver = CloudFilesStorageDriver('driver', 'dummy', + ex_force_service_region='invalid') + + try: + driver.list_containers() + except: + e = sys.exc_info()[1] + self.assertEqual(e.value, 'Could not find specified endpoint') + else: + self.fail('Exception was not thrown') + + def test_ex_force_service_region(self): + driver = CloudFilesStorageDriver('driver', 'dummy', + ex_force_service_region='ORD') + driver.list_containers() + + def test_force_auth_token_kwargs(self): + base_url = 'https://cdn2.clouddrive.com/v1/MossoCloudFS' + kwargs = { + 'ex_force_auth_token': 'some-auth-token', + 'ex_force_base_url': base_url + } + driver = CloudFilesStorageDriver('driver', 'dummy', **kwargs) + driver.list_containers() + + self.assertEqual(kwargs['ex_force_auth_token'], + driver.connection.auth_token) + self.assertEqual('cdn2.clouddrive.com', + driver.connection.host) + self.assertEqual('/v1/MossoCloudFS', + driver.connection.request_path) + + def test_force_auth_url_kwargs(self): + kwargs = { + 'ex_force_auth_version': '2.0', + 'ex_force_auth_url': 'https://identity.api.rackspace.com' + } + driver = CloudFilesStorageDriver('driver', 'dummy', **kwargs) + + self.assertEqual(kwargs['ex_force_auth_url'], + driver.connection._ex_force_auth_url) + self.assertEqual(kwargs['ex_force_auth_version'], + driver.connection._auth_version) + + def test_invalid_json_throws_exception(self): + CloudFilesMockHttp.type = 'MALFORMED_JSON' + try: + self.driver.list_containers() + except MalformedResponseError: + pass + else: + self.fail('Exception was not thrown') + + def test_service_catalog(self): + url = 'https://storage4.%s1.clouddrive.com/v1/MossoCloudFS' % \ + (self.region) + self.assertEqual( + url, + self.driver.connection.get_endpoint()) + + self.driver.connection.cdn_request = True + self.assertEqual( + 'https://cdn.clouddrive.com/v1/MossoCloudFS', + self.driver.connection.get_endpoint()) + 
self.driver.connection.cdn_request = False + + def test_endpoint_pointer(self): + kwargs = {'use_internal_url': False} + driver = CloudFilesStorageDriver('driver', 'dummy', **kwargs) + self.assertEquals(driver.connection._get_endpoint_key(), libcloud.storage.drivers.cloudfiles.PUBLIC_ENDPOINT_KEY) + kwargs = {'use_internal_url': True} + driver = CloudFilesStorageDriver('driver', 'dummy', **kwargs) + self.assertEquals(driver.connection._get_endpoint_key(), libcloud.storage.drivers.cloudfiles.INTERNAL_ENDPOINT_KEY) + driver.connection.cdn_request = True + self.assertEquals(driver.connection._get_endpoint_key(), libcloud.storage.drivers.cloudfiles.PUBLIC_ENDPOINT_KEY) + + def test_list_containers(self): + CloudFilesMockHttp.type = 'EMPTY' + containers = self.driver.list_containers() + self.assertEqual(len(containers), 0) + + CloudFilesMockHttp.type = None + containers = self.driver.list_containers() + self.assertEqual(len(containers), 3) + + container = [c for c in containers if c.name == 'container2'][0] + self.assertEqual(container.extra['object_count'], 120) + self.assertEqual(container.extra['size'], 340084450) + + def test_list_container_objects(self): + CloudFilesMockHttp.type = 'EMPTY' + container = Container( + name='test_container', extra={}, driver=self.driver) + objects = self.driver.list_container_objects(container=container) + self.assertEqual(len(objects), 0) + + CloudFilesMockHttp.type = None + objects = self.driver.list_container_objects(container=container) + self.assertEqual(len(objects), 4) + + obj = [o for o in objects if o.name == 'foo test 1'][0] + self.assertEqual(obj.hash, '16265549b5bda64ecdaa5156de4c97cc') + self.assertEqual(obj.size, 1160520) + self.assertEqual(obj.container.name, 'test_container') + + def test_list_container_object_name_encoding(self): + CloudFilesMockHttp.type = 'EMPTY' + container = Container(name='test container 1', extra={}, + driver=self.driver) + objects = self.driver.list_container_objects(container=container) + 
self.assertEqual(len(objects), 0) + + def test_list_container_objects_with_prefix(self): + CloudFilesMockHttp.type = 'EMPTY' + container = Container( + name='test_container', extra={}, driver=self.driver) + objects = self.driver.list_container_objects(container=container, + ex_prefix='test_prefix1') + self.assertEqual(len(objects), 0) + + CloudFilesMockHttp.type = None + objects = self.driver.list_container_objects(container=container, + ex_prefix='test_prefix2') + self.assertEqual(len(objects), 4) + + obj = [o for o in objects if o.name == 'foo test 1'][0] + self.assertEqual(obj.hash, '16265549b5bda64ecdaa5156de4c97cc') + self.assertEqual(obj.size, 1160520) + self.assertEqual(obj.container.name, 'test_container') + + def test_list_container_objects_iterator(self): + CloudFilesMockHttp.type = 'ITERATOR' + container = Container( + name='test_container', extra={}, driver=self.driver) + objects = self.driver.list_container_objects(container=container) + self.assertEqual(len(objects), 5) + + obj = [o for o in objects if o.name == 'foo-test-1'][0] + self.assertEqual(obj.hash, '16265549b5bda64ecdaa5156de4c97cc') + self.assertEqual(obj.size, 1160520) + self.assertEqual(obj.container.name, 'test_container') + + def test_get_container(self): + container = self.driver.get_container(container_name='test_container') + self.assertEqual(container.name, 'test_container') + self.assertEqual(container.extra['object_count'], 800) + self.assertEqual(container.extra['size'], 1234568) + + def test_get_container_not_found(self): + try: + self.driver.get_container(container_name='not_found') + except ContainerDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_get_object_success(self): + obj = self.driver.get_object(container_name='test_container', + object_name='test_object') + self.assertEqual(obj.container.name, 'test_container') + self.assertEqual(obj.size, 555) + self.assertEqual(obj.hash, '6b21c4a111ac178feacf9ec9d0c71f17') + 
self.assertEqual(obj.extra['content_type'], 'application/zip') + self.assertEqual( + obj.extra['last_modified'], 'Tue, 25 Jan 2011 22:01:49 GMT') + self.assertEqual(obj.meta_data['foo-bar'], 'test 1') + self.assertEqual(obj.meta_data['bar-foo'], 'test 2') + + def test_get_object_object_name_encoding(self): + obj = self.driver.get_object(container_name='test_container', + object_name='~/test_object/') + self.assertEqual(obj.name, '~/test_object/') + + def test_get_object_not_found(self): + try: + self.driver.get_object(container_name='test_container', + object_name='not_found') + except ObjectDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_container_success(self): + container = self.driver.create_container( + container_name='test_create_container') + self.assertTrue(isinstance(container, Container)) + self.assertEqual(container.name, 'test_create_container') + self.assertEqual(container.extra['object_count'], 0) + + def test_create_container_already_exists(self): + CloudFilesMockHttp.type = 'ALREADY_EXISTS' + + try: + self.driver.create_container( + container_name='test_create_container') + except ContainerAlreadyExistsError: + pass + else: + self.fail( + 'Container already exists but an exception was not thrown') + + def test_create_container_invalid_name_too_long(self): + name = ''.join(['x' for x in range(0, 257)]) + try: + self.driver.create_container(container_name=name) + except InvalidContainerNameError: + pass + else: + self.fail( + 'Invalid name was provided (name is too long)' + ', but exception was not thrown') + + def test_create_container_invalid_name_slashes_in_name(self): + try: + self.driver.create_container(container_name='test/slashes/') + except InvalidContainerNameError: + pass + else: + self.fail( + 'Invalid name was provided (name contains slashes)' + ', but exception was not thrown') + + def test_delete_container_success(self): + container = Container(name='foo_bar_container', extra={}, 
driver=self) + result = self.driver.delete_container(container=container) + self.assertTrue(result) + + def test_delete_container_not_found(self): + CloudFilesMockHttp.type = 'NOT_FOUND' + container = Container(name='foo_bar_container', extra={}, driver=self) + try: + self.driver.delete_container(container=container) + except ContainerDoesNotExistError: + pass + else: + self.fail( + 'Container does not exist but an exception was not thrown') + + def test_delete_container_not_empty(self): + CloudFilesMockHttp.type = 'NOT_EMPTY' + container = Container(name='foo_bar_container', extra={}, driver=self) + try: + self.driver.delete_container(container=container) + except ContainerIsNotEmptyError: + pass + else: + self.fail('Container is not empty but an exception was not thrown') + + def test_download_object_success(self): + container = Container(name='foo_bar_container', extra={}, driver=self) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=CloudFilesStorageDriver) + destination_path = os.path.abspath(__file__) + '.temp' + result = self.driver.download_object(obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + self.assertTrue(result) + + def test_download_object_invalid_file_size(self): + CloudFilesMockRawResponse.type = 'INVALID_SIZE' + container = Container(name='foo_bar_container', extra={}, driver=self) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=CloudFilesStorageDriver) + destination_path = os.path.abspath(__file__) + '.temp' + result = self.driver.download_object(obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + self.assertFalse(result) + + def test_download_object_success_not_found(self): + CloudFilesMockRawResponse.type = 'NOT_FOUND' + container = Container(name='foo_bar_container', extra={}, driver=self) + + obj = 
Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, + meta_data=None, + driver=CloudFilesStorageDriver) + destination_path = os.path.abspath(__file__) + '.temp' + try: + self.driver.download_object( + obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + except ObjectDoesNotExistError: + pass + else: + self.fail('Object does not exist but an exception was not thrown') + + def test_download_object_as_stream(self): + container = Container(name='foo_bar_container', extra={}, driver=self) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=CloudFilesStorageDriver) + + stream = self.driver.download_object_as_stream( + obj=obj, chunk_size=None) + self.assertTrue(hasattr(stream, '__iter__')) + + def test_upload_object_success(self): + def upload_file(self, response, file_path, chunked=False, + calculate_hash=True): + return True, 'hash343hhash89h932439jsaa89', 1000 + + old_func = CloudFilesStorageDriver._upload_file + CloudFilesStorageDriver._upload_file = upload_file + file_path = os.path.abspath(__file__) + container = Container(name='foo_bar_container', extra={}, driver=self) + object_name = 'foo_test_upload' + extra = {'meta_data': {'some-value': 'foobar'}} + obj = self.driver.upload_object( + file_path=file_path, container=container, + extra=extra, object_name=object_name) + self.assertEqual(obj.name, 'foo_test_upload') + self.assertEqual(obj.size, 1000) + self.assertTrue('some-value' in obj.meta_data) + CloudFilesStorageDriver._upload_file = old_func + + def test_upload_object_zero_size_object(self): + def upload_file(self, response, file_path, chunked=False, + calculate_hash=True): + return True, 'hash343hhash89h932439jsaa89', 0 + + old_func = CloudFilesStorageDriver._upload_file + old_request = self.driver.connection.request + CloudFilesStorageDriver._upload_file = upload_file + file_path = 
os.path.join(os.path.dirname(__file__), '__init__.py') + container = Container(name='foo_bar_container', extra={}, driver=self) + object_name = 'empty' + extra = {} + + def func(*args, **kwargs): + self.assertEqual(kwargs['headers']['Content-Length'], 0) + func.called = True + return old_request(*args, **kwargs) + + self.driver.connection.request = func + func.called = False + obj = self.driver.upload_object( + file_path=file_path, container=container, + extra=extra, object_name=object_name) + self.assertEqual(obj.name, 'empty') + self.assertEqual(obj.size, 0) + self.assertTrue(func.called) + CloudFilesStorageDriver._upload_file = old_func + self.driver.connection.request = old_request + + def test_upload_object_invalid_hash(self): + def upload_file(self, response, file_path, chunked=False, + calculate_hash=True): + return True, 'hash343hhash89h932439jsaa89', 1000 + + CloudFilesMockRawResponse.type = 'INVALID_HASH' + + old_func = CloudFilesStorageDriver._upload_file + CloudFilesStorageDriver._upload_file = upload_file + file_path = os.path.abspath(__file__) + container = Container(name='foo_bar_container', extra={}, driver=self) + object_name = 'foo_test_upload' + try: + self.driver.upload_object(file_path=file_path, container=container, + object_name=object_name, + verify_hash=True) + except ObjectHashMismatchError: + pass + else: + self.fail( + 'Invalid hash was returned but an exception was not thrown') + finally: + CloudFilesStorageDriver._upload_file = old_func + + def test_upload_object_no_content_type(self): + def no_content_type(name): + return None, None + + old_func = libcloud.utils.files.guess_file_mime_type + libcloud.utils.files.guess_file_mime_type = no_content_type + file_path = os.path.abspath(__file__) + container = Container(name='foo_bar_container', extra={}, driver=self) + object_name = 'foo_test_upload' + + obj = self.driver.upload_object(file_path=file_path, verify_hash=False, + container=container, + object_name=object_name) + + 
self.assertEqual(obj.name, object_name) + libcloud.utils.files.guess_file_mime_type = old_func + + def test_upload_object_error(self): + def dummy_content_type(name): + return 'application/zip', None + + def send(instance): + raise Exception('') + + old_func1 = libcloud.utils.files.guess_file_mime_type + libcloud.utils.files.guess_file_mime_type = dummy_content_type + old_func2 = CloudFilesMockHttp.send + CloudFilesMockHttp.send = send + + file_path = os.path.abspath(__file__) + container = Container(name='foo_bar_container', extra={}, driver=self) + object_name = 'foo_test_upload' + try: + self.driver.upload_object( + file_path=file_path, + container=container, + object_name=object_name) + except LibcloudError: + pass + else: + self.fail( + 'Timeout while uploading but an exception was not thrown') + finally: + libcloud.utils.files.guess_file_mime_type = old_func1 + CloudFilesMockHttp.send = old_func2 + + def test_upload_object_inexistent_file(self): + def dummy_content_type(name): + return 'application/zip', None + + old_func = libcloud.utils.files.guess_file_mime_type + libcloud.utils.files.guess_file_mime_type = dummy_content_type + + file_path = os.path.abspath(__file__ + '.inexistent') + container = Container(name='foo_bar_container', extra={}, driver=self) + object_name = 'foo_test_upload' + try: + self.driver.upload_object( + file_path=file_path, + container=container, + object_name=object_name) + except OSError: + pass + else: + self.fail('Inexistent but an exception was not thrown') + finally: + libcloud.utils.files.guess_file_mime_type = old_func + + def test_upload_object_via_stream(self): + def dummy_content_type(name): + return 'application/zip', None + + old_func = libcloud.utils.files.guess_file_mime_type + libcloud.utils.files.guess_file_mime_type = dummy_content_type + + container = Container(name='foo_bar_container', extra={}, driver=self) + object_name = 'foo_test_stream_data' + iterator = DummyIterator(data=['2', '3', '5']) + try: + 
self.driver.upload_object_via_stream(container=container, + object_name=object_name, + iterator=iterator) + finally: + libcloud.utils.files.guess_file_mime_type = old_func + + def test_delete_object_success(self): + container = Container(name='foo_bar_container', extra={}, driver=self) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=CloudFilesStorageDriver) + status = self.driver.delete_object(obj=obj) + self.assertTrue(status) + + def test_delete_object_not_found(self): + CloudFilesMockHttp.type = 'NOT_FOUND' + container = Container(name='foo_bar_container', extra={}, driver=self) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=CloudFilesStorageDriver) + try: + self.driver.delete_object(obj=obj) + except ObjectDoesNotExistError: + pass + else: + self.fail('Object does not exist but an exception was not thrown') + + def test_ex_get_meta_data(self): + meta_data = self.driver.ex_get_meta_data() + self.assertTrue(isinstance(meta_data, dict)) + self.assertTrue('object_count' in meta_data) + self.assertTrue('container_count' in meta_data) + self.assertTrue('bytes_used' in meta_data) + self.assertTrue('temp_url_key' in meta_data) + + def test_ex_purge_object_from_cdn(self): + CloudFilesMockHttp.type = 'PURGE_SUCCESS' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self) + + self.assertTrue(self.driver.ex_purge_object_from_cdn(obj=obj)) + + def test_ex_purge_object_from_cdn_with_email(self): + CloudFilesMockHttp.type = 'PURGE_SUCCESS_EMAIL' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self) + 
self.assertTrue(self.driver.ex_purge_object_from_cdn(obj=obj, + email='test@test.com')) + + @mock.patch('os.path.getsize') + def test_ex_multipart_upload_object_for_small_files(self, getsize_mock): + getsize_mock.return_value = 0 + + old_func = CloudFilesStorageDriver.upload_object + mocked_upload_object = mock.Mock(return_value="test") + CloudFilesStorageDriver.upload_object = mocked_upload_object + + file_path = os.path.abspath(__file__) + container = Container(name='foo_bar_container', extra={}, driver=self) + object_name = 'foo_test_upload' + obj = self.driver.ex_multipart_upload_object( + file_path=file_path, container=container, + object_name=object_name) + CloudFilesStorageDriver.upload_object = old_func + + self.assertTrue(mocked_upload_object.called) + self.assertEqual(obj, "test") + + def test_ex_multipart_upload_object_success(self): + _upload_object_part = CloudFilesStorageDriver._upload_object_part + _upload_object_manifest = CloudFilesStorageDriver._upload_object_manifest + + mocked__upload_object_part = mock.Mock(return_value="test_part") + mocked__upload_object_manifest = mock.Mock( + return_value="test_manifest") + + CloudFilesStorageDriver._upload_object_part = mocked__upload_object_part + CloudFilesStorageDriver._upload_object_manifest = mocked__upload_object_manifest + + parts = 5 + file_path = os.path.abspath(__file__) + chunk_size = int(math.ceil(float(os.path.getsize(file_path)) / parts)) + container = Container(name='foo_bar_container', extra={}, driver=self) + object_name = 'foo_test_upload' + self.driver.ex_multipart_upload_object( + file_path=file_path, container=container, + object_name=object_name, chunk_size=chunk_size) + + CloudFilesStorageDriver._upload_object_part = _upload_object_part + CloudFilesStorageDriver._upload_object_manifest = _upload_object_manifest + + self.assertEqual(mocked__upload_object_part.call_count, parts) + self.assertTrue(mocked__upload_object_manifest.call_count, 1) + + def test__upload_object_part(self): + 
_put_object = CloudFilesStorageDriver._put_object + mocked__put_object = mock.Mock(return_value="test") + CloudFilesStorageDriver._put_object = mocked__put_object + + part_number = 7 + object_name = "test_object" + expected_name = object_name + '/%08d' % part_number + container = Container(name='foo_bar_container', extra={}, driver=self) + + self.driver._upload_object_part(container, object_name, + part_number, None) + + CloudFilesStorageDriver._put_object = _put_object + + func_kwargs = tuple(mocked__put_object.call_args)[1] + self.assertEqual(func_kwargs['object_name'], expected_name) + self.assertEqual(func_kwargs['container'], container) + + def test__upload_object_manifest(self): + hash_function = self.driver._get_hash_function() + hash_function.update(b('')) + data_hash = hash_function.hexdigest() + + fake_response = type('CloudFilesResponse', (), {'headers': + {'etag': data_hash} + }) + + _request = self.driver.connection.request + mocked_request = mock.Mock(return_value=fake_response) + self.driver.connection.request = mocked_request + + container = Container(name='foo_bar_container', extra={}, driver=self) + object_name = "test_object" + + self.driver._upload_object_manifest(container, object_name) + + func_args, func_kwargs = tuple(mocked_request.call_args) + + self.driver.connection.request = _request + + self.assertEqual( + func_args[0], "/" + container.name + "/" + object_name) + self.assertEqual(func_kwargs["headers"]["X-Object-Manifest"], + container.name + "/" + object_name + "/") + self.assertEqual(func_kwargs["method"], "PUT") + + def test__upload_object_manifest_wrong_hash(self): + fake_response = type('CloudFilesResponse', (), {'headers': + {'etag': '0000000'}}) + + _request = self.driver.connection.request + mocked_request = mock.Mock(return_value=fake_response) + self.driver.connection.request = mocked_request + + container = Container(name='foo_bar_container', extra={}, driver=self) + object_name = "test_object" + + try: + 
self.driver._upload_object_manifest(container, object_name) + except ObjectHashMismatchError: + pass + else: + self.fail('Exception was not thrown') + finally: + self.driver.connection.request = _request + + def test_create_container_put_object_name_encoding(self): + def upload_file(self, response, file_path, chunked=False, + calculate_hash=True): + return True, 'hash343hhash89h932439jsaa89', 1000 + + old_func = CloudFilesStorageDriver._upload_file + CloudFilesStorageDriver._upload_file = upload_file + + container_name = 'speci@l_name' + object_name = 'm@objā‚¬ct' + file_path = os.path.abspath(__file__) + + container = self.driver.create_container(container_name=container_name) + self.assertEqual(container.name, container_name) + + obj = self.driver.upload_object( + file_path=file_path, container=container, + object_name=object_name) + self.assertEqual(obj.name, object_name) + CloudFilesStorageDriver._upload_file = old_func + + def test_ex_enable_static_website(self): + container = Container(name='foo_bar_container', extra={}, driver=self) + result = self.driver.ex_enable_static_website(container=container, + index_file='index.html') + self.assertTrue(result) + + def test_ex_set_error_page(self): + container = Container(name='foo_bar_container', extra={}, driver=self) + result = self.driver.ex_set_error_page(container=container, + file_name='error.html') + self.assertTrue(result) + + def test_ex_set_account_metadata_temp_url_key(self): + result = self.driver.ex_set_account_metadata_temp_url_key("a key") + self.assertTrue(result) + + @mock.patch("libcloud.storage.drivers.cloudfiles.time") + def test_ex_get_object_temp_url(self, time): + time.return_value = 0 + self.driver.ex_get_meta_data = mock.Mock() + self.driver.ex_get_meta_data.return_value = {'container_count': 1, + 'object_count': 1, + 'bytes_used': 1, + 'temp_url_key': 'foo'} + container = Container(name='foo_bar_container', extra={}, driver=self) + obj = Object(name='foo_bar_object', size=1000, hash=None, 
extra={}, + container=container, meta_data=None, + driver=self) + hmac_body = "%s\n%s\n%s" % ('GET', 60, + "/v1/MossoCloudFS/foo_bar_container/foo_bar_object") + sig = hmac.new(b('foo'), b(hmac_body), sha1).hexdigest() + ret = self.driver.ex_get_object_temp_url(obj, 'GET') + temp_url = ('https://storage4.%s1.clouddrive.com/v1/MossoCloudFS/' + 'foo_bar_container/foo_bar_object?temp_url_expires=60&temp_url_sig=%s' % + (self.region, sig)) + + self.assertEqual(''.join(sorted(ret)), ''.join(sorted(temp_url))) + + def test_ex_get_object_temp_url_no_key_raises_key_error(self): + self.driver.ex_get_meta_data = mock.Mock() + self.driver.ex_get_meta_data.return_value = {'container_count': 1, + 'object_count': 1, + 'bytes_used': 1, + 'temp_url_key': None} + container = Container(name='foo_bar_container', extra={}, driver=self) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self) + self.assertRaises( + KeyError, self.driver.ex_get_object_temp_url, obj, 'GET') + + def _remove_test_file(self): + file_path = os.path.abspath(__file__) + '.temp' + + try: + os.unlink(file_path) + except OSError: + pass + + +class CloudFilesDeprecatedUSTests(CloudFilesTests): + driver_klass = CloudFilesUSStorageDriver + region = 'ord' + + +class CloudFilesDeprecatedUKTests(CloudFilesTests): + driver_klass = CloudFilesUKStorageDriver + region = 'lon' + + +class CloudFilesMockHttp(StorageMockHttp, MockHttpTestCase): + + fixtures = StorageFileFixtures('cloudfiles') + base_headers = {'content-type': 'application/json; charset=UTF-8'} + + # fake auth token response + def _v2_0_tokens(self, method, url, body, headers): + headers = copy.deepcopy(self.base_headers) + body = self.fixtures.load('_v2_0__auth.json') + return (httplib.OK, body, headers, + httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_MALFORMED_JSON(self, method, url, body, headers): + # test_invalid_json_throws_exception + body = 'broken: json /*"' + return 
(httplib.NO_CONTENT, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_EMPTY(self, method, url, body, headers): + return (httplib.NO_CONTENT, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS(self, method, url, body, headers): + headers = copy.deepcopy(self.base_headers) + if method == 'GET': + # list_containers + body = self.fixtures.load('list_containers.json') + status_code = httplib.OK + elif method == 'HEAD': + # get_meta_data + body = self.fixtures.load('meta_data.json') + status_code = httplib.NO_CONTENT + headers.update({'x-account-container-count': 10, + 'x-account-object-count': 400, + 'x-account-bytes-used': 1234567 + }) + elif method == 'POST': + body = '' + status_code = httplib.NO_CONTENT + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_not_found(self, method, url, body, headers): + # test_get_object_not_found + if method == 'HEAD': + body = '' + else: + raise ValueError('Invalid method') + + return (httplib.NOT_FOUND, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_test_container_EMPTY(self, method, url, body, headers): + body = self.fixtures.load('list_container_objects_empty.json') + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_test_20container_201_EMPTY(self, method, url, body, headers): + body = self.fixtures.load('list_container_objects_empty.json') + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_test_container(self, method, url, body, headers): + headers = copy.deepcopy(self.base_headers) + if method == 'GET': + # list_container_objects + if url.find('marker') == -1: + body = self.fixtures.load('list_container_objects.json') + status_code = httplib.OK + else: + body = '' + status_code = httplib.NO_CONTENT + elif method == 'HEAD': + # get_container + body = 
self.fixtures.load('list_container_objects_empty.json') + status_code = httplib.NO_CONTENT + headers.update({'x-container-object-count': 800, + 'x-container-bytes-used': 1234568 + }) + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_test_container_ITERATOR(self, method, url, body, headers): + headers = copy.deepcopy(self.base_headers) + # list_container_objects + if url.find('foo-test-3') != -1: + body = self.fixtures.load( + 'list_container_objects_not_exhausted2.json') + status_code = httplib.OK + elif url.find('foo-test-5') != -1: + body = '' + status_code = httplib.NO_CONTENT + else: + # First request + body = self.fixtures.load( + 'list_container_objects_not_exhausted1.json') + status_code = httplib.OK + + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_test_container_not_found( + self, method, url, body, headers): + # test_get_container_not_found + if method == 'HEAD': + body = '' + else: + raise ValueError('Invalid method') + + return (httplib.NOT_FOUND, body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_test_container_test_object( + self, method, url, body, headers): + headers = copy.deepcopy(self.base_headers) + if method == 'HEAD': + # get_object + body = self.fixtures.load('list_container_objects_empty.json') + status_code = httplib.NO_CONTENT + headers.update({'content-length': 555, + 'last-modified': 'Tue, 25 Jan 2011 22:01:49 GMT', + 'etag': '6b21c4a111ac178feacf9ec9d0c71f17', + 'x-object-meta-foo-bar': 'test 1', + 'x-object-meta-bar-foo': 'test 2', + 'content-type': 'application/zip'}) + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_test_container__7E_test_object( + self, method, url, body, headers): + headers = copy.deepcopy(self.base_headers) + if method == 'HEAD': + # get_object_name_encoding + body = self.fixtures.load('list_container_objects_empty.json') + status_code = 
httplib.NO_CONTENT + headers.update({'content-length': 555, + 'last-modified': 'Tue, 25 Jan 2011 22:01:49 GMT', + 'etag': '6b21c4a111ac178feacf9ec9d0c71f17', + 'x-object-meta-foo-bar': 'test 1', + 'x-object-meta-bar-foo': 'test 2', + 'content-type': 'application/zip'}) + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_test_create_container( + self, method, url, body, headers): + # test_create_container_success + headers = copy.deepcopy(self.base_headers) + body = self.fixtures.load('list_container_objects_empty.json') + headers = copy.deepcopy(self.base_headers) + headers.update({'content-length': 18, + 'date': 'Mon, 28 Feb 2011 07:52:57 GMT' + }) + status_code = httplib.CREATED + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_speci_40l_name(self, method, url, body, headers): + # test_create_container_put_object_name_encoding + # Verify that the name is properly url encoded + container_name = 'speci@l_name' + encoded_container_name = urlquote(container_name) + self.assertTrue(encoded_container_name in url) + + headers = copy.deepcopy(self.base_headers) + body = self.fixtures.load('list_container_objects_empty.json') + headers = copy.deepcopy(self.base_headers) + headers.update({'content-length': 18, + 'date': 'Mon, 28 Feb 2011 07:52:57 GMT' + }) + status_code = httplib.CREATED + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_test_create_container_ALREADY_EXISTS( + self, method, url, body, headers): + # test_create_container_already_exists + headers = copy.deepcopy(self.base_headers) + body = self.fixtures.load('list_container_objects_empty.json') + headers.update({'content-type': 'text/plain'}) + status_code = httplib.ACCEPTED + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container(self, method, url, body, headers): + if method == 'DELETE': + # test_delete_container_success + 
body = self.fixtures.load('list_container_objects_empty.json') + headers = self.base_headers + status_code = httplib.NO_CONTENT + elif method == 'POST': + # test_ex_enable_static_website + body = '' + headers = self.base_headers + status_code = httplib.ACCEPTED + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_object_PURGE_SUCCESS( + self, method, url, body, headers): + + if method == 'DELETE': + # test_ex_purge_from_cdn + headers = self.base_headers + status_code = httplib.NO_CONTENT + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_object_PURGE_SUCCESS_EMAIL( + self, method, url, body, headers): + + if method == 'DELETE': + # test_ex_purge_from_cdn_with_email + self.assertEqual(headers['X-Purge-Email'], 'test@test.com') + headers = self.base_headers + status_code = httplib.NO_CONTENT + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_NOT_FOUND( + self, method, url, body, headers): + + if method == 'DELETE': + # test_delete_container_not_found + body = self.fixtures.load('list_container_objects_empty.json') + headers = self.base_headers + status_code = httplib.NOT_FOUND + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_NOT_EMPTY( + self, method, url, body, headers): + + if method == 'DELETE': + # test_delete_container_not_empty + body = self.fixtures.load('list_container_objects_empty.json') + headers = self.base_headers + status_code = httplib.CONFLICT + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_foo_bar_object( + self, method, url, body, headers): + + if method == 'DELETE': + # test_delete_object_success + body = self.fixtures.load('list_container_objects_empty.json') + headers = self.base_headers + status_code = httplib.NO_CONTENT + return (status_code, 
body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_foo_bar_object_NOT_FOUND( + self, method, url, body, headers): + + if method == 'DELETE': + # test_delete_object_success + body = self.fixtures.load('list_container_objects_empty.json') + headers = self.base_headers + status_code = httplib.NOT_FOUND + + return (status_code, body, headers, httplib.responses[httplib.OK]) + + +class CloudFilesMockRawResponse(MockRawResponse): + + fixtures = StorageFileFixtures('cloudfiles') + base_headers = {'content-type': 'application/json; charset=UTF-8'} + + def _v1_MossoCloudFS_foo_bar_container_foo_test_upload( + self, method, url, body, headers): + # test_object_upload_success + + body = '' + headers = {} + headers.update(self.base_headers) + headers['etag'] = 'hash343hhash89h932439jsaa89' + return (httplib.CREATED, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_speci_40l_name_m_40obj_E2_82_ACct(self, method, url, + body, headers): + # test_create_container_put_object_name_encoding + # Verify that the name is properly url encoded + object_name = 'm@objā‚¬ct' + urlquote(object_name) + + headers = copy.deepcopy(self.base_headers) + body = '' + headers['etag'] = 'hash343hhash89h932439jsaa89' + return (httplib.CREATED, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_empty(self, method, url, body, + headers): + # test_upload_object_zero_size_object + body = '' + headers = {} + headers.update(self.base_headers) + headers['etag'] = 'hash343hhash89h932439jsaa89' + return (httplib.CREATED, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_foo_test_upload_INVALID_HASH( + self, method, url, body, headers): + # test_object_upload_invalid_hash + body = '' + headers = {} + headers.update(self.base_headers) + headers['etag'] = 'foobar' + return (httplib.CREATED, body, headers, + httplib.responses[httplib.OK]) + + def 
_v1_MossoCloudFS_foo_bar_container_foo_bar_object( + self, method, url, body, headers): + + # test_download_object_success + body = self._generate_random_data(1000) + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_foo_bar_object_INVALID_SIZE( + self, method, url, body, headers): + # test_download_object_invalid_file_size + body = self._generate_random_data(100) + return (httplib.OK, body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_foo_bar_object_NOT_FOUND( + self, method, url, body, headers): + body = '' + return (httplib.NOT_FOUND, body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_foo_test_stream_data( + self, method, url, body, headers): + + # test_upload_object_via_stream_success + headers = {} + headers.update(self.base_headers) + headers['etag'] = '577ef1154f3240ad5b9b413aa7346a1e' + body = 'test' + return (httplib.CREATED, + body, + headers, + httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/storage/test_google_storage.py libcloud-0.15.1/libcloud/test/storage/test_google_storage.py --- libcloud-0.5.0/libcloud/test/storage/test_google_storage.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/test_google_storage.py 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,45 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest + +from libcloud.storage.drivers.google_storage import GoogleStorageDriver +from libcloud.test.storage.test_s3 import S3Tests, S3MockHttp + +from libcloud.test.file_fixtures import StorageFileFixtures +from libcloud.test.secrets import STORAGE_GOOGLE_STORAGE_PARAMS + + +class GoogleStorageMockHttp(S3MockHttp): + fixtures = StorageFileFixtures('google_storage') + + +class GoogleStorageTests(S3Tests): + driver_type = GoogleStorageDriver + driver_args = STORAGE_GOOGLE_STORAGE_PARAMS + mock_response_klass = GoogleStorageMockHttp + + def test_billing_not_enabled(self): + # TODO + pass + + def test_token(self): + # Not supported on Google Storage + pass + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/storage/test_local.py libcloud-0.15.1/libcloud/test/storage/test_local.py --- libcloud-0.5.0/libcloud/test/storage/test_local.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/test_local.py 2013-11-29 12:35:05.000000000 +0000 @@ -0,0 +1,334 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import with_statement + +import os +import sys +import shutil +import unittest +import tempfile + +import mock + +from libcloud.common.types import LibcloudError +from libcloud.storage.base import Container +from libcloud.storage.types import ContainerDoesNotExistError +from libcloud.storage.types import ContainerAlreadyExistsError +from libcloud.storage.types import ContainerIsNotEmptyError +from libcloud.storage.types import InvalidContainerNameError + +try: + from libcloud.storage.drivers.local import LocalStorageDriver + from libcloud.storage.drivers.local import LockLocalStorage + from lockfile import LockTimeout +except ImportError: + print('lockfile library is not available, skipping local_storage tests...') + LocalStorageDriver = None + LockTimeout = None + + +class LocalTests(unittest.TestCase): + driver_type = LocalStorageDriver + + @classmethod + def create_driver(self): + self.key = tempfile.mkdtemp() + return self.driver_type(self.key, None) + + def setUp(self): + self.driver = self.create_driver() + + def tearDown(self): + shutil.rmtree(self.key) + self.key = None + + def make_tmp_file(self): + _, tmppath = tempfile.mkstemp() + + with open(tmppath, 'w') as fp: + fp.write('blah' * 1024) + + return tmppath + + def remove_tmp_file(self, tmppath): + os.unlink(tmppath) + + def test_list_containers_empty(self): + containers = self.driver.list_containers() + self.assertEqual(len(containers), 0) + + def test_containers_success(self): + self.driver.create_container('test1') + self.driver.create_container('test2') + containers = 
self.driver.list_containers() + self.assertEqual(len(containers), 2) + + container = containers[1] + + self.assertTrue('creation_time' in container.extra) + self.assertTrue('modify_time' in container.extra) + self.assertTrue('access_time' in container.extra) + + objects = self.driver.list_container_objects(container=container) + self.assertEqual(len(objects), 0) + + objects = container.list_objects() + self.assertEqual(len(objects), 0) + + for container in containers: + self.driver.delete_container(container) + + def test_objects_success(self): + tmppath = self.make_tmp_file() + tmpfile = open(tmppath) + + container = self.driver.create_container('test3') + obj1 = container.upload_object(tmppath, 'object1') + obj2 = container.upload_object(tmppath, 'path/object2') + obj3 = container.upload_object(tmppath, 'path/to/object3') + obj4 = container.upload_object(tmppath, 'path/to/object4.ext') + obj5 = container.upload_object_via_stream(tmpfile, 'object5') + + objects = self.driver.list_container_objects(container=container) + self.assertEqual(len(objects), 5) + + for obj in objects: + self.assertNotEqual(obj.hash, None) + self.assertEqual(obj.size, 4096) + self.assertEqual(obj.container.name, 'test3') + self.assertTrue('creation_time' in obj.extra) + self.assertTrue('modify_time' in obj.extra) + self.assertTrue('access_time' in obj.extra) + + obj1.delete() + obj2.delete() + + objects = container.list_objects() + self.assertEqual(len(objects), 3) + + container.delete_object(obj3) + container.delete_object(obj4) + container.delete_object(obj5) + + objects = container.list_objects() + self.assertEqual(len(objects), 0) + + container.delete() + tmpfile.close() + self.remove_tmp_file(tmppath) + + def test_get_container_doesnt_exist(self): + try: + self.driver.get_container(container_name='container1') + except ContainerDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_get_container_success(self): + self.driver.create_container('test4') + 
container = self.driver.get_container(container_name='test4') + self.assertTrue(container.name, 'test4') + container.delete() + + def test_get_object_container_doesnt_exist(self): + try: + self.driver.get_object(container_name='test-inexistent', + object_name='test') + except ContainerDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_get_object_success(self): + tmppath = self.make_tmp_file() + container = self.driver.create_container('test5') + container.upload_object(tmppath, 'test') + + obj = self.driver.get_object(container_name='test5', + object_name='test') + + self.assertEqual(obj.name, 'test') + self.assertEqual(obj.container.name, 'test5') + self.assertEqual(obj.size, 4096) + self.assertNotEqual(obj.hash, None) + self.assertTrue('creation_time' in obj.extra) + self.assertTrue('modify_time' in obj.extra) + self.assertTrue('access_time' in obj.extra) + + obj.delete() + container.delete() + self.remove_tmp_file(tmppath) + + def test_create_container_invalid_name(self): + try: + self.driver.create_container(container_name='new/container') + except InvalidContainerNameError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_container_already_exists(self): + container = self.driver.create_container( + container_name='new-container') + try: + self.driver.create_container(container_name='new-container') + except ContainerAlreadyExistsError: + pass + else: + self.fail('Exception was not thrown') + + # success + self.driver.delete_container(container) + + def test_create_container_success(self): + name = 'new_container' + container = self.driver.create_container(container_name=name) + self.assertEqual(container.name, name) + self.driver.delete_container(container) + + def test_delete_container_doesnt_exist(self): + container = Container(name='new_container', extra=None, + driver=self.driver) + try: + self.driver.delete_container(container=container) + except ContainerDoesNotExistError: + pass + else: + 
self.fail('Exception was not thrown') + + def test_delete_container_not_empty(self): + tmppath = self.make_tmp_file() + container = self.driver.create_container('test6') + obj = container.upload_object(tmppath, 'test') + + try: + self.driver.delete_container(container=container) + except ContainerIsNotEmptyError: + pass + else: + self.fail('Exception was not thrown') + + # success + obj.delete() + self.remove_tmp_file(tmppath) + self.assertTrue(self.driver.delete_container(container=container)) + + def test_delete_container_not_found(self): + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + try: + self.driver.delete_container(container=container) + except ContainerDoesNotExistError: + pass + else: + self.fail('Container does not exist but an exception was not' + + 'thrown') + + def test_delete_container_success(self): + container = self.driver.create_container('test7') + self.assertTrue(self.driver.delete_container(container=container)) + + def test_download_object_success(self): + tmppath = self.make_tmp_file() + container = self.driver.create_container('test6') + obj = container.upload_object(tmppath, 'test') + + destination_path = tmppath + '.temp' + result = self.driver.download_object(obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + + self.assertTrue(result) + + obj.delete() + container.delete() + self.remove_tmp_file(tmppath) + os.unlink(destination_path) + + def test_download_object_and_overwrite(self): + tmppath = self.make_tmp_file() + container = self.driver.create_container('test6') + obj = container.upload_object(tmppath, 'test') + + destination_path = tmppath + '.temp' + result = self.driver.download_object(obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + + self.assertTrue(result) + + try: + self.driver.download_object(obj=obj, + destination_path=destination_path, + overwrite_existing=False, + 
delete_on_failure=True) + except LibcloudError: + pass + else: + self.fail('Exception was not thrown') + + result = self.driver.download_object(obj=obj, + destination_path=destination_path, + overwrite_existing=True, + delete_on_failure=True) + + self.assertTrue(result) + + # success + obj.delete() + container.delete() + self.remove_tmp_file(tmppath) + os.unlink(destination_path) + + def test_download_object_as_stream_success(self): + tmppath = self.make_tmp_file() + container = self.driver.create_container('test6') + obj = container.upload_object(tmppath, 'test') + + stream = self.driver.download_object_as_stream(obj=obj, + chunk_size=1024) + + self.assertTrue(hasattr(stream, '__iter__')) + + data = '' + for buff in stream: + data += buff.decode('utf-8') + + self.assertTrue(len(data), 4096) + + obj.delete() + container.delete() + self.remove_tmp_file(tmppath) + + @mock.patch("lockfile.mkdirlockfile.MkdirLockFile.acquire", + mock.MagicMock(side_effect=LockTimeout)) + def test_proper_lockfile_imports(self): + # LockLocalStorage was previously using an un-imported exception + # in its __enter__ method, so the following would raise a NameError. + lls = LockLocalStorage("blah") + self.assertRaises(LibcloudError, lls.__enter__) + + +if not LocalStorageDriver: + class LocalTests(unittest.TestCase): # NOQA + pass + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/storage/test_s3.py libcloud-0.15.1/libcloud/test/storage/test_s3.py --- libcloud-0.5.0/libcloud/test/storage/test_s3.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/storage/test_s3.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,979 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import unittest + +try: + from lxml import etree as ET +except ImportError: + from xml.etree import ElementTree as ET + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlparse +from libcloud.utils.py3 import parse_qs + +from libcloud.common.types import InvalidCredsError +from libcloud.common.types import LibcloudError, MalformedResponseError +from libcloud.storage.base import Container, Object +from libcloud.storage.types import ContainerDoesNotExistError +from libcloud.storage.types import ContainerIsNotEmptyError +from libcloud.storage.types import InvalidContainerNameError +from libcloud.storage.types import ObjectDoesNotExistError +from libcloud.storage.types import ObjectHashMismatchError +from libcloud.storage.drivers.s3 import S3StorageDriver, S3USWestStorageDriver +from libcloud.storage.drivers.s3 import S3EUWestStorageDriver +from libcloud.storage.drivers.s3 import S3APSEStorageDriver +from libcloud.storage.drivers.s3 import S3APNEStorageDriver +from libcloud.storage.drivers.s3 import CHUNK_SIZE +from libcloud.storage.drivers.dummy import DummyIterator + +from libcloud.test import StorageMockHttp, MockRawResponse # pylint: disable-msg=E0611 +from libcloud.test import MockHttpTestCase # pylint: disable-msg=E0611 +from libcloud.test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611 +from libcloud.test.secrets import 
STORAGE_S3_PARAMS + + +class S3MockHttp(StorageMockHttp, MockHttpTestCase): + + fixtures = StorageFileFixtures('s3') + base_headers = {} + + def _UNAUTHORIZED(self, method, url, body, headers): + return (httplib.UNAUTHORIZED, + '', + self.base_headers, + httplib.responses[httplib.OK]) + + def _DIFFERENT_REGION(self, method, url, body, headers): + return (httplib.MOVED_PERMANENTLY, + '', + self.base_headers, + httplib.responses[httplib.OK]) + + def _list_containers_EMPTY(self, method, url, body, headers): + body = self.fixtures.load('list_containers_empty.xml') + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _list_containers_TOKEN(self, method, url, body, headers): + self.assertEqual(headers['x-amz-security-token'], 'asdf') + body = self.fixtures.load('list_containers_empty.xml') + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _list_containers(self, method, url, body, headers): + body = self.fixtures.load('list_containers.xml') + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _test_container_EMPTY(self, method, url, body, headers): + body = self.fixtures.load('list_container_objects_empty.xml') + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _test_container(self, method, url, body, headers): + body = self.fixtures.load('list_container_objects.xml') + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _test_container_ITERATOR(self, method, url, body, headers): + if url.find('3.zip') == -1: + # First part of the response (first 3 objects) + file_name = 'list_container_objects_not_exhausted1.xml' + else: + file_name = 'list_container_objects_not_exhausted2.xml' + + body = self.fixtures.load(file_name) + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _test2_get_object(self, method, url, body, headers): + body = 
self.fixtures.load('list_container_objects.xml') + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _test2_test_get_object(self, method, url, body, headers): + # test_get_object + body = self.fixtures.load('list_containers.xml') + headers = {'content-type': 'application/zip', + 'etag': '"e31208wqsdoj329jd"', + 'x-amz-meta-rabbits': 'monkeys', + 'content-length': 12345, + 'last-modified': 'Thu, 13 Sep 2012 07:13:22 GMT' + } + + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _new_container_INVALID_NAME(self, method, url, body, headers): + # test_create_container + return (httplib.BAD_REQUEST, + body, + headers, + httplib.responses[httplib.OK]) + + def _new_container_ALREADY_EXISTS(self, method, url, body, headers): + # test_create_container + return (httplib.CONFLICT, + body, + headers, + httplib.responses[httplib.OK]) + + def _new_container(self, method, url, body, headers): + # test_create_container, test_delete_container + + if method == 'PUT': + status = httplib.OK + elif method == 'DELETE': + status = httplib.NO_CONTENT + + return (status, + body, + headers, + httplib.responses[httplib.OK]) + + def _new_container_DOESNT_EXIST(self, method, url, body, headers): + # test_delete_container + return (httplib.NOT_FOUND, + body, + headers, + httplib.responses[httplib.OK]) + + def _new_container_NOT_EMPTY(self, method, url, body, headers): + # test_delete_container + return (httplib.CONFLICT, + body, + headers, + httplib.responses[httplib.OK]) + + def _test1_get_container(self, method, url, body, headers): + body = self.fixtures.load('list_container_objects.xml') + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _container1_get_container(self, method, url, body, headers): + return (httplib.NOT_FOUND, + '', + self.base_headers, + httplib.responses[httplib.NOT_FOUND]) + + def _test_inexistent_get_object(self, method, url, body, headers): + return 
(httplib.NOT_FOUND, + '', + self.base_headers, + httplib.responses[httplib.NOT_FOUND]) + + def _foo_bar_container(self, method, url, body, headers): + # test_delete_container + return (httplib.NO_CONTENT, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_NOT_FOUND(self, method, url, body, headers): + # test_delete_container_not_found + return (httplib.NOT_FOUND, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_foo_bar_object_NOT_FOUND(self, method, url, body, + headers): + # test_delete_object_not_found + return (httplib.NOT_FOUND, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_foo_bar_object(self, method, url, body, headers): + # test_delete_object + return (httplib.NO_CONTENT, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_foo_test_stream_data(self, method, url, body, + headers): + # test_upload_object_via_stream + body = '' + headers = {'etag': '"0cc175b9c0f1b6a831c399e269772661"'} + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_foo_test_stream_data_MULTIPART(self, method, url, + body, headers): + headers = {'etag': '"0cc175b9c0f1b6a831c399e269772661"'} + TEST_ID = 'VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA' + + query_string = urlparse.urlsplit(url).query + query = parse_qs(query_string) + + if not query.get('uploadId', False): + self.fail('Request doesnt contain uploadId query parameter') + + upload_id = query['uploadId'][0] + if upload_id != TEST_ID: + self.fail('first uploadId doesnt match TEST_ID') + + if method == 'PUT': + # PUT is used for uploading the part. 
part number is mandatory + if not query.get('partNumber', False): + self.fail('Request is missing partNumber query parameter') + + body = '' + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + elif method == 'DELETE': + # DELETE is done for aborting the upload + body = '' + return (httplib.NO_CONTENT, + body, + headers, + httplib.responses[httplib.NO_CONTENT]) + + else: + # POST is done for committing the upload. Parse the XML and + # check if the commit is proper (TODO: XML Schema based check?) + commit = ET.fromstring(body) + count = 0 + + for part in commit.findall('Part'): + count += 1 + part_no = part.find('PartNumber').text + etag = part.find('ETag').text + + self.assertEqual(part_no, str(count)) + self.assertEqual(etag, headers['etag']) + + # Make sure that manifest contains at least one part + self.assertTrue(count >= 1) + + body = self.fixtures.load('complete_multipart.xml') + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_LIST_MULTIPART(self, method, url, body, headers): + query_string = urlparse.urlsplit(url).query + query = parse_qs(query_string) + + if 'key-marker' not in query: + body = self.fixtures.load('list_multipart_1.xml') + else: + body = self.fixtures.load('list_multipart_2.xml') + + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_my_divisor_LIST_MULTIPART(self, method, url, + body, headers): + body = '' + return (httplib.NO_CONTENT, + body, + headers, + httplib.responses[httplib.NO_CONTENT]) + + def _foo_bar_container_my_movie_m2ts_LIST_MULTIPART(self, method, url, + body, headers): + body = '' + return (httplib.NO_CONTENT, + body, + headers, + httplib.responses[httplib.NO_CONTENT]) + + +class S3MockRawResponse(MockRawResponse): + + fixtures = StorageFileFixtures('s3') + + def parse_body(self): + if len(self.body) == 0 and not self.parse_zero_length_body: + return self.body + + try: + body = ET.XML(self.body) + 
except: + raise MalformedResponseError("Failed to parse XML", + body=self.body, + driver=self.connection.driver) + return body + + def _foo_bar_container_foo_bar_object(self, method, url, body, headers): + # test_download_object_success + body = self._generate_random_data(1000) + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_foo_test_upload_INVALID_HASH1(self, method, url, + body, headers): + body = '' + headers = {} + headers['etag'] = '"foobar"' + # test_upload_object_invalid_hash1 + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_foo_test_upload_INVALID_HASH2(self, method, url, + body, headers): + # test_upload_object_invalid_hash2 + body = '' + headers = {'etag': '"hash343hhash89h932439jsaa89"'} + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_foo_test_upload(self, method, url, body, headers): + # test_upload_object_success + body = '' + headers = {'etag': '"0cc175b9c0f1b6a831c399e269772661"'} + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_foo_bar_object_INVALID_SIZE(self, method, url, + body, headers): + # test_upload_object_invalid_file_size + body = '' + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_foo_test_stream_data(self, method, url, body, + headers): + # test_upload_object_via_stream + body = '' + headers = {'etag': '"0cc175b9c0f1b6a831c399e269772661"'} + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_foo_test_stream_data_MULTIPART(self, method, url, + body, headers): + headers = {} + # POST is done for initiating multipart upload + if method == 'POST': + body = self.fixtures.load('initiate_multipart.xml') + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + else: + body = '' + return (httplib.BAD_REQUEST, + body, + 
headers, + httplib.responses[httplib.BAD_REQUEST]) + + +class S3Tests(unittest.TestCase): + driver_type = S3StorageDriver + driver_args = STORAGE_S3_PARAMS + mock_response_klass = S3MockHttp + mock_raw_response_klass = S3MockRawResponse + + @classmethod + def create_driver(self): + return self.driver_type(*self.driver_args) + + def setUp(self): + self.driver_type.connectionCls.conn_classes = (None, + self.mock_response_klass) + self.driver_type.connectionCls.rawResponseCls = \ + self.mock_raw_response_klass + self.mock_response_klass.type = None + self.mock_raw_response_klass.type = None + self.driver = self.create_driver() + + def tearDown(self): + self._remove_test_file() + + def _remove_test_file(self): + file_path = os.path.abspath(__file__) + '.temp' + + try: + os.unlink(file_path) + except OSError: + pass + + def test_invalid_credentials(self): + self.mock_response_klass.type = 'UNAUTHORIZED' + try: + self.driver.list_containers() + except InvalidCredsError: + e = sys.exc_info()[1] + self.assertEqual(True, isinstance(e, InvalidCredsError)) + else: + self.fail('Exception was not thrown') + + def test_token(self): + self.mock_response_klass.type = 'list_containers_TOKEN' + self.driver = self.driver_type(*self.driver_args, token='asdf') + self.driver.list_containers() + + def test_bucket_is_located_in_different_region(self): + self.mock_response_klass.type = 'DIFFERENT_REGION' + try: + self.driver.list_containers() + except LibcloudError: + pass + else: + self.fail('Exception was not thrown') + + def test_list_containers_empty(self): + self.mock_response_klass.type = 'list_containers_EMPTY' + containers = self.driver.list_containers() + self.assertEqual(len(containers), 0) + + def test_list_containers_success(self): + self.mock_response_klass.type = 'list_containers' + containers = self.driver.list_containers() + self.assertEqual(len(containers), 2) + + self.assertTrue('creation_date' in containers[1].extra) + + def test_list_container_objects_empty(self): + 
self.mock_response_klass.type = 'EMPTY' + container = Container(name='test_container', extra={}, + driver=self.driver) + objects = self.driver.list_container_objects(container=container) + self.assertEqual(len(objects), 0) + + def test_list_container_objects_success(self): + self.mock_response_klass.type = None + container = Container(name='test_container', extra={}, + driver=self.driver) + objects = self.driver.list_container_objects(container=container) + self.assertEqual(len(objects), 1) + + obj = [o for o in objects if o.name == '1.zip'][0] + self.assertEqual(obj.hash, '4397da7a7649e8085de9916c240e8166') + self.assertEqual(obj.size, 1234567) + self.assertEqual(obj.container.name, 'test_container') + self.assertEqual( + obj.extra['last_modified'], '2011-04-09T19:05:18.000Z') + self.assertTrue('owner' in obj.meta_data) + + def test_list_container_objects_iterator_has_more(self): + self.mock_response_klass.type = 'ITERATOR' + container = Container(name='test_container', extra={}, + driver=self.driver) + objects = self.driver.list_container_objects(container=container) + + obj = [o for o in objects if o.name == '1.zip'][0] + self.assertEqual(obj.hash, '4397da7a7649e8085de9916c240e8166') + self.assertEqual(obj.size, 1234567) + self.assertEqual(obj.container.name, 'test_container') + + self.assertTrue(obj in objects) + self.assertEqual(len(objects), 5) + + def test_list_container_objects_with_prefix(self): + self.mock_response_klass.type = None + container = Container(name='test_container', extra={}, + driver=self.driver) + objects = self.driver.list_container_objects(container=container, + ex_prefix='test_prefix') + self.assertEqual(len(objects), 1) + + obj = [o for o in objects if o.name == '1.zip'][0] + self.assertEqual(obj.hash, '4397da7a7649e8085de9916c240e8166') + self.assertEqual(obj.size, 1234567) + self.assertEqual(obj.container.name, 'test_container') + self.assertTrue('owner' in obj.meta_data) + + def test_get_container_doesnt_exist(self): + 
self.mock_response_klass.type = 'get_container' + try: + self.driver.get_container(container_name='container1') + except ContainerDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_get_container_success(self): + self.mock_response_klass.type = 'get_container' + container = self.driver.get_container(container_name='test1') + self.assertTrue(container.name, 'test1') + + def test_get_object_container_doesnt_exist(self): + # This method makes two requests which makes mocking the response a bit + # trickier + self.mock_response_klass.type = 'get_object' + try: + self.driver.get_object(container_name='test-inexistent', + object_name='test') + except ContainerDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_get_object_success(self): + # This method makes two requests which makes mocking the response a bit + # trickier + self.mock_response_klass.type = 'get_object' + obj = self.driver.get_object(container_name='test2', + object_name='test') + + self.assertEqual(obj.name, 'test') + self.assertEqual(obj.container.name, 'test2') + self.assertEqual(obj.size, 12345) + self.assertEqual(obj.hash, 'e31208wqsdoj329jd') + self.assertEqual(obj.extra['last_modified'], + 'Thu, 13 Sep 2012 07:13:22 GMT') + self.assertEqual(obj.extra['content_type'], 'application/zip') + self.assertEqual(obj.meta_data['rabbits'], 'monkeys') + + def test_create_container_invalid_name(self): + # invalid container name + self.mock_response_klass.type = 'INVALID_NAME' + try: + self.driver.create_container(container_name='new_container') + except InvalidContainerNameError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_container_already_exists(self): + # container with this name already exists + self.mock_response_klass.type = 'ALREADY_EXISTS' + try: + self.driver.create_container(container_name='new-container') + except InvalidContainerNameError: + pass + else: + self.fail('Exception was not thrown') + + def 
test_create_container_success(self): + # success + self.mock_response_klass.type = None + name = 'new_container' + container = self.driver.create_container(container_name=name) + self.assertEqual(container.name, name) + + def test_delete_container_doesnt_exist(self): + container = Container(name='new_container', extra=None, + driver=self.driver) + self.mock_response_klass.type = 'DOESNT_EXIST' + try: + self.driver.delete_container(container=container) + except ContainerDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_delete_container_not_empty(self): + container = Container(name='new_container', extra=None, + driver=self.driver) + self.mock_response_klass.type = 'NOT_EMPTY' + try: + self.driver.delete_container(container=container) + except ContainerIsNotEmptyError: + pass + else: + self.fail('Exception was not thrown') + + # success + self.mock_response_klass.type = None + self.assertTrue(self.driver.delete_container(container=container)) + + def test_delete_container_not_found(self): + self.mock_response_klass.type = 'NOT_FOUND' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + try: + self.driver.delete_container(container=container) + except ContainerDoesNotExistError: + pass + else: + self.fail('Container does not exist but an exception was not' + + 'thrown') + + def test_delete_container_success(self): + self.mock_response_klass.type = None + container = Container(name='new_container', extra=None, + driver=self.driver) + self.assertTrue(self.driver.delete_container(container=container)) + + def test_download_object_success(self): + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self.driver_type) + destination_path = os.path.abspath(__file__) + '.temp' + result = self.driver.download_object(obj=obj, + destination_path=destination_path, + 
overwrite_existing=False, + delete_on_failure=True) + self.assertTrue(result) + + def test_download_object_invalid_file_size(self): + self.mock_raw_response_klass.type = 'INVALID_SIZE' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self.driver_type) + destination_path = os.path.abspath(__file__) + '.temp' + result = self.driver.download_object(obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + self.assertFalse(result) + + def test_download_object_invalid_file_already_exists(self): + self.mock_raw_response_klass.type = 'INVALID_SIZE' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self.driver_type) + destination_path = os.path.abspath(__file__) + try: + self.driver.download_object(obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + except LibcloudError: + pass + else: + self.fail('Exception was not thrown') + + def test_download_object_as_stream_success(self): + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self.driver_type) + + stream = self.driver.download_object_as_stream(obj=obj, + chunk_size=None) + self.assertTrue(hasattr(stream, '__iter__')) + + def test_upload_object_invalid_ex_storage_class(self): + # Invalid hash is detected on the amazon side and BAD_REQUEST is + # returned + file_path = os.path.abspath(__file__) + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + try: + self.driver.upload_object(file_path=file_path, container=container, + 
object_name=object_name, + verify_hash=True, + ex_storage_class='invalid-class') + except ValueError: + e = sys.exc_info()[1] + self.assertTrue(str(e).lower().find('invalid storage class') != -1) + else: + self.fail('Exception was not thrown') + + def test_upload_object_invalid_hash1(self): + # Invalid hash is detected on the amazon side and BAD_REQUEST is + # returned + def upload_file(self, response, file_path, chunked=False, + calculate_hash=True): + return True, 'hash343hhash89h932439jsaa89', 1000 + + self.mock_raw_response_klass.type = 'INVALID_HASH1' + + old_func = self.driver_type._upload_file + self.driver_type._upload_file = upload_file + file_path = os.path.abspath(__file__) + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + try: + self.driver.upload_object(file_path=file_path, container=container, + object_name=object_name, + verify_hash=True) + except ObjectHashMismatchError: + pass + else: + self.fail( + 'Invalid hash was returned but an exception was not thrown') + finally: + self.driver_type._upload_file = old_func + + def test_upload_object_invalid_hash2(self): + # Invalid hash is detected when comparing hash provided in the response + # ETag header + def upload_file(self, response, file_path, chunked=False, + calculate_hash=True): + return True, '0cc175b9c0f1b6a831c399e269772661', 1000 + + self.mock_raw_response_klass.type = 'INVALID_HASH2' + + old_func = self.driver_type._upload_file + self.driver_type._upload_file = upload_file + file_path = os.path.abspath(__file__) + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + try: + self.driver.upload_object(file_path=file_path, container=container, + object_name=object_name, + verify_hash=True) + except ObjectHashMismatchError: + pass + else: + self.fail( + 'Invalid hash was returned but an exception was not thrown') + finally: + self.driver_type._upload_file = old_func + 
+ def test_upload_object_success(self): + def upload_file(self, response, file_path, chunked=False, + calculate_hash=True): + return True, '0cc175b9c0f1b6a831c399e269772661', 1000 + + old_func = self.driver_type._upload_file + self.driver_type._upload_file = upload_file + file_path = os.path.abspath(__file__) + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + extra = {'meta_data': {'some-value': 'foobar'}} + obj = self.driver.upload_object(file_path=file_path, + container=container, + object_name=object_name, + extra=extra, + verify_hash=True) + self.assertEqual(obj.name, 'foo_test_upload') + self.assertEqual(obj.size, 1000) + self.assertTrue('some-value' in obj.meta_data) + self.driver_type._upload_file = old_func + + def test_upload_object_with_acl(self): + def upload_file(self, response, file_path, chunked=False, + calculate_hash=True): + return True, '0cc175b9c0f1b6a831c399e269772661', 1000 + + old_func = self.driver_type._upload_file + self.driver_type._upload_file = upload_file + file_path = os.path.abspath(__file__) + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + extra = {'acl': 'public-read'} + obj = self.driver.upload_object(file_path=file_path, + container=container, + object_name=object_name, + extra=extra, + verify_hash=True) + self.assertEqual(obj.name, 'foo_test_upload') + self.assertEqual(obj.size, 1000) + self.assertEqual(obj.extra['acl'], 'public-read') + self.driver_type._upload_file = old_func + + def test_upload_empty_object_via_stream(self): + if self.driver.supports_s3_multipart_upload: + self.mock_raw_response_klass.type = 'MULTIPART' + self.mock_response_klass.type = 'MULTIPART' + else: + self.mock_raw_response_klass.type = None + self.mock_response_klass.type = None + + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_stream_data' + iterator = 
DummyIterator(data=['']) + extra = {'content_type': 'text/plain'} + obj = self.driver.upload_object_via_stream(container=container, + object_name=object_name, + iterator=iterator, + extra=extra) + + self.assertEqual(obj.name, object_name) + self.assertEqual(obj.size, 0) + + def test_upload_small_object_via_stream(self): + if self.driver.supports_s3_multipart_upload: + self.mock_raw_response_klass.type = 'MULTIPART' + self.mock_response_klass.type = 'MULTIPART' + else: + self.mock_raw_response_klass.type = None + self.mock_response_klass.type = None + + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_stream_data' + iterator = DummyIterator(data=['2', '3', '5']) + extra = {'content_type': 'text/plain'} + obj = self.driver.upload_object_via_stream(container=container, + object_name=object_name, + iterator=iterator, + extra=extra) + + self.assertEqual(obj.name, object_name) + self.assertEqual(obj.size, 3) + + def test_upload_big_object_via_stream(self): + if self.driver.supports_s3_multipart_upload: + self.mock_raw_response_klass.type = 'MULTIPART' + self.mock_response_klass.type = 'MULTIPART' + else: + self.mock_raw_response_klass.type = None + self.mock_response_klass.type = None + + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_stream_data' + iterator = DummyIterator( + data=['2' * CHUNK_SIZE, '3' * CHUNK_SIZE, '5']) + extra = {'content_type': 'text/plain'} + obj = self.driver.upload_object_via_stream(container=container, + object_name=object_name, + iterator=iterator, + extra=extra) + + self.assertEqual(obj.name, object_name) + self.assertEqual(obj.size, CHUNK_SIZE * 2 + 1) + + def test_upload_object_via_stream_abort(self): + if not self.driver.supports_s3_multipart_upload: + return + + self.mock_raw_response_klass.type = 'MULTIPART' + self.mock_response_klass.type = 'MULTIPART' + + def _faulty_iterator(): + for i in range(0, 5): + yield str(i) + 
raise RuntimeError('Error in fetching data') + + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_stream_data' + iterator = _faulty_iterator() + extra = {'content_type': 'text/plain'} + + try: + self.driver.upload_object_via_stream(container=container, + object_name=object_name, + iterator=iterator, + extra=extra) + except Exception: + pass + + return + + def test_s3_list_multipart_uploads(self): + if not self.driver.supports_s3_multipart_upload: + return + + self.mock_response_klass.type = 'LIST_MULTIPART' + S3StorageDriver.RESPONSES_PER_REQUEST = 2 + + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + + for upload in self.driver.ex_iterate_multipart_uploads(container): + self.assertNotEqual(upload.key, None) + self.assertNotEqual(upload.id, None) + self.assertNotEqual(upload.created_at, None) + self.assertNotEqual(upload.owner, None) + self.assertNotEqual(upload.initiator, None) + + def test_s3_abort_multipart_uploads(self): + if not self.driver.supports_s3_multipart_upload: + return + + self.mock_response_klass.type = 'LIST_MULTIPART' + S3StorageDriver.RESPONSES_PER_REQUEST = 2 + + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + + self.driver.ex_cleanup_all_multipart_uploads(container) + + def test_delete_object_not_found(self): + self.mock_response_klass.type = 'NOT_FOUND' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None, + meta_data=None, container=container, driver=self.driver) + try: + self.driver.delete_object(obj=obj) + except ObjectDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_delete_object_success(self): + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None, + meta_data=None, 
container=container, driver=self.driver) + + result = self.driver.delete_object(obj=obj) + self.assertTrue(result) + + +class S3USWestTests(S3Tests): + driver_type = S3USWestStorageDriver + + +class S3EUWestTests(S3Tests): + driver_type = S3EUWestStorageDriver + + +class S3APSETests(S3Tests): + driver_type = S3APSEStorageDriver + + +class S3APNETests(S3Tests): + driver_tyoe = S3APNEStorageDriver + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/test_connection.py libcloud-0.15.1/libcloud/test/test_connection.py --- libcloud-0.5.0/libcloud/test/test_connection.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/test_connection.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,207 @@ +# -*- coding: utf-8 -*- +# Licensed to the Apache Software Foundation (ASF) under one or moreĀ§ +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import sys
import ssl

from mock import Mock, call

from libcloud.test import unittest
from libcloud.common.base import Connection
from libcloud.common.base import LoggingConnection


class ConnectionClassTestCase(unittest.TestCase):
    """Unit tests for libcloud.common.base.Connection request plumbing.

    setUp replaces ``Connection.connect`` and ``Connection.responseCls``
    with mocks; tearDown must restore BOTH originals so the mocks do not
    leak into other test modules that use Connection.
    """

    def setUp(self):
        # Save the real class attributes before installing mocks.
        self.originalConnect = Connection.connect
        self.originalResponseCls = Connection.responseCls

        Connection.connect = Mock()
        Connection.responseCls = Mock()
        Connection.allow_insecure = True

    def tearDown(self):
        Connection.connect = self.originalConnect
        # BUG FIX: was ``Connection.responseCls = Connection.responseCls``,
        # a no-op self-assignment that left the Mock installed by setUp on
        # the class for the rest of the process.
        Connection.responseCls = self.originalResponseCls
        Connection.allow_insecure = True

    def test_dont_allow_insecure(self):
        # secure=False is accepted only while allow_insecure is True.
        Connection.allow_insecure = True
        Connection(secure=False)

        Connection.allow_insecure = False

        expected_msg = (r'Non https connections are not allowed \(use '
                        'secure=True\)')
        self.assertRaisesRegexp(ValueError, expected_msg, Connection,
                                secure=False)

    def test_content_length(self):
        con = Connection()
        con.connection = Mock()

        # GET method
        # No data, no content length should be present
        con.request('/test', method='GET', data=None)
        call_kwargs = con.connection.request.call_args[1]
        self.assertTrue('Content-Length' not in call_kwargs['headers'])

        # '' as data, no content length should be present
        con.request('/test', method='GET', data='')
        call_kwargs = con.connection.request.call_args[1]
        self.assertTrue('Content-Length' not in call_kwargs['headers'])

        # 'a' as data, content length should be present (data in GET is not
        # correct, but anyways)
        con.request('/test', method='GET', data='a')
        call_kwargs = con.connection.request.call_args[1]
        self.assertEqual(call_kwargs['headers']['Content-Length'], '1')

        # POST, PUT method
        # No data, content length should be present
        for method in ['POST', 'PUT', 'post', 'put']:
            con.request('/test', method=method, data=None)
            call_kwargs = con.connection.request.call_args[1]
            self.assertEqual(call_kwargs['headers']['Content-Length'], '0')

        # '' as data, content length should be present
        for method in ['POST', 'PUT', 'post', 'put']:
            con.request('/test', method=method, data='')
            call_kwargs = con.connection.request.call_args[1]
            self.assertEqual(call_kwargs['headers']['Content-Length'], '0')

        # No data, raw request, do not touch Content-Length if present
        for method in ['POST', 'PUT', 'post', 'put']:
            con.request('/test', method=method, data=None,
                        headers={'Content-Length': '42'}, raw=True)
            putheader_call_list = con.connection.putheader.call_args_list
            self.assertIn(call('Content-Length', '42'), putheader_call_list)

        # '' as data, raw request, do not touch Content-Length if present
        for method in ['POST', 'PUT', 'post', 'put']:
            con.request('/test', method=method, data=None,
                        headers={'Content-Length': '42'}, raw=True)
            putheader_call_list = con.connection.putheader.call_args_list
            self.assertIn(call('Content-Length', '42'), putheader_call_list)

        # 'a' as data, content length should be present
        for method in ['POST', 'PUT', 'post', 'put']:
            con.request('/test', method=method, data='a')
            call_kwargs = con.connection.request.call_args[1]
            self.assertEqual(call_kwargs['headers']['Content-Length'], '1')

    def test_cache_busting(self):
        # cache_busting=True must inject a 'cache-busting' param whether
        # params are passed as a dict or a list of tuples.
        params1 = {'foo1': 'bar1', 'foo2': 'bar2'}
        params2 = [('foo1', 'bar1'), ('foo2', 'bar2')]

        con = Connection()
        con.connection = Mock()
        con.pre_connect_hook = Mock()
        con.pre_connect_hook.return_value = {}, {}
        con.cache_busting = False

        con.request(action='/path', params=params1)
        args, kwargs = con.pre_connect_hook.call_args
        self.assertFalse('cache-busting' in args[0])
        self.assertEqual(args[0], params1)

        con.request(action='/path', params=params2)
        args, kwargs = con.pre_connect_hook.call_args
        self.assertFalse('cache-busting' in args[0])
        self.assertEqual(args[0], params2)

        con.cache_busting = True

        con.request(action='/path', params=params1)
        args, kwargs = con.pre_connect_hook.call_args
        self.assertTrue('cache-busting' in args[0])

        con.request(action='/path', params=params2)
        args, kwargs = con.pre_connect_hook.call_args
        # For the list form the busting pair is appended after the
        # original params.
        self.assertTrue('cache-busting' in args[0][len(params2)])

    def test_context_is_reset_after_request_has_finished(self):
        context = {'foo': 'bar'}

        def responseCls(connection, response):
            connection.called = True
            self.assertEqual(connection.context, context)

        con = Connection()
        con.called = False
        con.connection = Mock()
        con.responseCls = responseCls

        con.set_context(context)
        self.assertEqual(con.context, context)

        con.request('/')

        # Context should have been reset
        self.assertTrue(con.called)
        self.assertEqual(con.context, {})

        # Context should also be reset if a method inside request throws
        con = Connection()
        con.connection = Mock()

        con.set_context(context)
        self.assertEqual(con.context, context)

        con.connection.request = Mock(side_effect=ssl.SSLError())

        try:
            con.request('/')
        except ssl.SSLError:
            pass

        self.assertEqual(con.context, {})

        con.connection = Mock()
        con.set_context(context)
        self.assertEqual(con.context, context)

        con.responseCls = Mock(side_effect=ValueError())

        try:
            con.request('/')
        except ValueError:
            pass

        self.assertEqual(con.context, {})

    def test_log_curl(self):
        url = '/test/path'
        body = None
        headers = {}

        con = LoggingConnection()
        con.protocol = 'http'
        con.host = 'example.com'
        con.port = 80

        for method in ['GET', 'POST', 'PUT', 'DELETE']:
            cmd = con._log_curl(method=method, url=url, body=body,
                                headers=headers)
            self.assertEqual(cmd, 'curl -i -X %s --compress http://example.com:80/test/path' %
                             (method))

        # Should use --head for head requests
        cmd = con._log_curl(method='HEAD', url=url, body=body, headers=headers)
        self.assertEqual(cmd, 'curl -i --head --compress http://example.com:80/test/path')

if __name__ == '__main__':
    sys.exit(unittest.main())
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest

from libcloud.test.file_fixtures import ComputeFileFixtures


class FileFixturesTests(unittest.TestCase):
    """Sanity checks for the compute fixture file loader."""

    def test_success(self):
        # A fixture that exists is returned as its text content.
        fixtures = ComputeFileFixtures('meta')
        content = fixtures.load('helloworld.txt')
        self.assertEqual("Hello, World!", content)

    def test_failure(self):
        # Loading a missing fixture raises IOError.
        fixtures = ComputeFileFixtures('meta')
        self.assertRaises(IOError, fixtures.load, 'nil')

if __name__ == '__main__':
    sys.exit(unittest.main())
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import os.path + +from mock import patch + +import libcloud.security + +from libcloud.utils.py3 import reload +from libcloud.httplib_ssl import LibcloudHTTPSConnection + +from libcloud.test import unittest + +ORIGINAL_CA_CERS_PATH = libcloud.security.CA_CERTS_PATH + + +class TestHttpLibSSLTests(unittest.TestCase): + + def setUp(self): + libcloud.security.VERIFY_SSL_CERT = False + libcloud.security.CA_CERTS_PATH = ORIGINAL_CA_CERS_PATH + self.httplib_object = LibcloudHTTPSConnection('foo.bar') + + def test_custom_ca_path_using_env_var_doesnt_exist(self): + os.environ['SSL_CERT_FILE'] = '/foo/doesnt/exist' + + try: + reload(libcloud.security) + except ValueError: + e = sys.exc_info()[1] + msg = 'Certificate file /foo/doesnt/exist doesn\'t exist' + self.assertEqual(str(e), msg) + else: + self.fail('Exception was not thrown') + + def test_custom_ca_path_using_env_var_is_directory(self): + file_path = os.path.dirname(os.path.abspath(__file__)) + os.environ['SSL_CERT_FILE'] = file_path + + expected_msg = 'Certificate file can\'t be a directory' + self.assertRaisesRegexp(ValueError, expected_msg, + reload, libcloud.security) + + def test_custom_ca_path_using_env_var_exist(self): + # When setting a path we don't actually check that a valid CA file is + # provided. 
+ # This happens later in the code in httplib_ssl.connect method + file_path = os.path.abspath(__file__) + os.environ['SSL_CERT_FILE'] = file_path + + reload(libcloud.security) + + self.assertEqual(libcloud.security.CA_CERTS_PATH, [file_path]) + + def test_verify_hostname(self): + # commonName + cert1 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', + 'subject': ((('countryName', 'US'),), + (('stateOrProvinceName', 'Delaware'),), + (('localityName', 'Wilmington'),), + (('organizationName', 'Python Software Foundation'),), + (('organizationalUnitName', 'SSL'),), + (('commonName', 'somemachine.python.org'),))} + + # commonName + cert2 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', + 'subject': ((('countryName', 'US'),), + (('stateOrProvinceName', 'Delaware'),), + (('localityName', 'Wilmington'),), + (('organizationName', 'Python Software Foundation'),), + (('organizationalUnitName', 'SSL'),), + (('commonName', 'somemachine.python.org'),)), + 'subjectAltName': ((('DNS', 'foo.alt.name')), + (('DNS', 'foo.alt.name.1')))} + + # commonName + cert3 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', + 'subject': ((('countryName', 'US'),), + (('stateOrProvinceName', 'Delaware'),), + (('localityName', 'Wilmington'),), + (('organizationName', 'Python Software Foundation'),), + (('organizationalUnitName', 'SSL'),), + (('commonName', 'python.org'),))} + + # wildcard commonName + cert4 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', + 'subject': ((('countryName', 'US'),), + (('stateOrProvinceName', 'Delaware'),), + (('localityName', 'Wilmington'),), + (('organizationName', 'Python Software Foundation'),), + (('organizationalUnitName', 'SSL'),), + (('commonName', '*.api.joyentcloud.com'),))} + + self.assertFalse(self.httplib_object._verify_hostname( + hostname='invalid', cert=cert1)) + self.assertFalse(self.httplib_object._verify_hostname( + hostname='machine.python.org', cert=cert1)) + self.assertFalse(self.httplib_object._verify_hostname( + hostname='foomachine.python.org', cert=cert1)) + 
self.assertFalse(self.httplib_object._verify_hostname( + hostname='somesomemachine.python.org', cert=cert1)) + self.assertFalse(self.httplib_object._verify_hostname( + hostname='somemachine.python.orga', cert=cert1)) + self.assertFalse(self.httplib_object._verify_hostname( + hostname='somemachine.python.org.org', cert=cert1)) + self.assertTrue(self.httplib_object._verify_hostname( + hostname='somemachine.python.org', cert=cert1)) + + self.assertFalse(self.httplib_object._verify_hostname( + hostname='invalid', cert=cert2)) + self.assertFalse(self.httplib_object._verify_hostname( + hostname='afoo.alt.name.1', cert=cert2)) + self.assertFalse(self.httplib_object._verify_hostname( + hostname='a.foo.alt.name.1', cert=cert2)) + self.assertFalse(self.httplib_object._verify_hostname( + hostname='foo.alt.name.1.2', cert=cert2)) + self.assertFalse(self.httplib_object._verify_hostname( + hostname='afoo.alt.name.1.2', cert=cert2)) + self.assertTrue(self.httplib_object._verify_hostname( + hostname='foo.alt.name.1', cert=cert2)) + + self.assertTrue(self.httplib_object._verify_hostname( + hostname='python.org', cert=cert3)) + self.assertFalse(self.httplib_object._verify_hostname( + hostname='opython.org', cert=cert3)) + self.assertFalse(self.httplib_object._verify_hostname( + hostname='ython.org', cert=cert3)) + + self.assertTrue(self.httplib_object._verify_hostname( + hostname='us-east-1.api.joyentcloud.com', cert=cert4)) + self.assertTrue(self.httplib_object._verify_hostname( + hostname='useast-1.api.joyentcloud.com', cert=cert4)) + self.assertFalse(self.httplib_object._verify_hostname( + hostname='t1.useast-1.api.joyentcloud.com', cert=cert4)) + self.assertFalse(self.httplib_object._verify_hostname( + hostname='ponies.useast-1.api.joyentcloud.com', cert=cert4)) + self.assertFalse(self.httplib_object._verify_hostname( + hostname='api.useast-1.api.joyentcloud.com', cert=cert4)) + + def test_get_subject_alt_names(self): + cert1 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', + 
'subject': ((('countryName', 'US'),), + (('stateOrProvinceName', 'Delaware'),), + (('localityName', 'Wilmington'),), + (('organizationName', 'Python Software Foundation'),), + (('organizationalUnitName', 'SSL'),), + (('commonName', 'somemachine.python.org'),))} + + cert2 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', + 'subject': ((('countryName', 'US'),), + (('stateOrProvinceName', 'Delaware'),), + (('localityName', 'Wilmington'),), + (('organizationName', 'Python Software Foundation'),), + (('organizationalUnitName', 'SSL'),), + (('commonName', 'somemachine.python.org'),)), + 'subjectAltName': ((('DNS', 'foo.alt.name')), + (('DNS', 'foo.alt.name.1')))} + + self.assertEqual(self.httplib_object._get_subject_alt_names(cert=cert1), + []) + + alt_names = self.httplib_object._get_subject_alt_names(cert=cert2) + self.assertEqual(len(alt_names), 2) + self.assertTrue('foo.alt.name' in alt_names) + self.assertTrue('foo.alt.name.1' in alt_names) + + def test_get_common_name(self): + cert = {'notAfter': 'Feb 16 16:54:50 2013 GMT', + 'subject': ((('countryName', 'US'),), + (('stateOrProvinceName', 'Delaware'),), + (('localityName', 'Wilmington'),), + (('organizationName', 'Python Software Foundation'),), + (('organizationalUnitName', 'SSL'),), + (('commonName', 'somemachine.python.org'),))} + + self.assertEqual(self.httplib_object._get_common_name(cert)[0], + 'somemachine.python.org') + self.assertEqual(self.httplib_object._get_common_name({}), + None) + + @patch('warnings.warn') + def test_setup_verify(self, _): + libcloud.security.CA_CERTS_PATH = [] + + # Should throw a runtime error + libcloud.security.VERIFY_SSL_CERT = True + + expected_msg = libcloud.security.CA_CERTS_UNAVAILABLE_ERROR_MSG + self.assertRaisesRegexp(RuntimeError, expected_msg, + self.httplib_object._setup_verify) + + libcloud.security.VERIFY_SSL_CERT = False + self.httplib_object._setup_verify() + + @patch('warnings.warn') + def test_setup_ca_cert(self, _): + # verify = False, _setup_ca_cert should be a no-op 
+ self.httplib_object.verify = False + self.httplib_object._setup_ca_cert() + + self.assertEqual(self.httplib_object.ca_cert, None) + + # verify = True, a valid path is provided, self.ca_cert should be set to + # a valid path + self.httplib_object.verify = True + + libcloud.security.CA_CERTS_PATH = [os.path.abspath(__file__)] + self.httplib_object._setup_ca_cert() + + self.assertTrue(self.httplib_object.ca_cert is not None) + + # verify = True, no CA certs are available, exception should be thrown + libcloud.security.CA_CERTS_PATH = [] + + expected_msg = libcloud.security.CA_CERTS_UNAVAILABLE_ERROR_MSG + self.assertRaisesRegexp(RuntimeError, expected_msg, + self.httplib_object._setup_ca_cert) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/test_pricing.py libcloud-0.15.1/libcloud/test/test_pricing.py --- libcloud-0.5.0/libcloud/test/test_pricing.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/test_pricing.py 2013-11-29 12:35:06.000000000 +0000 @@ -0,0 +1,106 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os.path +import sys +import unittest + +import libcloud.pricing + +PRICING_FILE_PATH = os.path.join(os.path.dirname(__file__), 'pricing_test.json') + + +class PricingTestCase(unittest.TestCase): + + def test_get_pricing_success(self): + self.assertFalse('foo' in libcloud.pricing.PRICING_DATA['compute']) + + pricing = libcloud.pricing.get_pricing(driver_type='compute', + driver_name='foo', + pricing_file_path=PRICING_FILE_PATH) + self.assertEqual(pricing['1'], 1.0) + self.assertEqual(pricing['2'], 2.0) + + self.assertEqual(libcloud.pricing.PRICING_DATA['compute']['foo']['1'], 1.0) + self.assertEqual(libcloud.pricing.PRICING_DATA['compute']['foo']['2'], 2.0) + + def test_get_pricing_invalid_file_path(self): + try: + libcloud.pricing.get_pricing(driver_type='compute', driver_name='bar', + pricing_file_path='inexistent.json') + except IOError: + pass + else: + self.fail('Invalid pricing file path provided, but an exception was not' + ' thrown') + + def test_get_pricing_invalid_driver_type(self): + try: + libcloud.pricing.get_pricing(driver_type='invalid_type', driver_name='bar', + pricing_file_path='inexistent.json') + except AttributeError: + pass + else: + self.fail('Invalid driver_type provided, but an exception was not' + ' thrown') + + def test_get_pricing_not_in_cache(self): + try: + libcloud.pricing.get_pricing(driver_type='compute', driver_name='inexistent', + pricing_file_path=PRICING_FILE_PATH) + except KeyError: + pass + else: + self.fail('Invalid driver provided, but an exception was not' + ' thrown') + + def test_get_size_price(self): + libcloud.pricing.PRICING_DATA['compute']['foo'] = {2: 2, '3': 3} + price1 = libcloud.pricing.get_size_price(driver_type='compute', + driver_name='foo', + size_id=2) + price2 = libcloud.pricing.get_size_price(driver_type='compute', + driver_name='foo', + size_id='3') + self.assertEqual(price1, 2) + self.assertEqual(price2, 3) + + def test_invalid_pricing_cache(self): + 
libcloud.pricing.PRICING_DATA['compute']['foo'] = {2: 2} + self.assertTrue('foo' in libcloud.pricing.PRICING_DATA['compute']) + + libcloud.pricing.invalidate_pricing_cache() + self.assertFalse('foo' in libcloud.pricing.PRICING_DATA['compute']) + + def test_invalid_module_pricing_cache(self): + libcloud.pricing.PRICING_DATA['compute']['foo'] = {1: 1} + + self.assertTrue('foo' in libcloud.pricing.PRICING_DATA['compute']) + + libcloud.pricing.invalidate_module_pricing_cache(driver_type='compute', + driver_name='foo') + self.assertFalse('foo' in libcloud.pricing.PRICING_DATA['compute']) + libcloud.pricing.invalidate_module_pricing_cache(driver_type='compute', + driver_name='foo1') + + def test_set_pricing(self): + self.assertFalse('foo' in libcloud.pricing.PRICING_DATA['compute']) + + libcloud.pricing.set_pricing(driver_type='compute', driver_name='foo', + pricing={'foo': 1}) + self.assertTrue('foo' in libcloud.pricing.PRICING_DATA['compute']) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/test_response_classes.py libcloud-0.15.1/libcloud/test/test_response_classes.py --- libcloud-0.5.0/libcloud/test/test_response_classes.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/test_response_classes.py 2013-11-29 12:35:06.000000000 +0000 @@ -0,0 +1,151 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest +import zlib +import gzip + +from mock import Mock + +from libcloud.utils.py3 import httplib, b, StringIO, PY3 +from libcloud.common.base import Response, XmlResponse, JsonResponse +from libcloud.common.types import MalformedResponseError + + +class ResponseClassesTests(unittest.TestCase): + def setUp(self): + self._mock_response = Mock() + self._mock_response.getheaders.return_value = [] + self._mock_response.status = httplib.OK + self._mock_response._original_data = None + self._mock_connection = Mock() + + def test_XmlResponse_class(self): + self._mock_response.read.return_value = 'bar' + response = XmlResponse(response=self._mock_response, + connection=self._mock_connection) + + parsed = response.parse_body() + self.assertEqual(parsed.tag, 'foo') + self.assertEqual(parsed.text, 'bar') + + def test_XmlResponse_class_malformed_response(self): + self._mock_response.read.return_value = '' + + try: + XmlResponse(response=self._mock_response, + connection=self._mock_connection) + except MalformedResponseError: + pass + else: + self.fail('Exception was not thrown') + + def test_XmlResponse_class_zero_length_body_strip(self): + self._mock_response.read.return_value = ' ' + + response = XmlResponse(response=self._mock_response, + connection=self._mock_connection) + + parsed = response.parse_body() + self.assertEqual(parsed, '') + + def test_JsonResponse_class_success(self): + self._mock_response.read.return_value = '{"foo": "bar"}' + response = JsonResponse(response=self._mock_response, + connection=self._mock_connection) + + parsed = 
response.parse_body() + self.assertEqual(parsed, {'foo': 'bar'}) + + def test_JsonResponse_class_malformed_response(self): + self._mock_response.read.return_value = '{"foo": "bar' + + try: + JsonResponse(response=self._mock_response, + connection=self._mock_connection) + except MalformedResponseError: + pass + else: + self.fail('Exception was not thrown') + + def test_JsonResponse_class_zero_length_body_strip(self): + self._mock_response.read.return_value = ' ' + + response = JsonResponse(response=self._mock_response, + connection=self._mock_connection) + + parsed = response.parse_body() + self.assertEqual(parsed, '') + + def test_deflate_encoding(self): + original_data = 'foo bar ponies, wooo zlib' + compressed_data = zlib.compress(b(original_data)) + + self._mock_response.read.return_value = compressed_data + self._mock_response.getheaders.return_value = \ + {'Content-Encoding': 'deflate'} + + response = Response(response=self._mock_response, + connection=self._mock_connection) + + body = response.parse_body() + self.assertEqual(body, original_data) + + self._mock_response.getheaders.return_value = \ + {'Content-Encoding': 'zlib'} + + response = Response(response=self._mock_response, + connection=self._mock_connection) + + body = response.parse_body() + self.assertEqual(body, original_data) + + def test_gzip_encoding(self): + original_data = 'foo bar ponies, wooo gzip' + + if PY3: + from io import BytesIO + string_io = BytesIO() + else: + string_io = StringIO() + + stream = gzip.GzipFile(fileobj=string_io, mode='w') + stream.write(b(original_data)) + stream.close() + compressed_data = string_io.getvalue() + + self._mock_response.read.return_value = compressed_data + self._mock_response.getheaders.return_value = \ + {'Content-Encoding': 'gzip'} + + response = Response(response=self._mock_response, + connection=self._mock_connection) + + body = response.parse_body() + self.assertEqual(body, original_data) + + self._mock_response.getheaders.return_value = \ + 
{'Content-Encoding': 'x-gzip'} + + response = Response(response=self._mock_response, + connection=self._mock_connection) + + body = response.parse_body() + self.assertEqual(body, original_data) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/test_types.py libcloud-0.15.1/libcloud/test/test_types.py --- libcloud-0.5.0/libcloud/test/test_types.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/test_types.py 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,112 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import unittest + +from libcloud.common.types import LazyList + + +class TestLazyList(unittest.TestCase): + def setUp(self): + super(TestLazyList, self).setUp + self._get_more_counter = 0 + + def tearDown(self): + super(TestLazyList, self).tearDown + + def test_init(self): + data = [1, 2, 3, 4, 5] + ll = LazyList(get_more=self._get_more_exhausted) + ll_list = list(ll) + self.assertEqual(ll_list, data) + + def test_iterator(self): + data = [1, 2, 3, 4, 5] + ll = LazyList(get_more=self._get_more_exhausted) + for i, d in enumerate(ll): + self.assertEqual(d, data[i]) + + def test_empty_list(self): + ll = LazyList(get_more=self._get_more_empty) + + self.assertEqual(list(ll), []) + self.assertEqual(len(ll), 0) + self.assertTrue(10 not in ll) + + def test_iterator_not_exhausted(self): + data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + ll = LazyList(get_more=self._get_more_not_exhausted) + number_of_iterations = 0 + for i, d in enumerate(ll): + self.assertEqual(d, data[i]) + number_of_iterations += 1 + self.assertEqual(number_of_iterations, 10) + + def test_len(self): + ll = LazyList(get_more=self._get_more_not_exhausted) + ll = LazyList(get_more=self._get_more_not_exhausted) + + self.assertEqual(len(ll), 10) + + def test_contains(self): + ll = LazyList(get_more=self._get_more_not_exhausted) + + self.assertTrue(40 not in ll) + self.assertTrue(1 in ll) + self.assertTrue(5 in ll) + self.assertTrue(10 in ll) + + def test_indexing(self): + ll = LazyList(get_more=self._get_more_not_exhausted) + + self.assertEqual(ll[0], 1) + self.assertEqual(ll[9], 10) + self.assertEqual(ll[-1], 10) + + try: + ll[11] + except IndexError: + pass + else: + self.fail('Exception was not thrown') + + def test_repr(self): + ll1 = LazyList(get_more=self._get_more_empty) + ll2 = LazyList(get_more=self._get_more_exhausted) + ll3 = LazyList(get_more=self._get_more_not_exhausted) + + self.assertEqual(repr(ll1), '[]') + self.assertEqual(repr(ll2), '[1, 2, 3, 4, 5]') + self.assertEqual(repr(ll3), 
'[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]') + + def _get_more_empty(self, last_key, value_dict): + return [], None, True + + def _get_more_exhausted(self, last_key, value_dict): + data = [1, 2, 3, 4, 5] + return data, 5, True + + def _get_more_not_exhausted(self, last_key, value_dict): + self._get_more_counter += 1 + if not last_key: + data, last_key, exhausted = [1, 2, 3, 4, 5], 5, False + else: + data, last_key, exhausted = [6, 7, 8, 9, 10], 10, True + + return data, last_key, exhausted + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/test/test_utils.py libcloud-0.15.1/libcloud/test/test_utils.py --- libcloud-0.5.0/libcloud/test/test_utils.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/test/test_utils.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,360 @@ +# -*- coding: utf-8 -*- +# Licensed to the Apache Software Foundation (ASF) under one or moreĀ§ +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import socket +import codecs +import unittest +import warnings +import os.path + +from itertools import chain + +# In Python > 2.7 DeprecationWarnings are disabled by default +warnings.simplefilter('default') + +import libcloud.utils.files + +from libcloud.utils.misc import get_driver, set_driver + +from libcloud.utils.py3 import PY3 +from libcloud.utils.py3 import StringIO +from libcloud.utils.py3 import b +from libcloud.utils.py3 import bchr +from libcloud.utils.py3 import hexadigits +from libcloud.utils.py3 import urlquote +from libcloud.compute.types import Provider +from libcloud.compute.providers import DRIVERS +from libcloud.utils.misc import get_secure_random_string +from libcloud.utils.networking import is_public_subnet +from libcloud.utils.networking import is_private_subnet +from libcloud.utils.networking import is_valid_ip_address +from libcloud.storage.drivers.dummy import DummyIterator + + +WARNINGS_BUFFER = [] + +if PY3: + from io import FileIO as file + + +def show_warning(msg, cat, fname, lno, line=None): + WARNINGS_BUFFER.append((msg, cat, fname, lno)) + +original_func = warnings.showwarning + + +class TestUtils(unittest.TestCase): + def setUp(self): + global WARNINGS_BUFFER + WARNINGS_BUFFER = [] + + def tearDown(self): + global WARNINGS_BUFFER + WARNINGS_BUFFER = [] + warnings.showwarning = original_func + + def test_guess_file_mime_type(self): + file_path = os.path.abspath(__file__) + mimetype, encoding = libcloud.utils.files.guess_file_mime_type( + file_path=file_path) + + self.assertTrue(mimetype.find('python') != -1) + + def test_get_driver(self): + driver = get_driver(drivers=DRIVERS, provider=Provider.DUMMY) + self.assertTrue(driver is not None) + + try: + driver = get_driver(drivers=DRIVERS, provider='fooba') + except AttributeError: + pass + else: + self.fail('Invalid provider, but an exception was not thrown') + + def test_set_driver(self): + # Set an existing driver + try: + driver = set_driver(DRIVERS, Provider.DUMMY, + 
'libcloud.storage.drivers.dummy', + 'DummyStorageDriver') + except AttributeError: + pass + + # Register a new driver + driver = set_driver(DRIVERS, 'testingset', + 'libcloud.storage.drivers.dummy', + 'DummyStorageDriver') + + self.assertTrue(driver is not None) + + # Register it again + try: + set_driver(DRIVERS, 'testingset', + 'libcloud.storage.drivers.dummy', + 'DummyStorageDriver') + except AttributeError: + pass + + # Register an invalid module + try: + set_driver(DRIVERS, 'testingnew', + 'libcloud.storage.drivers.dummy1', + 'DummyStorageDriver') + except ImportError: + pass + + # Register an invalid class + try: + set_driver(DRIVERS, 'testingnew', + 'libcloud.storage.drivers.dummy', + 'DummyStorageDriver1') + except AttributeError: + pass + + def test_deprecated_warning(self): + warnings.showwarning = show_warning + + libcloud.utils.SHOW_DEPRECATION_WARNING = False + self.assertEqual(len(WARNINGS_BUFFER), 0) + libcloud.utils.deprecated_warning('test_module') + self.assertEqual(len(WARNINGS_BUFFER), 0) + + libcloud.utils.SHOW_DEPRECATION_WARNING = True + self.assertEqual(len(WARNINGS_BUFFER), 0) + libcloud.utils.deprecated_warning('test_module') + self.assertEqual(len(WARNINGS_BUFFER), 1) + + def test_in_development_warning(self): + warnings.showwarning = show_warning + + libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = False + self.assertEqual(len(WARNINGS_BUFFER), 0) + libcloud.utils.in_development_warning('test_module') + self.assertEqual(len(WARNINGS_BUFFER), 0) + + libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = True + self.assertEqual(len(WARNINGS_BUFFER), 0) + libcloud.utils.in_development_warning('test_module') + self.assertEqual(len(WARNINGS_BUFFER), 1) + + def test_read_in_chunks_iterator_no_data(self): + iterator = DummyIterator() + generator1 = libcloud.utils.files.read_in_chunks(iterator=iterator, + yield_empty=False) + generator2 = libcloud.utils.files.read_in_chunks(iterator=iterator, + yield_empty=True) + + # yield_empty=False + count = 0 + for data 
in generator1: + count += 1 + self.assertEqual(data, b('')) + + self.assertEqual(count, 0) + + # yield_empty=True + count = 0 + for data in generator2: + count += 1 + self.assertEqual(data, b('')) + + self.assertEqual(count, 1) + + def test_read_in_chunks_iterator(self): + def iterator(): + for x in range(0, 1000): + yield 'aa' + + for result in libcloud.utils.files.read_in_chunks(iterator(), + chunk_size=10, + fill_size=False): + self.assertEqual(result, b('aa')) + + for result in libcloud.utils.files.read_in_chunks(iterator(), chunk_size=10, + fill_size=True): + self.assertEqual(result, b('aaaaaaaaaa')) + + def test_read_in_chunks_filelike(self): + class FakeFile(file): + def __init__(self): + self.remaining = 500 + + def read(self, size): + self.remaining -= 1 + if self.remaining == 0: + return '' + return 'b' * (size + 1) + + for index, result in enumerate(libcloud.utils.files.read_in_chunks( + FakeFile(), chunk_size=10, + fill_size=False)): + self.assertEqual(result, b('b' * 11)) + + self.assertEqual(index, 498) + + for index, result in enumerate(libcloud.utils.files.read_in_chunks( + FakeFile(), chunk_size=10, + fill_size=True)): + if index != 548: + self.assertEqual(result, b('b' * 10)) + else: + self.assertEqual(result, b('b' * 9)) + + self.assertEqual(index, 548) + + def test_exhaust_iterator(self): + def iterator_func(): + for x in range(0, 1000): + yield 'aa' + + data = b('aa' * 1000) + iterator = libcloud.utils.files.read_in_chunks(iterator=iterator_func()) + result = libcloud.utils.files.exhaust_iterator(iterator=iterator) + self.assertEqual(result, data) + + result = libcloud.utils.files.exhaust_iterator(iterator=iterator_func()) + self.assertEqual(result, data) + + data = '12345678990' + iterator = StringIO(data) + result = libcloud.utils.files.exhaust_iterator(iterator=iterator) + self.assertEqual(result, b(data)) + + def test_exhaust_iterator_empty_iterator(self): + data = '' + iterator = StringIO(data) + result = 
libcloud.utils.files.exhaust_iterator(iterator=iterator) + self.assertEqual(result, b(data)) + + def test_unicode_urlquote(self): + # Regression tests for LIBCLOUD-429 + if PY3: + # Note: this is a unicode literal + val = '\xe9' + else: + val = codecs.unicode_escape_decode('\xe9')[0] + + uri = urlquote(val) + self.assertEqual(b(uri), b('%C3%A9')) + + # Unicode without unicode characters + uri = urlquote('~abc') + self.assertEqual(b(uri), b('%7Eabc')) + + # Already-encoded bytestring without unicode characters + uri = urlquote(b('~abc')) + self.assertEqual(b(uri), b('%7Eabc')) + + def test_get_secure_random_string(self): + for i in range(1, 500): + value = get_secure_random_string(size=i) + self.assertEqual(len(value), i) + + def test_hexadigits(self): + self.assertEqual(hexadigits(b('')), []) + self.assertEqual(hexadigits(b('a')), ['61']) + self.assertEqual(hexadigits(b('AZaz09-')), + ['41', '5a', '61', '7a', '30', '39', '2d']) + + def test_bchr(self): + if PY3: + self.assertEqual(bchr(0), b'\x00') + self.assertEqual(bchr(97), b'a') + else: + self.assertEqual(bchr(0), '\x00') + self.assertEqual(bchr(97), 'a') + + +class NetworkingUtilsTestCase(unittest.TestCase): + def test_is_public_and_is_private_subnet(self): + public_ips = [ + '213.151.0.8', + '86.87.86.1', + '8.8.8.8', + '8.8.4.4' + ] + + private_ips = [ + '192.168.1.100', + '10.0.0.1', + '172.16.0.0' + ] + + for address in public_ips: + is_public = is_public_subnet(ip=address) + is_private = is_private_subnet(ip=address) + + self.assertTrue(is_public) + self.assertFalse(is_private) + + for address in private_ips: + is_public = is_public_subnet(ip=address) + is_private = is_private_subnet(ip=address) + + self.assertFalse(is_public) + self.assertTrue(is_private) + + def test_is_valid_ip_address(self): + valid_ipv4_addresses = [ + '192.168.1.100', + '10.0.0.1', + '213.151.0.8', + '77.77.77.77' + ] + + invalid_ipv4_addresses = [ + '10.1', + '256.256.256.256', + '0.567.567.567', + '192.168.0.257' + ] + + 
valid_ipv6_addresses = [ + 'fe80::200:5aee:feaa:20a2', + '2607:f0d0:1002:51::4', + '2607:f0d0:1002:0051:0000:0000:0000:0004', + '::1' + ] + + invalid_ipv6_addresses = [ + '2607:f0d', + '2607:f0d0:0004', + ] + + for address in valid_ipv4_addresses: + status = is_valid_ip_address(address=address, + family=socket.AF_INET) + self.assertTrue(status) + + for address in valid_ipv6_addresses: + status = is_valid_ip_address(address=address, + family=socket.AF_INET6) + self.assertTrue(status) + + for address in chain(invalid_ipv4_addresses, invalid_ipv6_addresses): + status = is_valid_ip_address(address=address, + family=socket.AF_INET) + self.assertFalse(status) + + for address in chain(invalid_ipv4_addresses, invalid_ipv6_addresses): + status = is_valid_ip_address(address=address, + family=socket.AF_INET6) + self.assertFalse(status) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/libcloud/types.py libcloud-0.15.1/libcloud/types.py --- libcloud-0.5.0/libcloud/types.py 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/libcloud/types.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from libcloud.common.types import LibcloudError, MalformedResponseError -from libcloud.common.types import InvalidCredsError, InvalidCredsException -from libcloud.compute.types import Provider, NodeState, DeploymentError -from libcloud.compute.types import DeploymentException - -from libcloud.utils import deprecated_warning - -__all__ = ["LibcloudError", "MalformedResponseError", - "InvalidCredsError", "InvalidCredsException", - "Provider", "NodeState", "DeploymentError", - "DeploymentException" - ] -deprecated_warning(__name__) diff -Nru libcloud-0.5.0/libcloud/utils/compression.py libcloud-0.15.1/libcloud/utils/compression.py --- libcloud-0.5.0/libcloud/utils/compression.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/utils/compression.py 2013-08-30 12:21:18.000000000 +0000 @@ -0,0 +1,42 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import zlib +import gzip + +from libcloud.utils.py3 import PY3 +from libcloud.utils.py3 import StringIO + + +__all__ = [ + 'decompress_data' +] + + +def decompress_data(compression_type, data): + if compression_type == 'zlib': + return zlib.decompress(data) + elif compression_type == 'gzip': + # TODO: Should export BytesIO as StringIO in libcloud.utils.py3 + if PY3: + from io import BytesIO + cls = BytesIO + else: + cls = StringIO + + return gzip.GzipFile(fileobj=cls(data)).read() + else: + raise Exception('Invalid or onsupported compression type: %s' % + (compression_type)) diff -Nru libcloud-0.5.0/libcloud/utils/connection.py libcloud-0.15.1/libcloud/utils/connection.py --- libcloud-0.5.0/libcloud/utils/connection.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/utils/connection.py 2013-11-29 12:35:06.000000000 +0000 @@ -0,0 +1,42 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils.py3 import urlparse, parse_qs +from libcloud.common.base import Connection + +__all__ = [ + 'get_response_object' +] + + +def get_response_object(url): + """ + Utility function which uses libcloud's connection class to issue an HTTP + request. + + :param url: URL to send the request to. 
+ :type url: ``str`` + + :return: Response object. + :rtype: :class:`Response`. + """ + parsed_url = urlparse.urlparse(url) + parsed_qs = parse_qs(parsed_url.query) + secure = parsed_url.scheme == 'https' + + con = Connection(secure=secure, host=parsed_url.netloc) + response = con.request(method='GET', action=parsed_url.path, + params=parsed_qs) + return response diff -Nru libcloud-0.5.0/libcloud/utils/dist.py libcloud-0.15.1/libcloud/utils/dist.py --- libcloud-0.5.0/libcloud/utils/dist.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/utils/dist.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,136 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Taken From Twisted Python which licensed under MIT license +# https://github.com/powdahound/twisted/blob/master/twisted/python/dist.py +# https://github.com/powdahound/twisted/blob/master/LICENSE + +import os +import fnmatch + +# Names that are excluded from globbing results: +EXCLUDE_NAMES = ['{arch}', 'CVS', '.cvsignore', '_darcs', + 'RCS', 'SCCS', '.svn'] +EXCLUDE_PATTERNS = ['*.py[cdo]', '*.s[ol]', '.#*', '*~', '*.py'] + + +def _filter_names(names): + """ + Given a list of file names, return those names that should be copied. 
+ """ + names = [n for n in names + if n not in EXCLUDE_NAMES] + # This is needed when building a distro from a working + # copy (likely a checkout) rather than a pristine export: + for pattern in EXCLUDE_PATTERNS: + names = [n for n in names + if (not fnmatch.fnmatch(n, pattern)) + and (not n.endswith('.py'))] + return names + + +def relative_to(base, relativee): + """ + Gets 'relativee' relative to 'basepath'. + + i.e., + + >>> relative_to('/home/', '/home/radix/') + 'radix' + >>> relative_to('.', '/home/radix/Projects/Twisted') + 'Projects/Twisted' + + The 'relativee' must be a child of 'basepath'. + """ + basepath = os.path.abspath(base) + relativee = os.path.abspath(relativee) + if relativee.startswith(basepath): + relative = relativee[len(basepath):] + if relative.startswith(os.sep): + relative = relative[1:] + return os.path.join(base, relative) + raise ValueError("%s is not a subpath of %s" % (relativee, basepath)) + + +def get_packages(dname, pkgname=None, results=None, ignore=None, parent=None): + """ + Get all packages which are under dname. This is necessary for + Python 2.2's distutils. Pretty similar arguments to getDataFiles, + including 'parent'. + """ + parent = parent or "" + prefix = [] + if parent: + prefix = [parent] + bname = os.path.basename(dname) + ignore = ignore or [] + if bname in ignore: + return [] + if results is None: + results = [] + if pkgname is None: + pkgname = [] + subfiles = os.listdir(dname) + abssubfiles = [os.path.join(dname, x) for x in subfiles] + + if '__init__.py' in subfiles: + results.append(prefix + pkgname + [bname]) + for subdir in filter(os.path.isdir, abssubfiles): + get_packages(subdir, pkgname=pkgname + [bname], + results=results, ignore=ignore, + parent=parent) + res = ['.'.join(result) for result in results] + return res + + +def get_data_files(dname, ignore=None, parent=None): + """ + Get all the data files that should be included in this distutils Project. 
+ + 'dname' should be the path to the package that you're distributing. + + 'ignore' is a list of sub-packages to ignore. This facilitates + disparate package hierarchies. That's a fancy way of saying that + the 'twisted' package doesn't want to include the 'twisted.conch' + package, so it will pass ['conch'] as the value. + + 'parent' is necessary if you're distributing a subpackage like + twisted.conch. 'dname' should point to 'twisted/conch' and 'parent' + should point to 'twisted'. This ensures that your data_files are + generated correctly, only using relative paths for the first element + of the tuple ('twisted/conch/*'). + The default 'parent' is the current working directory. + """ + parent = parent or "." + ignore = ignore or [] + result = [] + for directory, subdirectories, filenames in os.walk(dname): + resultfiles = [] + for exname in EXCLUDE_NAMES: + if exname in subdirectories: + subdirectories.remove(exname) + for ig in ignore: + if ig in subdirectories: + subdirectories.remove(ig) + for filename in _filter_names(filenames): + resultfiles.append(filename) + if resultfiles: + for filename in resultfiles: + file_path = os.path.join(directory, filename) + if parent: + file_path = file_path.replace(parent + os.sep, '') + result.append(file_path) + + return result diff -Nru libcloud-0.5.0/libcloud/utils/files.py libcloud-0.15.1/libcloud/utils/files.py --- libcloud-0.5.0/libcloud/utils/files.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/utils/files.py 2014-05-26 15:42:52.000000000 +0000 @@ -0,0 +1,129 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import mimetypes + +from libcloud.utils.py3 import PY3 +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import next +from libcloud.utils.py3 import b + +if PY3: + from io import FileIO as file + +CHUNK_SIZE = 8096 + +__all__ = [ + 'read_in_chunks', + 'exhaust_iterator', + 'guess_file_mime_type' +] + + +def read_in_chunks(iterator, chunk_size=None, fill_size=False, + yield_empty=False): + """ + Return a generator which yields data in chunks. + + :param terator: An object which implements an iterator interface + or a File like object with read method. + :type iterator: :class:`object` which implements iterator interface. + + :param chunk_size: Optional chunk size (defaults to CHUNK_SIZE) + :type chunk_size: ``int`` + + :param fill_size: If True, make sure chunks are exactly chunk_size in + length (except for last chunk). + :type fill_size: ``bool`` + + :param yield_empty: If true and iterator returned no data, yield empty + bytes object before raising StopIteration. + :type yield_empty: ``bool`` + + TODO: At some point in the future we could use byte arrays here if version + >= Python 3. This should speed things up a bit and reduce memory usage. 
+ """ + chunk_size = chunk_size or CHUNK_SIZE + + if isinstance(iterator, (file, httplib.HTTPResponse)): + get_data = iterator.read + args = (chunk_size, ) + else: + get_data = next + args = (iterator, ) + + data = b('') + empty = False + + while not empty or len(data) > 0: + if not empty: + try: + chunk = b(get_data(*args)) + if len(chunk) > 0: + data += chunk + else: + empty = True + except StopIteration: + empty = True + + if len(data) == 0: + if empty and yield_empty: + yield b('') + + raise StopIteration + + if fill_size: + if empty or len(data) >= chunk_size: + yield data[:chunk_size] + data = data[chunk_size:] + else: + yield data + data = b('') + + +def exhaust_iterator(iterator): + """ + Exhaust an iterator and return all data returned by it. + + :type iterator: :class:`object` which implements iterator interface. + :param response: An object which implements an iterator interface + or a File like object with read method. + + :rtype ``str`` + :return Data returned by the iterator. + """ + data = b('') + + try: + chunk = b(next(iterator)) + except StopIteration: + chunk = b('') + + while len(chunk) > 0: + data += chunk + + try: + chunk = b(next(iterator)) + except StopIteration: + chunk = b('') + + return data + + +def guess_file_mime_type(file_path): + filename = os.path.basename(file_path) + (mimetype, encoding) = mimetypes.guess_type(filename) + return mimetype, encoding diff -Nru libcloud-0.5.0/libcloud/utils/__init__.py libcloud-0.15.1/libcloud/utils/__init__.py --- libcloud-0.5.0/libcloud/utils/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/utils/__init__.py 2013-11-29 12:35:06.000000000 +0000 @@ -0,0 +1,45 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings + +__all__ = [ + 'SHOW_DEPRECATION_WARNING', + 'SHOW_IN_DEVELOPMENT_WARNING', + 'OLD_API_REMOVE_VERSION', + 'deprecated_warning', + 'in_development_warning' +] + + +SHOW_DEPRECATION_WARNING = True +SHOW_IN_DEVELOPMENT_WARNING = True +OLD_API_REMOVE_VERSION = '0.7.0' + + +def deprecated_warning(module): + if SHOW_DEPRECATION_WARNING: + warnings.warn('This path has been deprecated and the module' + ' is now available at "libcloud.compute.%s".' + ' This path will be fully removed in libcloud %s.' % + (module, OLD_API_REMOVE_VERSION), + category=DeprecationWarning) + + +def in_development_warning(module): + if SHOW_IN_DEVELOPMENT_WARNING: + warnings.warn('The module %s is in development and your are advised ' + 'against using it in production.' 
% (module), + category=FutureWarning) diff -Nru libcloud-0.5.0/libcloud/utils/iso8601.py libcloud-0.15.1/libcloud/utils/iso8601.py --- libcloud-0.5.0/libcloud/utils/iso8601.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/utils/iso8601.py 2013-11-29 12:35:06.000000000 +0000 @@ -0,0 +1,129 @@ +""" +Copyright (c) 2007 Michael Twomey + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +ISO 8601 date time string parsing + +Basic usage: +>>> import iso8601 +>>> iso8601.parse_date("2007-01-25T12:00:00Z") +datetime.datetime(2007, 1, 25, 12, 0, tzinfo=) +>>> +""" + +# Taken from pyiso8601 which is licensed under the MIT license. + +from datetime import datetime, timedelta, tzinfo +import re + +__all__ = ["parse_date", "ParseError"] + +# Adapted from http://delete.me.uk/2005/03/iso8601.html +ISO8601_REGEX = re.compile( + r"(?P[0-9]{4})(-(?P[0-9]{1,2})(-(?P[0-9]{1,2})" + r"((?P.)(?P[0-9]{2}):(?P[0-9]{2})(:(?P[0-9]{2})(\.(?P[0-9]+))?)?" 
# NOQA + r"(?PZ|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?") +TIMEZONE_REGEX = re.compile("(?P[+-])(?P[0-9]{2}).(?P[0-9]{2})") # NOQA + + +class ParseError(Exception): + """Raised when there is a problem parsing a date string""" + +# Yoinked from python docs +ZERO = timedelta(0) + + +class Utc(tzinfo): + """UTC + + """ + def utcoffset(self, dt): + return ZERO + + def tzname(self, dt): + return "UTC" + + def dst(self, dt): + return ZERO +UTC = Utc() + + +class FixedOffset(tzinfo): + """Fixed offset in hours and minutes from UTC + + """ + def __init__(self, offset_hours, offset_minutes, name): + self.__offset = timedelta(hours=offset_hours, minutes=offset_minutes) + self.__name = name + + def utcoffset(self, dt): + return self.__offset + + def tzname(self, dt): + return self.__name + + def dst(self, dt): + return ZERO + + def __repr__(self): + return "" % self.__name + + +def parse_timezone(tzstring, default_timezone=UTC): + """Parses ISO 8601 time zone specs into tzinfo offsets + + """ + if tzstring == "Z": + return default_timezone + # This isn't strictly correct, but it's common to encounter dates without + # timezones so I'll assume the default (which defaults to UTC). + # Addresses issue 4. + if tzstring is None: + return default_timezone + m = TIMEZONE_REGEX.match(tzstring) + prefix, hours, minutes = m.groups() + hours, minutes = int(hours), int(minutes) + if prefix == "-": + hours = -hours + minutes = -minutes + return FixedOffset(hours, minutes, tzstring) + + +def parse_date(datestring, default_timezone=UTC): + """Parses ISO 8601 dates into datetime objects + + The timezone is parsed from the date string. However it is quite common to + have dates without a timezone (not strictly correct). In this case the + default timezone specified in default_timezone is used. This is UTC by + default. 
+ """ + m = ISO8601_REGEX.match(datestring) + if not m: + raise ParseError("Unable to parse date string %r" % datestring) + groups = m.groupdict() + tz = parse_timezone(groups["timezone"], default_timezone=default_timezone) + if groups["fraction"] is None: + groups["fraction"] = 0 + else: + groups["fraction"] = int(float("0.%s" % groups["fraction"]) * 1e6) + return datetime(int(groups["year"]), int(groups["month"]), + int(groups["day"]), int(groups["hour"]), + int(groups["minute"]), int(groups["second"]), + int(groups["fraction"]), tz) diff -Nru libcloud-0.5.0/libcloud/utils/logging.py libcloud-0.15.1/libcloud/utils/logging.py --- libcloud-0.5.0/libcloud/utils/logging.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/utils/logging.py 2013-11-29 12:35:06.000000000 +0000 @@ -0,0 +1,47 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Taken from https://github.com/Kami/python-extra-log-formatters + +from __future__ import absolute_import + +import logging + +__all__ = [ + 'ExtraLogFormatter' +] + + +class ExtraLogFormatter(logging.Formatter): + """ + Custom log formatter which attaches all the attributes from the "extra" + dictionary which start with an underscore to the end of the log message. 
+ + For example: + extra={'_id': 'user-1', '_path': '/foo/bar'} + """ + def format(self, record): + custom_attributes = dict([(k, v) for k, v in record.__dict__.items() + if k.startswith('_')]) + custom_attributes = self._dict_to_str(custom_attributes) + + msg = logging.Formatter.format(self, record) + msg = '%s (%s)' % (msg, custom_attributes) + return msg + + def _dict_to_str(self, dictionary): + result = ['%s=%s' % (k[1:], str(v)) for k, v in dictionary.items()] + result = ','.join(result) + return result diff -Nru libcloud-0.5.0/libcloud/utils/misc.py libcloud-0.15.1/libcloud/utils/misc.py --- libcloud-0.5.0/libcloud/utils/misc.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/utils/misc.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,254 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import binascii + + +__all__ = [ + 'get_driver', + 'set_driver', + 'merge_valid_keys', + 'get_new_obj', + 'str2dicts', + 'dict2str', + 'reverse_dict', + 'lowercase_keys', + 'get_secure_random_string' +] + + +def get_driver(drivers, provider): + """ + Get a driver. + + :param drivers: Dictionary containing valid providers. 
+ :param provider: Id of provider to get driver + :type provider: :class:`libcloud.types.Provider` + """ + if provider in drivers: + mod_name, driver_name = drivers[provider] + _mod = __import__(mod_name, globals(), locals(), [driver_name]) + return getattr(_mod, driver_name) + + raise AttributeError('Provider %s does not exist' % (provider)) + + +def set_driver(drivers, provider, module, klass): + """ + Sets a driver. + + :param drivers: Dictionary to store providers. + :param provider: Id of provider to set driver for + :type provider: :class:`libcloud.types.Provider` + :param module: The module which contains the driver + :type module: L + :param klass: The driver class name + :type klass: + """ + + if provider in drivers: + raise AttributeError('Provider %s already registered' % (provider)) + + drivers[provider] = (module, klass) + + # Check if this driver is valid + try: + driver = get_driver(drivers, provider) + except (ImportError, AttributeError): + exp = sys.exc_info()[1] + drivers.pop(provider) + raise exp + + return driver + + +def merge_valid_keys(params, valid_keys, extra): + """ + Merge valid keys from extra into params dictionary and return + dictionary with keys which have been merged. + + Note: params is modified in place. + """ + merged = {} + if not extra: + return merged + + for key in valid_keys: + if key in extra: + params[key] = extra[key] + merged[key] = extra[key] + + return merged + + +def get_new_obj(obj, klass, attributes): + """ + Pass attributes from the existing object 'obj' and attributes + dictionary to a 'klass' constructor. + Attributes from 'attributes' dictionary are only passed to the + constructor if they are not None. 
+ """ + kwargs = {} + for key, value in list(obj.__dict__.items()): + if isinstance(value, dict): + kwargs[key] = value.copy() + elif isinstance(value, (tuple, list)): + kwargs[key] = value[:] + else: + kwargs[key] = value + + for key, value in list(attributes.items()): + if value is None: + continue + + if isinstance(value, dict): + kwargs_value = kwargs.get(key, {}) + for key1, value2 in list(value.items()): + if value2 is None: + continue + + kwargs_value[key1] = value2 + kwargs[key] = kwargs_value + else: + kwargs[key] = value + + return klass(**kwargs) + + +def str2dicts(data): + """ + Create a list of dictionaries from a whitespace and newline delimited text. + + For example, this: + cpu 1100 + ram 640 + + cpu 2200 + ram 1024 + + becomes: + [{'cpu': '1100', 'ram': '640'}, {'cpu': '2200', 'ram': '1024'}] + """ + list_data = [] + list_data.append({}) + d = list_data[-1] + + lines = data.split('\n') + for line in lines: + line = line.strip() + + if not line: + d = {} + list_data.append(d) + d = list_data[-1] + continue + + whitespace = line.find(' ') + + if not whitespace: + continue + + key = line[0:whitespace] + value = line[whitespace + 1:] + d.update({key: value}) + + list_data = [val for val in list_data if val != {}] + return list_data + + +def str2list(data): + """ + Create a list of values from a whitespace and newline delimited text + (keys are ignored). + + For example, this: + ip 1.2.3.4 + ip 1.2.3.5 + ip 1.2.3.6 + + becomes: + ['1.2.3.4', '1.2.3.5', '1.2.3.6'] + """ + list_data = [] + + for line in data.split('\n'): + line = line.strip() + + if not line: + continue + + try: + splitted = line.split(' ') + # key = splitted[0] + value = splitted[1] + except Exception: + continue + + list_data.append(value) + + return list_data + + +def dict2str(data): + """ + Create a string with a whitespace and newline delimited text from a + dictionary. 
+ + For example, this: + {'cpu': '1100', 'ram': '640', 'smp': 'auto'} + + becomes: + cpu 1100 + ram 640 + smp auto + + cpu 2200 + ram 1024 + """ + result = '' + for k in data: + if data[k] is not None: + result += '%s %s\n' % (str(k), str(data[k])) + else: + result += '%s\n' % str(k) + + return result + + +def reverse_dict(dictionary): + return dict([(value, key) for key, value in list(dictionary.items())]) + + +def lowercase_keys(dictionary): + return dict(((k.lower(), v) for k, v in dictionary.items())) + + +def get_secure_random_string(size): + """ + Return a string of ``size`` random bytes. Returned string is suitable for + cryptographic use. + + :param size: Size of the generated string. + :type size: ``int`` + + :return: Random string. + :rtype: ``str`` + """ + value = os.urandom(size) + value = binascii.hexlify(value) + value = value.decode('utf-8')[:size] + return value diff -Nru libcloud-0.5.0/libcloud/utils/networking.py libcloud-0.15.1/libcloud/utils/networking.py --- libcloud-0.5.0/libcloud/utils/networking.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/utils/networking.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,80 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import socket +import struct + +__all__ = [ + 'is_private_subnet', + 'is_public_subnet', + 'is_valid_ip_address' +] + + +def is_private_subnet(ip): + """ + Utility function to check if an IP address is inside a private subnet. + + :type ip: ``str`` + :param ip: IP address to check + + :return: ``bool`` if the specified IP address is private. + """ + priv_subnets = [{'subnet': '10.0.0.0', 'mask': '255.0.0.0'}, + {'subnet': '172.16.0.0', 'mask': '255.240.0.0'}, + {'subnet': '192.168.0.0', 'mask': '255.255.0.0'}] + + ip = struct.unpack('I', socket.inet_aton(ip))[0] + + for network in priv_subnets: + subnet = struct.unpack('I', socket.inet_aton(network['subnet']))[0] + mask = struct.unpack('I', socket.inet_aton(network['mask']))[0] + + if (ip & mask) == (subnet & mask): + return True + + return False + + +def is_public_subnet(ip): + """ + Utility function to check if an IP address is inside a public subnet. + + :type ip: ``str`` + :param ip: IP address to check + + :return: ``bool`` if the specified IP address is public. + """ + return not is_private_subnet(ip=ip) + + +def is_valid_ip_address(address, family=socket.AF_INET): + """ + Check if the provided address is valid IPv4 or IPv6 address. + + :param address: IPv4 or IPv6 ddress to check. + :type address: ``str`` + + :param family: Address family (socket.AF_INTET / socket.AF_INET6). + :type family: ``int`` + + :return: ``bool`` True if the provided address is valid. + """ + try: + socket.inet_pton(family, address) + except socket.error: + return False + + return True diff -Nru libcloud-0.5.0/libcloud/utils/publickey.py libcloud-0.15.1/libcloud/utils/publickey.py --- libcloud-0.5.0/libcloud/utils/publickey.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/utils/publickey.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,70 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import base64 +import hashlib + +from libcloud.utils.py3 import hexadigits +from libcloud.utils.py3 import bchr + +__all__ = [ + 'get_pubkey_openssh_fingerprint', + 'get_pubkey_ssh2_fingerprint', + 'get_pubkey_comment' +] + +try: + from Crypto.Util.asn1 import DerSequence, DerObject + from Crypto.PublicKey.RSA import algorithmIdentifier, importKey + pycrypto_available = True +except ImportError: + pycrypto_available = False + + +def _to_md5_fingerprint(data): + hashed = hashlib.md5(data).digest() + return ":".join(hexadigits(hashed)) + + +def get_pubkey_openssh_fingerprint(pubkey): + # We import and export the key to make sure it is in OpenSSH format + if not pycrypto_available: + raise RuntimeError('pycrypto is not available') + k = importKey(pubkey) + pubkey = k.exportKey('OpenSSH')[7:] + decoded = base64.decodestring(pubkey) + return _to_md5_fingerprint(decoded) + + +def get_pubkey_ssh2_fingerprint(pubkey): + # This is the format that EC2 shows for public key fingerprints in its + # KeyPair mgmt API + if not pycrypto_available: + raise RuntimeError('pycrypto is not available') + k = importKey(pubkey) + derPK = DerSequence([k.n, k.e]) + bitmap = DerObject('BIT STRING') + bitmap.payload = bchr(0x00) + derPK.encode() + der = DerSequence([algorithmIdentifier, bitmap.encode()]) + return 
_to_md5_fingerprint(der.encode()) + + +def get_pubkey_comment(pubkey, default=None): + if pubkey.startswith("ssh-"): + # This is probably an OpenSSH key + return pubkey.strip().split(' ', 3)[2] + if default: + return default + raise ValueError('Public key is not in a supported format') diff -Nru libcloud-0.5.0/libcloud/utils/py3.py libcloud-0.15.1/libcloud/utils/py3.py --- libcloud-0.5.0/libcloud/utils/py3.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/utils/py3.py 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,195 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Libcloud Python 2.x and 3.x compatibility layer +# Some methods below are taken from Django PYK3 port which is licensed under 3 +# clause BSD license +# https://bitbucket.org/loewis/django-3k + +from __future__ import absolute_import + +import sys +import types + +try: + from lxml import etree as ET +except ImportError: + from xml.etree import ElementTree as ET + +PY2 = False +PY25 = False +PY27 = False +PY3 = False +PY32 = False + +if sys.version_info >= (2, 0) and sys.version_info < (3, 0): + PY2 = True + +if sys.version_info >= (2, 5) and sys.version_info <= (2, 6): + PY25 = True + +if sys.version_info >= (2, 7) and sys.version_info <= (2, 8): + PY27 = True + +if sys.version_info >= (3, 0): + PY3 = True + +if sys.version_info >= (3, 2) and sys.version_info < (3, 3): + PY32 = True + +if PY3: + import http.client as httplib + from io import StringIO + import urllib + import urllib as urllib2 + import urllib.parse as urlparse + import xmlrpc.client as xmlrpclib + + from urllib.parse import quote as urlquote + from urllib.parse import unquote as urlunquote + from urllib.parse import urlencode as urlencode + from os.path import relpath + + from imp import reload + + from builtins import bytes + from builtins import next + + parse_qs = urlparse.parse_qs + parse_qsl = urlparse.parse_qsl + + basestring = str + + def method_type(callable, instance, klass): + return types.MethodType(callable, instance or klass()) + + def b(s): + if isinstance(s, str): + return s.encode('utf-8') + elif isinstance(s, bytes): + return s + else: + raise TypeError("Invalid argument %r for b()" % (s,)) + + def ensure_string(s): + if isinstance(s, str): + return s + elif isinstance(s, bytes): + return s.decode('utf-8') + else: + raise TypeError("Invalid argument %r for ensure_string()" % (s,)) + + def byte(n): + # assume n is a Latin-1 string of length 1 + return ord(n) + u = str + + def bchr(s): + """Take an integer and make a 1-character byte string.""" + return bytes([s]) + + def 
dictvalues(d): + return list(d.values()) + + def tostring(node): + return ET.tostring(node, encoding='unicode') + + def hexadigits(s): + # s needs to be a byte string. + return [format(x, "x") for x in s] + +else: + import httplib # NOQA + from StringIO import StringIO # NOQA + import urllib # NOQA + import urllib2 # NOQA + import urlparse # NOQA + import xmlrpclib # NOQA + from urllib import quote as _urlquote # NOQA + from urllib import unquote as urlunquote # NOQA + from urllib import urlencode as urlencode # NOQA + + from __builtin__ import reload # NOQA + + if PY25: + import cgi + + parse_qs = cgi.parse_qs + parse_qsl = cgi.parse_qsl + else: + parse_qs = urlparse.parse_qs + parse_qsl = urlparse.parse_qsl + + if not PY25: + from os.path import relpath # NOQA + + # Save the real value of unicode because urlquote needs it to tell the + # difference between a unicode string and a byte string. + _real_unicode = unicode + basestring = unicode = str + + method_type = types.MethodType + + b = bytes = ensure_string = str + + def byte(n): + return n + + u = unicode + + def bchr(s): + """Take an integer and make a 1-character byte string.""" + return chr(s) + + def next(i): + return i.next() + + def dictvalues(d): + return d.values() + + tostring = ET.tostring + + def urlquote(s, safe='/'): + if isinstance(s, _real_unicode): + # Pretend to be py3 by encoding the URI automatically. + s = s.encode('utf8') + return _urlquote(s, safe) + + def hexadigits(s): + # s needs to be a string. + return [x.encode("hex") for x in s] + +if PY25: + import posixpath + + # Taken from http://jimmyg.org/work/code/barenecessities/index.html + # (MIT license) + def relpath(path, start=posixpath.curdir): # NOQA + """Return a relative version of a path""" + if not path: + raise ValueError("no path specified") + start_list = posixpath.abspath(start).split(posixpath.sep) + path_list = posixpath.abspath(path).split(posixpath.sep) + # Work out how much of the filepath is shared by start and path. 
+ i = len(posixpath.commonprefix([start_list, path_list])) + rel_list = [posixpath.pardir] * (len(start_list) - i) + path_list[i:] + if not rel_list: + return posixpath.curdir + return posixpath.join(*rel_list) + +if PY27 or PY3: + unittest2_required = False +else: + unittest2_required = True diff -Nru libcloud-0.5.0/libcloud/utils/xml.py libcloud-0.15.1/libcloud/utils/xml.py --- libcloud-0.5.0/libcloud/utils/xml.py 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/libcloud/utils/xml.py 2013-11-29 12:35:06.000000000 +0000 @@ -0,0 +1,50 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = [ + 'fixxpath', + 'findtext', + 'findattr', + 'findall' +] + + +def fixxpath(xpath, namespace=None): + # ElementTree wants namespaces in its xpaths, so here we add them. + if not namespace: + return xpath + + return '/'.join(['{%s}%s' % (namespace, e) for e in xpath.split('/')]) + + +def findtext(element, xpath, namespace=None, no_text_value=''): + """ + :param no_text_value: Value to return if the provided element has no text + value. 
+ :type no_text_value: ``object`` + """ + value = element.findtext(fixxpath(xpath=xpath, namespace=namespace)) + + if value == '': + return no_text_value + return value + + +def findattr(element, xpath, namespace=None): + return element.findtext(fixxpath(xpath=xpath, namespace=namespace)) + + +def findall(element, xpath, namespace=None): + return element.findall(fixxpath(xpath=xpath, namespace=namespace)) diff -Nru libcloud-0.5.0/libcloud/utils.py libcloud-0.15.1/libcloud/utils.py --- libcloud-0.5.0/libcloud/utils.py 2011-05-14 10:16:05.000000000 +0000 +++ libcloud-0.15.1/libcloud/utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,195 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import mimetypes -import warnings -from httplib import HTTPResponse - -SHOW_DEPRECATION_WARNING = True -SHOW_IN_DEVELOPMENT_WARNING = True -OLD_API_REMOVE_VERSION = '0.6.0' - -def read_in_chunks(iterator, chunk_size=None): - """ - Return a generator which yields data in chunks. - - @type iterator: C{Iterator} - @param response: An object which implements an iterator interface - or a File like object with read method. 
- - @type chunk_size: C{int} - @param chunk_size: Optional chunk size (defaults to CHUNK_SIZE) - """ - - if isinstance(iterator, (file, HTTPResponse)): - get_data = iterator.read - args = (chunk_size, ) - else: - get_data = iterator.next - args = () - - while True: - chunk = str(get_data(*args)) - - if len(chunk) == 0: - raise StopIteration - - yield chunk - -def guess_file_mime_type(file_path): - filename = os.path.basename(file_path) - (mimetype, encoding) = mimetypes.guess_type(filename) - return mimetype, encoding - -def deprecated_warning(module): - if SHOW_DEPRECATION_WARNING: - warnings.warn('This path has been deprecated and the module' - ' is now available at "libcloud.compute.%s".' - ' This path will be fully removed in libcloud %s.' % - (module, OLD_API_REMOVE_VERSION), - category=DeprecationWarning) - -def in_development_warning(module): - if SHOW_IN_DEVELOPMENT_WARNING: - warnings.warn('The module %s is in development and your are advised ' - 'against using it in production.' % (module), - category=FutureWarning) - -def str2dicts(data): - """ - Create a list of dictionaries from a whitespace and newline delimited text. - - For example, this: - cpu 1100 - ram 640 - - cpu 2200 - ram 1024 - - becomes: - [{'cpu': '1100', 'ram': '640'}, {'cpu': '2200', 'ram': '1024'}] - """ - list_data = [] - list_data.append({}) - d = list_data[-1] - - lines = data.split('\n') - for line in lines: - line = line.strip() - - if not line: - d = {} - list_data.append(d) - d = list_data[-1] - continue - - whitespace = line.find(' ') - - if not whitespace: - continue - - key = line[0:whitespace] - value = line[whitespace + 1:] - d.update({key: value}) - - list_data = [value for value in list_data if value != {}] - return list_data - -def str2list(data): - """ - Create a list of values from a whitespace and newline delimited text (keys are ignored). 
- - For example, this: - ip 1.2.3.4 - ip 1.2.3.5 - ip 1.2.3.6 - - becomes: - ['1.2.3.4', '1.2.3.5', '1.2.3.6'] - """ - list_data = [] - - for line in data.split('\n'): - line = line.strip() - - if not line: - continue - - try: - splitted = line.split(' ') - # key = splitted[0] - value = splitted[1] - except Exception: - continue - - list_data.append(value) - - return list_data - -def dict2str(data): - """ - Create a string with a whitespace and newline delimited text from a dictionary. - - For example, this: - {'cpu': '1100', 'ram': '640', 'smp': 'auto'} - - becomes: - cpu 1100 - ram 640 - smp auto - - cpu 2200 - ram 1024 - """ - result = '' - for k in data: - if data[k] != None: - result += '%s %s\n' % (str(k), str(data[k])) - else: - result += '%s\n' % str(k) - - return result - -def fixxpath(xpath, namespace): - # ElementTree wants namespaces in its xpaths, so here we add them. - return '/'.join(['{%s}%s' % (namespace, e) for e in xpath.split('/')]) - -def findtext(element, xpath, namespace): - return element.findtext(fixxpath(xpath=xpath, namespace=namespace)) - -def findattr(element, xpath, namespace): - return element.findtext(fixxpath(xpath=xpath, namespace=namespace)) - -def findall(element, xpath, namespace): - return element.findall(fixxpath(xpath=xpath, namespace=namespace)) - -def reverse_dict(dictionary): - return dict([ (value, key) for key, value in dictionary.iteritems() ]) - -def get_driver(drivers, provider): - """ - Get a driver. - - @param drivers: Dictionary containing valid providers. 
- @param provider: Id of provider to get driver - @type provider: L{libcloud.types.Provider} - """ - if provider in drivers: - mod_name, driver_name = drivers[provider] - _mod = __import__(mod_name, globals(), locals(), [driver_name]) - return getattr(_mod, driver_name) - - raise AttributeError('Provider %s does not exist' % (provider)) diff -Nru libcloud-0.5.0/MANIFEST.in libcloud-0.15.1/MANIFEST.in --- libcloud-0.5.0/MANIFEST.in 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/MANIFEST.in 2014-05-26 15:42:51.000000000 +0000 @@ -0,0 +1,20 @@ +include LICENSE +include NOTICE +include example_*.py +include CHANGES.rst +include README.rst +include tox.ini +include libcloud/data/pricing.json +prune libcloud/test/secrets.py +include demos/* +include libcloud/test/*.py +include libcloud/test/pricing_test.json +include libcloud/test/secrets.py-dist +include libcloud/test/compute/*.py +include libcloud/test/storage/*.py +include libcloud/test/loadbalancer/*.py +include libcloud/test/dns/*.py +include libcloud/test/compute/fixtures/*/* +include libcloud/test/storage/fixtures/*/* +include libcloud/test/loadbalancer/fixtures/*/* +include libcloud/test/dns/fixtures/*/* diff -Nru libcloud-0.5.0/NOTICE libcloud-0.15.1/NOTICE --- libcloud-0.5.0/NOTICE 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/NOTICE 2014-05-26 15:42:51.000000000 +0000 @@ -1,8 +1,8 @@ Apache Libcloud -Copyright (c) 2010 The Apache Software Foundation +Copyright (c) 2010-2014 The Apache Software Foundation -This product includes software developed by +This product includes software developed at The Apache Software Foundation (http://www.apache.org/). -This product includes software developed by +This product includes software developed by Cloudkick (http://www.cloudkick.com/). 
diff -Nru libcloud-0.5.0/PKG-INFO libcloud-0.15.1/PKG-INFO --- libcloud-0.5.0/PKG-INFO 2011-05-21 21:33:12.000000000 +0000 +++ libcloud-0.15.1/PKG-INFO 2014-07-02 21:19:07.000000000 +0000 @@ -1,10 +1,10 @@ -Metadata-Version: 1.0 +Metadata-Version: 1.1 Name: apache-libcloud -Version: 0.5.0 -Summary: A unified interface into many cloud server providers -Home-page: http://incubator.apache.org/libcloud/ +Version: 0.15.1 +Summary: A standard Python library that abstracts away differences among multiple cloud provider APIs. For more information and documentation, please see http://libcloud.apache.org +Home-page: http://libcloud.apache.org/ Author: Apache Software Foundation -Author-email: libcloud@incubator.apache.org +Author-email: dev@libcloud.apache.org License: Apache License (2.0) Description: UNKNOWN Platform: UNKNOWN @@ -15,3 +15,13 @@ Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Programming Language :: Python :: 2.5 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.0 +Classifier: Programming Language :: Python :: 3.1 +Classifier: Programming Language :: Python :: 3.2 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: Implementation :: PyPy diff -Nru libcloud-0.5.0/README libcloud-0.15.1/README --- libcloud-0.5.0/README 2011-03-24 19:47:23.000000000 +0000 +++ libcloud-0.15.1/README 1970-01-01 00:00:00.000000000 +0000 @@ -1,60 +0,0 @@ - -Apache libcloud - a unified interface into the cloud -==================================================== - -The goal of this project is to create a basic yet functional standard library -into various cloud providers. 
- -Apache libcloud is an incubator project at the Apache Software Foundation, see - for more information. - -For API documentation and examples, see: - - - -Important Security Note -======================= - -Python's built-in SSL module does not do certificate validation. - -To address this, we've introduced the libcloud.security module with tunable -parameters. - -View the entire guide at: - -Enabling SSL Certificate Check -============================== - - import libcloud.security - libcloud.security.VERIFY_SSL_CERT = True - - # optionally, add to CA_CERTS_PATH - libcloud.security.CA_CERTS_PATH.append("/path/to/your/cacerts.txt") - -CA_CERTS_PATH contains common paths to CA bundle installations on the -following platforms: - - * openssl on CentOS/Fedora - * ca-certificates on Debian/Ubuntu/Arch/Gentoo - * ca_root_nss on FreeBSD - * curl-ca-bundle on Mac OS X - -Note for OS X Users -=================== - -OS X root certificates are stored in the Keychain format, unlike the standard -PEM format available on other *nix platforms. For this reason, it is not -possible to include the standard OS X root certificates with CA_CERTS_PATH. - -Acquiring CA Certificates -========================= - -If the above packages are unavailable to you, and you don't wish to roll your -own, the makers of cURL provides an excellent resource, generated from -Mozilla: http://curl.haxx.se/docs/caextract.html - -Feedback -======== - -Please send feedback to the mailing list at , -or the JIRA at . diff -Nru libcloud-0.5.0/README.rst libcloud-0.15.1/README.rst --- libcloud-0.5.0/README.rst 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/README.rst 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,51 @@ +Apache Libcloud - a unified interface into the cloud +==================================================== + +.. image:: https://badge.fury.io/py/apache-libcloud.png + :target: http://badge.fury.io/py/apache-libcloud + +.. 
image:: https://pypip.in/d/apache-libcloud/badge.png + :target: https://crate.io/packages/apache-libcloud/ + +.. image:: https://secure.travis-ci.org/apache/libcloud.png?branch=trunk + :target: http://travis-ci.org/apache/libcloud + +Apache Libcloud is a Python library which hides differences between different +cloud provider APIs and allows you to manage different cloud resources +through a unified and easy to use API. + +Resource you can manage with Libcloud are divided in the following categories: + +* Cloud Servers and Block Storage - services such as Amazon EC2 and Rackspace + Cloud Servers (``libcloud.compute.*``) +* Cloud Object Storage and CDN - services such as Amazon S3 and Rackspace + CloudFiles (``libcloud.storage.*``) +* Load Balancers as a Service, LBaaS (``libcloud.loadbalancer.*``) +* DNS as a Service, DNSaaS (``libcloud.dns.*``) + +Apache Libcloud is an Apache project, see for +more information. + +Documentation +============= + +Documentation can be found at https://libcloud.readthedocs.org. + +Feedback +======== + +Please send feedback to the mailing list at , +or the JIRA at . + +Contributing +============ + +For information on how to contribute, please see the Contributing +chapter in our documentation + + +License +======= + +Apache Libcloud is licensed under the Apache 2.0 license. For more information, +please see LICENSE and NOTICE file. 
diff -Nru libcloud-0.5.0/setup.cfg libcloud-0.15.1/setup.cfg --- libcloud-0.5.0/setup.cfg 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/setup.cfg 2014-07-02 21:19:07.000000000 +0000 @@ -0,0 +1,8 @@ +[wheel] +universal = 1 + +[egg_info] +tag_build = +tag_date = 0 +tag_svn_revision = 0 + diff -Nru libcloud-0.5.0/setup.py libcloud-0.15.1/setup.py --- libcloud-0.5.0/setup.py 2011-05-21 21:12:28.000000000 +0000 +++ libcloud-0.15.1/setup.py 2014-06-11 14:27:59.000000000 +0000 @@ -16,20 +16,46 @@ import sys import doctest -from distutils.core import setup +from setuptools import setup from distutils.core import Command from unittest import TextTestRunner, TestLoader from glob import glob +from subprocess import call from os.path import splitext, basename, join as pjoin -import libcloud.utils -libcloud.utils.SHOW_DEPRECATION_WARNING = False +try: + import epydoc + has_epydoc = True +except ImportError: + has_epydoc = False + +import libcloud.utils.misc +from libcloud.utils.dist import get_packages, get_data_files +from libcloud.utils.py3 import unittest2_required + +libcloud.utils.misc.SHOW_DEPRECATION_WARNING = False + + +HTML_VIEWSOURCE_BASE = 'https://svn.apache.org/viewvc/libcloud/trunk' +PROJECT_BASE_DIR = 'http://libcloud.apache.org' +TEST_PATHS = ['libcloud/test', 'libcloud/test/common', 'libcloud/test/compute', + 'libcloud/test/storage', 'libcloud/test/loadbalancer', + 'libcloud/test/dns'] +DOC_TEST_MODULES = ['libcloud.compute.drivers.dummy', + 'libcloud.storage.drivers.dummy', + 'libcloud.dns.drivers.dummy'] + +SUPPORTED_VERSIONS = ['2.5', '2.6', '2.7', 'PyPy', '3.x'] + +if sys.version_info <= (2, 4): + version = '.'.join([str(x) for x in sys.version_info[:3]]) + print('Version ' + version + ' is not supported. 
Supported versions are ' + + ', '.join(SUPPORTED_VERSIONS)) + sys.exit(1) + +# pre-2.6 will need the ssl PyPI package +pre_python26 = (sys.version_info[0] == 2 and sys.version_info[1] < 6) -HTML_VIEWSOURCE_BASE = 'https://svn.apache.org/viewvc/incubator/libcloud/trunk' -PROJECT_BASE_DIR = 'http://incubator.apache.org/libcloud/' -TEST_PATHS = [ 'test', 'test/compute', 'test/storage' , 'test/loadbalancer'] -DOC_TEST_MODULES = [ 'libcloud.compute.drivers.dummy', - 'libcloud.storage.drivers.dummy' ] def read_version_string(): version = None @@ -39,7 +65,20 @@ sys.path.pop(0) return version + +def forbid_publish(): + argv = sys.argv + if 'upload'in argv: + print('You shouldn\'t use upload command to upload a release to PyPi. ' + 'You need to manually upload files generated using release.sh ' + 'script.\n' + 'For more information, see "Making a release section" in the ' + 'documentation') + sys.exit(1) + + class TestCommand(Command): + description = "run test suite" user_options = [] def initialize_options(self): @@ -53,19 +92,47 @@ pass def run(self): + try: + import mock + mock + except ImportError: + print('Missing "mock" library. mock is library is needed ' + 'to run the tests. You can install it using pip: ' + 'pip install mock') + sys.exit(1) + + if unittest2_required: + try: + import unittest2 + unittest2 + except ImportError: + print('Python version: %s' % (sys.version)) + print('Missing "unittest2" library. unittest2 is library is ' + 'needed to run the tests. 
You can install it using pip: ' + 'pip install unittest2') + sys.exit(1) + status = self._run_tests() sys.exit(status) def _run_tests(self): - secrets = pjoin(self._dir, 'test', 'secrets.py') - if not os.path.isfile(secrets): - print "Missing %s" % (secrets) - print "Maybe you forgot to copy it from -dist:" - print " cp test/secrets.py-dist test/secrets.py" + secrets_current = pjoin(self._dir, 'libcloud/test', 'secrets.py') + secrets_dist = pjoin(self._dir, 'libcloud/test', 'secrets.py-dist') + + if not os.path.isfile(secrets_current): + print("Missing " + secrets_current) + print("Maybe you forgot to copy it from -dist:") + print("cp libcloud/test/secrets.py-dist libcloud/test/secrets.py") sys.exit(1) - pre_python26 = (sys.version_info[0] == 2 - and sys.version_info[1] < 6) + mtime_current = os.path.getmtime(secrets_current) + mtime_dist = os.path.getmtime(secrets_dist) + + if mtime_dist > mtime_current: + print("It looks like test/secrets.py file is out of date.") + print("Please copy the new secrets.py-dist file over otherwise" + + " tests might fail") + if pre_python26: missing = [] # test for dependencies @@ -82,7 +149,7 @@ missing.append("ssl") if missing: - print "Missing dependencies: %s" % ", ".join(missing) + print("Missing dependencies: " + ", ".join(missing)) sys.exit(1) testfiles = [] @@ -96,11 +163,13 @@ for test_module in DOC_TEST_MODULES: tests.addTests(doctest.DocTestSuite(test_module)) - t = TextTestRunner(verbosity = 2) + t = TextTestRunner(verbosity=2) res = t.run(tests) return not res.wasSuccessful() + class ApiDocsCommand(Command): + description = "generate API documentation" user_options = [] def initialize_options(self): @@ -110,6 +179,9 @@ pass def run(self): + if not has_epydoc: + raise RuntimeError('Missing "epydoc" package!') + os.system( 'pydoctor' ' --add-package=libcloud' @@ -118,10 +190,11 @@ ' --html-viewsource-base="%s"' ' --project-base-dir=`pwd`' ' --project-url="%s"' - % (HTML_VIEWSOURCE_BASE, PROJECT_BASE_DIR) - ) + % 
(HTML_VIEWSOURCE_BASE, PROJECT_BASE_DIR)) + class CoverageCommand(Command): + description = "run test suite and generate coverage report" user_options = [] def initialize_options(self): @@ -142,40 +215,30 @@ cov.save() cov.html_report() -# pre-2.6 will need the ssl PyPI package -pre_python26 = (sys.version_info[0] == 2 and sys.version_info[1] < 6) +forbid_publish() setup( name='apache-libcloud', version=read_version_string(), - description='A unified interface into many cloud server providers', + description='A standard Python library that abstracts away differences' + + ' among multiple cloud provider APIs. For more information' + + ' and documentation, please see http://libcloud.apache.org', author='Apache Software Foundation', - author_email='libcloud@incubator.apache.org', + author_email='dev@libcloud.apache.org', requires=([], ['ssl', 'simplejson'],)[pre_python26], - packages=[ - 'libcloud', - 'libcloud.common', - 'libcloud.compute', - 'libcloud.compute.drivers', - 'libcloud.storage', - 'libcloud.storage.drivers', - 'libcloud.drivers', - 'libcloud.loadbalancer', - 'libcloud.loadbalancer.drivers', - ], + packages=get_packages('libcloud'), package_dir={ 'libcloud': 'libcloud', }, - package_data={ - 'libcloud': ['data/*.json'] - }, + package_data={'libcloud': get_data_files('libcloud', parent='libcloud')}, license='Apache License (2.0)', - url='http://incubator.apache.org/libcloud/', + url='http://libcloud.apache.org/', cmdclass={ 'test': TestCommand, 'apidocs': ApiDocsCommand, 'coverage': CoverageCommand }, + zip_safe=False, classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', @@ -183,6 +246,14 @@ 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Programming Language :: Python', - 'Topic :: Software Development :: Libraries :: Python Modules' - ], -) + 'Topic :: Software Development :: Libraries :: Python Modules', + 'Programming Language :: Python :: 2.5', + 'Programming Language :: Python :: 2.6', 
+ 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.0', + 'Programming Language :: Python :: 3.1', + 'Programming Language :: Python :: 3.2', + 'Programming Language :: Python :: 3.3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: Implementation :: PyPy']) diff -Nru libcloud-0.5.0/test/compute/fixtures/bluebox/api_block_products_json.json libcloud-0.15.1/test/compute/fixtures/bluebox/api_block_products_json.json --- libcloud-0.5.0/test/compute/fixtures/bluebox/api_block_products_json.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/bluebox/api_block_products_json.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -[{"cost": 0.15, "id": "94fd37a7-2606-47f7-84d5-9000deda52ae", "description": "Block 1GB Virtual Server"}, {"cost": 0.25, "id": "b412f354-5056-4bf0-a42f-6ddd998aa092", "description": "Block 2GB Virtual Server"}, {"cost": 0.35, "id": "0cd183d3-0287-4b1a-8288-b3ea8302ed58", "description": "Block 4GB Virtual Server"}, {"cost": 0.45, "id": "b9b87a5b-2885-4a2e-b434-44a163ca6251", "description": "Block 8GB Virtual Server"}] diff -Nru libcloud-0.5.0/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json libcloud-0.15.1/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json --- libcloud-0.5.0/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"text":"Block destroyed."} diff -Nru libcloud-0.5.0/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json libcloud-0.15.1/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json --- 
libcloud-0.5.0/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"ips": [{"address": "67.214.214.212"}], "memory": 1073741824, "template": "centos", "id": "99df878c-6e5c-4945-a635-d94da9fd3146", "storage": 21474836480, "hostname": "apitest.c44905.c44905.blueboxgrid.com", "description": "1 GB RAM + 20 GB Disk", "cpu": 0.5, "status": "running", "product": {"cost": 0.15, "id": "94fd37a7-2606-47f7-84d5-9000deda52ae", "description": "Block 1GB Virtual Server"}} diff -Nru libcloud-0.5.0/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json libcloud-0.15.1/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json --- libcloud-0.5.0/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{ "status": "ok", "text": "Reboot initiated." 
} diff -Nru libcloud-0.5.0/test/compute/fixtures/bluebox/api_blocks_json.json libcloud-0.15.1/test/compute/fixtures/bluebox/api_blocks_json.json --- libcloud-0.5.0/test/compute/fixtures/bluebox/api_blocks_json.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/bluebox/api_blocks_json.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -[{"ips":[{"address":"67.214.214.212"}],"memory":1073741824,"id":"99df878c-6e5c-4945-a635-d94da9fd3146","storage":21474836480,"hostname":"foo.apitest.blueboxgrid.com","description":"1 GB RAM + 20 GB Disk","cpu":0.5,"status":"running"}] diff -Nru libcloud-0.5.0/test/compute/fixtures/bluebox/api_blocks_json_post.json libcloud-0.15.1/test/compute/fixtures/bluebox/api_blocks_json_post.json --- libcloud-0.5.0/test/compute/fixtures/bluebox/api_blocks_json_post.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/bluebox/api_blocks_json_post.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"ips":[{"address":"67.214.214.212"}],"memory":1073741824,"id":"99df878c-6e5c-4945-a635-d94da9fd3146","storage":21474836480,"hostname":"foo.apitest.blueboxgrid.com","description":"1 GB RAM + 20 GB Disk","cpu":0.5,"status":"queued", "product": {"cost": 0.15, "id": "94fd37a7-2606-47f7-84d5-9000deda52ae", "description": "Block 1GB Virtual Server"}} diff -Nru libcloud-0.5.0/test/compute/fixtures/bluebox/api_block_templates_json.json libcloud-0.15.1/test/compute/fixtures/bluebox/api_block_templates_json.json --- libcloud-0.5.0/test/compute/fixtures/bluebox/api_block_templates_json.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/bluebox/api_block_templates_json.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -[{"public": true, "id": "c66b8145-f768-45ef-9878-395bf8b1b7ff", "description": "CentOS 5 (Latest Release)", "created": "2009/04/20 15:46:34 -0700"}, {"public": true, "id": "1fc24f51-6d7d-4fa9-9a6e-0d6f36b692e2", "description": "Ubuntu 8.10 
64bit", "created": "2009/04/20 15:46:34 -0700"}, {"public": true, "id": "b6f152db-988c-4194-b292-d6dd2aa2dbab", "description": "Debian 5.0 64bit", "created": "2009/04/20 15:46:34 -0700"}, {"public": true, "id": "4b697e48-282b-4140-8cf8-142e2a2711ee", "description": "Ubuntu 8.04 LTS 64bit", "created": "2009/07/31 15:58:20 -0700"}, {"public": true, "id": "a6a141bf-592a-4fa6-b130-4c14f69e82d0", "description": "Ubuntu 8.04 LTS 32Bit", "created": "2009/04/20 15:46:34 -0700"}, {"public": true, "id": "b181033f-aea7-4e6c-8bb4-11169775c0f8", "description": "Ubuntu 9.04 64bit", "created": "2010/01/26 11:31:19 -0800"}, {"public": true, "id": "b5371c5a-9da2-43ee-a745-99a4723f624c", "description": "ArchLinux 2009.08 64bit", "created": "2010/02/13 18:07:01 -0800"}, {"public": true, "id": "a00baa8f-b5d0-4815-8238-b471c4c4bf72", "description": "Ubuntu 9.10 64bit", "created": "2010/02/17 22:06:21 -0800"}, {"public": true, "id": "03807e08-a13d-44e4-b011-ebec7ef2c928", "description": "Ubuntu 10.04 LTS 64bit", "created": "2010/05/04 14:43:30 -0700"}, {"public": true, "id": "8b60e6de-7cbc-4c8e-b7df-5e2f9c4ffd6b", "description": "Ubuntu 10.04 LTS 32bit", "created": "2010/05/04 14:43:30 -0700"}] diff -Nru libcloud-0.5.0/test/compute/fixtures/brightbox/create_server.json libcloud-0.15.1/test/compute/fixtures/brightbox/create_server.json --- libcloud-0.5.0/test/compute/fixtures/brightbox/create_server.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/brightbox/create_server.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,62 +0,0 @@ -{"id": "srv-3a97e", - "url": "servers/(server_id)", - "name": "My web server", - "status": "active", - "hostname": "srv-3a97e.gb1.brightbox.com", - "created_at": "", - "deleted_at": "", - "started_at": "", - "account": - {"id": "acc-3jd8s", - "url": "accounts/(account_id)", - "name": "Brightbox Systems Ltd.", - "status": "verified", - "ram_limit": 20480, - "ram_used": 2048, - "limits_cloudips": 5}, - "image": - {"id": 
"img-9vxqi", - "url": "images/(image_id)", - "name": "Brightbox Lucid 32", - "status": "available", - "description": "Jeremy's debian ec2 image", - "source": "jeremy_debian-32_ec2", - "source_type": "upload", - "arch": "32-bit", - "created_at": "", - "owner": "acc-bright"}, - "server_type": - {"id": "typ-a97e6", - "url": "server_types/(server_type_id)", - "handle": "nano", - "name": "Brightbox Nano", - "status": "", - "cores": 2, - "ram": 2048, - "disk_size": ""}, - "zone": - {"id": "zon-8ja0a", - "url": "zones/(zone_id)", - "handle": "gb1-a"}, - "snapshots": - [{"id": "img-9vxqi", - "url": "images/(image_id)", - "name": "Brightbox Lucid 32", - "status": "available", - "description": "Jeremy's debian ec2 image", - "source": "jeremy_debian-32_ec2", - "source_type": "upload", - "arch": "32-bit", - "created_at": "", - "owner": "acc-bright"}], - "cloud_ips": - [{"id": "cip-ja8ub", - "url": "cloud_ips/(cloud_ip_id)", - "public_ip": "109.107.42.129", - "status": "mapped", - "reverse_dns": "cip-109-107-42-129.gb1.brightbox.com"}], - "interfaces": - [{"id": "int-mc3a9", - "url": "interfaces/(interface_id)", - "mac_address": "02:24:19:6e:18:36", - "ipv4_address": "10.110.24.54"}]} \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/brightbox/list_images.json libcloud-0.15.1/test/compute/fixtures/brightbox/list_images.json --- libcloud-0.5.0/test/compute/fixtures/brightbox/list_images.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/brightbox/list_images.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -[{"id": "img-9vxqi", - "url": "images/(image_id)", - "name": "Brightbox Lucid 32", - "status": "available", - "description": "Jeremy's debian ec2 image", - "source": "jeremy_debian-32_ec2", - "source_type": "upload", - "arch": "32-bit", - "created_at": "", - "owner": "acc-bright", - "ancestor": - {"id": "img-9vxqi", - "url": "images/(image_id)", - "name": "Brightbox Lucid 32", - "status": "available", - 
"description": "Jeremy's debian ec2 image", - "source": "jeremy_debian-32_ec2", - "source_type": "upload", - "arch": "32-bit", - "created_at": "", - "owner": "acc-bright"}}] diff -Nru libcloud-0.5.0/test/compute/fixtures/brightbox/list_servers.json libcloud-0.15.1/test/compute/fixtures/brightbox/list_servers.json --- libcloud-0.5.0/test/compute/fixtures/brightbox/list_servers.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/brightbox/list_servers.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,62 +0,0 @@ -[{"id": "srv-3a97e", - "url": "servers/(server_id)", - "name": "My web server", - "status": "active", - "hostname": "srv-3a97e.gb1.brightbox.com", - "created_at": "", - "deleted_at": "", - "started_at": "", - "account": - {"id": "acc-3jd8s", - "url": "accounts/(account_id)", - "name": "Brightbox Systems Ltd.", - "status": "verified", - "ram_limit": 20480, - "ram_used": 2048, - "limits_cloudips": 5}, - "image": - {"id": "img-9vxqi", - "url": "images/(image_id)", - "name": "Brightbox Lucid 32", - "status": "available", - "description": "Jeremy's debian ec2 image", - "source": "jeremy_debian-32_ec2", - "source_type": "upload", - "arch": "32-bit", - "created_at": "", - "owner": "acc-bright"}, - "server_type": - {"id": "typ-a97e6", - "url": "server_types/(server_type_id)", - "handle": "nano", - "name": "Brightbox Nano", - "status": "", - "cores": 2, - "ram": 2048, - "disk_size": ""}, - "zone": - {"id": "zon-8ja0a", - "url": "zones/(zone_id)", - "handle": "gb1-a"}, - "snapshots": - [{"id": "img-9vxqi", - "url": "images/(image_id)", - "name": "Brightbox Lucid 32", - "status": "available", - "description": "Jeremy's debian ec2 image", - "source": "jeremy_debian-32_ec2", - "source_type": "upload", - "arch": "32-bit", - "created_at": "", - "owner": "acc-bright"}], - "cloud_ips": - [{"id": "cip-ja8ub", - "url": "cloud_ips/(cloud_ip_id)", - "public_ip": "109.107.42.129", - "status": "mapped", - "reverse_dns": 
"cip-109-107-42-129.gb1.brightbox.com"}], - "interfaces": - [{"id": "int-mc3a9", - "url": "interfaces/(interface_id)", - "mac_address": "02:24:19:6e:18:36", - "ipv4_address": "10.110.24.54"}]}] diff -Nru libcloud-0.5.0/test/compute/fixtures/brightbox/list_server_types.json libcloud-0.15.1/test/compute/fixtures/brightbox/list_server_types.json --- libcloud-0.5.0/test/compute/fixtures/brightbox/list_server_types.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/brightbox/list_server_types.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -[{"id": "typ-4nssg", - "url": "server_types/typ-4nssg", - "handle": "nano", - "name": "Brightbox Nano Instance", - "status": "", - "cores": 1, - "ram": 512, - "disk_size": 10240}] diff -Nru libcloud-0.5.0/test/compute/fixtures/brightbox/list_zones.json libcloud-0.15.1/test/compute/fixtures/brightbox/list_zones.json --- libcloud-0.5.0/test/compute/fixtures/brightbox/list_zones.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/brightbox/list_zones.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -[{"id": "zon-8ja0a", - "url": "zones/(zone_id)", - "handle": "gb1-a"}] diff -Nru libcloud-0.5.0/test/compute/fixtures/brightbox/token.json libcloud-0.15.1/test/compute/fixtures/brightbox/token.json --- libcloud-0.5.0/test/compute/fixtures/brightbox/token.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/brightbox/token.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"access_token":"k1bjflpsaj8wnrbrwzad0eqo36nxiha", "expires_in": 3600} diff -Nru libcloud-0.5.0/test/compute/fixtures/cloudsigma/drives_clone.txt libcloud-0.15.1/test/compute/fixtures/cloudsigma/drives_clone.txt --- libcloud-0.5.0/test/compute/fixtures/cloudsigma/drives_clone.txt 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/cloudsigma/drives_clone.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -status active -use 
dbserver,general -name SQL Server Standard 2008 R2 - Windows Server Standard 2008 R2 - 64bit English pub clone -bits 64 -url http://www.microsoft.com/sqlserver/2008/en/us/ -read:bytes 4096 -description Please refer to the install notes for a full guide to initial configuration. -write:bytes 21474840576 -drive a814def5-1789-49a0-bf88-7abe7bb1682a -install_notes ***You must update the default Administrator password for Windows Server Standard 2008 and the Super Administrator password (sa) for SQL Server Standard 2008***\n\nPre-installed Windows Server 2008 Standard R2 64bit English on 15/07/2010\n=========================================================================\n\n1. Minimum Hardware Requirements\n--------------------------------\n\nThe recommended minimum hardware requirements for the use of SQL Server Standard 2008 R2 with Windows Server Standard 2008 R2 as publishes by Microsoft can be found through the following link:\n\nhttp://msdn.microsoft.com/en-us/library/ms143506.aspx\n\n\n2. Update your administrator password\n-------------------------------------\n\nThe default administrator password is set to: CloudSigma1\n\nPlease CHANGE this IMMEDIATELY after first logging on.\n\na) Open the "Control Panel" and select "User Accounts"\n\nb) Select "Change your Windows Password"\n\nc) The Administrator user icon will be shown on the right, select again "Change your Password", and on this screen update your details accordingly\n\n\n3. Expanding your drive\n-----------------------\n\nThe system is fully installed, but you will need to extend the\ndisk partition to cover the whole of your drive. 
To do this:\n\na) Open the "Computer Management" tool from "Administrative Tools" on the "Start" menu.\n\nb) Select "Storage" then "Disk Management" in the left hand pane\n\nc) Right-click on the 12.90 GB NTFS primary partition, and select "Extend Volume"\n\nd) Enter the amount of disk space that you want to increase the Volume size by (the default will be the maximum available)\n\nYou will need to repeat this procedure if you ever resize this drive in the future.\n\n\n4. Enabling Remote Access\n-------------------------\n\nAfter logging in to VNC for the first time to configure your new Windows server, we recommend that if you are logging in from a Windows Desktop that you enable Remote Desktop for better access performance. To do this, follow these instructions:\n\na) Select "Start" | "Applications" | "Control Panel"\n\nb) Select "System and Security"\n\nc) Under "System" select "Allow Remote Access"\n\nd) Select the corresponding connection according to your Security Configuration\n\n\n5. Pinging Service\n------------------\n\nThe Pinging service has been turned on by default, if you wish to disable it please follow these instructions:\n\na) Select the "Windows Firewall with Advanced Security" tool from "Administrative Tools" on the "Start" menu.\n\nb) On the left hand pane, select "Inbound Rules"\n\nc) On the middle pane, select the rules "File and Printer Sharing (Echo Request - ICMPv4-In)" and "File and Printer Sharing (Echo Request - ICMPv6-In)"\n\nd) From the right-hand pane, select "Disable Rules"\n\n\nSQL Server 2008 R2 on 15/07/2010\n================================\n\n1. Change the Super Administrator Password (sa). 
\n--------------------------------------------------------------------\n\nThe default password has been set to "CloudSigma1"\n\na) Open "Microsoft SQL Server Management Studio"\n\nb) Connect to the Server Using "Windows Indentificaiton"\n\nc) From the Object Explorer select "Security" then "Longins"\n\nd) Right-click on sa and select "Properties"\n\ne) Enter the new password into "Password" and "Confirm Password" and press "OK"\n\n\n2. The following features were installed:\n-----------------------------------------------------\n\na) Main features\n\n-Database Engine Services\n-SQL Server Replication\n-Full-Text Search\n-Analysis Services\n-Reporting Services\n\nb) Shared Features\n\n-Business Intelligengce Development Studio\n-Client Tools Connectivity\n-Integration Services\n-Clinet Tools Backwards Compatibility\n-Clinet Tools SDK\n-SQL Server Books Online\n-Mangement Tools - Basic\n-Management Tools - Complete\n-SQL Client Connectivity SDK\n-Microsoft Sync Framework\n\n3 The following services were configured:\n--------------------------------------------------------\n\n\nService: SQL Server Agent\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Manual\n\nService: SQL Server Database Engine\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Automatic\n\nService: SQL Server Analysis Services\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Disabled\n\nService: SQL Server Reporting Services\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Disabled\n\nService: SQL Server Integration Services 10.1\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Disabled\n \nService: SQL Full-text filter Daemon Lanuch\nUser: NT AUTHORITY\LOCALSERVICE\nStartup Type: Disabled\n\nService: SQL Server Browser\nUser: NT AUTHORITY\LOCALSERVICE\nStartup Type: Disabled\n\nFor detailed server installation configuration refer to the following installation log files on the system:\nC:\Program Files\Microsoft SQL Server\100\Setup 
Bootstrap\Log\20100716_162426\Summary_WIN-K0F21FV1C1V_20100716_162426.txt\n -volume 000431a5-46d9-4a67-9c03-3c3402a41992 -host 00043e69-ac57-45b1-8692-75db24064fb9 -os windows -user 93b34fd9-7986-4b25-8bfd-98a50383605d -read:requests 1 -licenses msft_p73_04837 msft_tfa_00009 -type disk -write:requests 5242881 -size 21474836480 diff -Nru libcloud-0.5.0/test/compute/fixtures/cloudsigma/drives_info.txt libcloud-0.15.1/test/compute/fixtures/cloudsigma/drives_info.txt --- libcloud-0.5.0/test/compute/fixtures/cloudsigma/drives_info.txt 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/cloudsigma/drives_info.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -status active -use general -name test node -bits 64 -url http://www.centos.org/ -read:bytes 4096 -description This is a pre-installed ready CentOS system that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. CentOS is an Enterprise-class Linux Distribution derived from sources freely provided to the public by a prominent North American Enterprise Linux vendor. CentOS conforms fully with the upstream vendors redistribution policy and aims to be 100% binary compatible. -write:bytes 21474840576 -os linux -drive 3d18db4b-f9bd-4313-b034-12ae181efa88 -install_notes ***You must update the default root/superuser password for CentOS 5.5 on first login.***\n\nPre-installed CentOS 5.5 64bit Linux on 24/07/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. 
\n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: root\nThe default root/superuser password is set to: CloudSigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\nc) When prompted enter the password 'CloudSigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nd) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ne) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. 
You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 1 -free true -type disk -write:requests 5242881 -size 53687091200 - -status active -use general -name test node -bits 64 -url http://www.centos.org/ -read:bytes 4096 -description This is a pre-installed ready CentOS system that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. CentOS is an Enterprise-class Linux Distribution derived from sources freely provided to the public by a prominent North American Enterprise Linux vendor. CentOS conforms fully with the upstream vendors redistribution policy and aims to be 100% binary compatible. -write:bytes 21474840576 -os linux -drive 3d18db4b-f9bd-4313-b034-12ae181efa99 -install_notes ***You must update the default root/superuser password for CentOS 5.5 on first login.***\n\nPre-installed CentOS 5.5 64bit Linux on 24/07/2010\n============================================================\n\n1. 
Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: root\nThe default root/superuser password is set to: CloudSigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\nc) When prompted enter the password 'CloudSigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nd) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ne) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. 
There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 1 -free true -type disk -write:requests 5242881 -size 103687091200 diff -Nru libcloud-0.5.0/test/compute/fixtures/cloudsigma/drives_single_info.txt libcloud-0.15.1/test/compute/fixtures/cloudsigma/drives_single_info.txt --- libcloud-0.5.0/test/compute/fixtures/cloudsigma/drives_single_info.txt 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/cloudsigma/drives_single_info.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -status active -use general -name test node -bits 64 -url http://www.centos.org/ -read:bytes 4096 -description This is a pre-installed ready CentOS system that can be deployed to your account instantly. 
Please refer to the install notes for a full guide to initial configuration. CentOS is an Enterprise-class Linux Distribution derived from sources freely provided to the public by a prominent North American Enterprise Linux vendor. CentOS conforms fully with the upstream vendors redistribution policy and aims to be 100% binary compatible. -write:bytes 21474840576 -os linux -drive d18119ce_7afa_474a_9242_e0384b160220 -install_notes ***You must update the default root/superuser password for CentOS 5.5 on first login.***\n\nPre-installed CentOS 5.5 64bit Linux on 24/07/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: root\nThe default root/superuser password is set to: CloudSigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\nc) When prompted enter the password 'CloudSigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. 
We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nd) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ne) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. 
-encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 1 -free true -type disk -write:requests 5242881 -size 53687091200 diff -Nru libcloud-0.5.0/test/compute/fixtures/cloudsigma/drives_standard_info.txt libcloud-0.15.1/test/compute/fixtures/cloudsigma/drives_standard_info.txt --- libcloud-0.5.0/test/compute/fixtures/cloudsigma/drives_standard_info.txt 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/cloudsigma/drives_standard_info.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,1735 +0,0 @@ -type cdrom -size 1073741824 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type installcd -status active -description Yoper is a multipurpose high performance operating system which has been carefully optimised for PC's with either 686 or higher processor types. The binaries that come with Yoper have been built from scratch using the original sources combined with the best features of major distros, measuring up to the demanding proliferation of network communications and more intensive digital multimedia, graphics and audio capabilities which are ushering in a new era of business productivity enabled by a new generation of sophisticated microprocessors, and business application tools. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
-volume 00065289-b9c8-4548-8d83-e1891f831f51 -host 000663ee-9fb6-4461-90f6-01327a4aff07 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 102401 -name Yoper 2010 Linux with XFCE Install CD -url http://yoper-linux.org/ -read:bytes 4096 -claim:type shared -drive 7e3e7628-d1e6-47c6-858d-7b54aac5c916 -write:bytes 419434496 -read:requests 1 -os linux - -type cdrom -size 2621440000 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type installcd -status active -description Ultimate Edition, first released in December 2006, is a fork of Ubuntu. The goal of the project is to create a complete, seamlessly integrated, visually stimulating, and easy-to-install operating system. Single-button upgrade is one of several special characteristics of this distribution. Other main features include custom desktop and theme with 3D effects, support for a wide range of networking options, including WiFi and Bluetooth, and integration of many extra applications and package repositories. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
-volume 00065289-b9c8-4548-8d83-e1891f831f51 -host 000663ee-9fb6-4461-90f6-01327a4aff07 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 640001 -name Ultimate 2.6 Linux 64bit Install CD -url http://ultimateedition.info/ -read:bytes 440279040 -claim:type shared -drive 526ed5cb-6fbe-46fb-a064-7707c844d774 -write:bytes 2621444096 -read:requests 107490 -os linux - -type cdrom -size 1073741824 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type installcd -status active -description FreeBSD is a UN*X-like operating system for the i386, IA-64, PC-98, Alpha/AXP, and UltraSPARC platforms based on U.C. Berkeley's "4.4BSD-Lite" release, with some "4.4BSD-Lite2" enhancements. It is also based indirectly on William Jolitz's port of U.C. Berkeley's "Net/2" to the i386, known as "386BSD", though very little of the 386BSD code remains. FreeBSD is used by companies, Internet Service Providers, researchers, computer professionals, students and home users all over the world in their work, education and recreation. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
-volume 00065289-b9c8-4548-8d83-e1891f831f51 -host 000663ee-9fb6-4461-90f6-01327a4aff07 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 168961 -name FreeBSD 8.0 Linux 64bit Install CD -url http://www.freebsd.org/ -read:bytes 479866880 -claim:type shared -drive 95380e4c-4f69-432d-be2b-1965a282bdb9 -write:bytes 692064256 -read:requests 117155 -os other - -type cdrom -size 218103808 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type installcd -status active -description The Fedora 13 x86_64 (64bit) network installation CD -favourite true -install_notes The Fedora 13 network installaton cd will install, through the network, the latest Fedora packages; since it includes the "updates" repo.\n\nThe minimal install option offers great ground to build on top of a very nice base. This configuration is recommended for most servers.\n\nBuild your own and share them wth us! -volume 00065289-b9c8-4548-8d83-e1891f831f51 -host 000663ee-9fb6-4461-90f6-01327a4aff07 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 53249 -name Fedora 13 Linux x86 64bit netinst Install CD -url http://fedoraproject.org/ -read:bytes 1444963840 -claim:type shared -drive 14b1e97f-5bba-4cf1-aec4-7b7b573826c2 -write:bytes 218107904 -read:requests 352119 -os linux - -type cdrom -size 452984832 -use security -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type installcd -status active -description The Untangle Lite package offers a collection of free, open-source software applications to run on the Untangle Server. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. 
You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server.\n -volume 00065289-b9c8-4548-8d83-e1891f831f51 -host 000663ee-9fb6-4461-90f6-01327a4aff07 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 110593 -name Untangle 7.4 Linux 64bit Install CD -url http://www.untangle.com/ -read:bytes 4096 -claim:type shared -drive 06c39099-9f75-40f4-b2e1-6012c87f3579 -write:bytes 452988928 -read:requests 1 -os linux - -type cdrom -size 138412032 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type preinstalled -status active -description Puppy Linux is extraordinarily small, yet quite full featured. Puppy boots into a 64MB ramdisk, and that's it, the whole caboodle runs in RAM. Unlike live CD distributions that have to keep pulling stuff off the CD, Puppy in its entirety loads into RAM. This means that all applications start in the blink of an eye and respond to user input instantly. Puppy Linux has the ability to boot off a flash card or any USB memory device, CDROM, Zip disk or LS/120/240 Superdisk, floppy disks, internal hard drive. It can even use a multisession formatted CD-R/DVD-R to save everything back to the CD/DVD with no hard drive required at all! -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. 
You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 00065289-b9c8-4548-8d83-e1891f831f51 -host 000663ee-9fb6-4461-90f6-01327a4aff07 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 33793 -name Puppy 5.1 Linux Install CD -url www.puppylinux.org -read:bytes 276828160 -claim:type shared -drive 60111502-6ff3-43e1-9485-5be775f81657 -write:bytes 138416128 -read:requests 67585 -os linux - -type cdrom -size 171966464 -use router,general,networking -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type preinstalled -status active -description Vyatta project is a Linux-based router and firewall offering a free community edition and two commercial editions with support. Vyatta has changed the networking world by developing the first commercially supported, open-source router & firewall solution. Vyatta combines the features, performance and reliability of an enterprise router & firewall with the cost savings, flexibility and security of open source. -favourite true -install_notes \nCD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
-volume 00065289-b9c8-4548-8d83-e1891f831f51 -host 000663ee-9fb6-4461-90f6-01327a4aff07 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 41985 -name Vyatta 6.1 Live CD -url www.yyatta.com -read:bytes 687869952 -claim:type shared -drive 8159ab9b-9703-48f6-a206-ac26efe8fdc2 -write:bytes 171970560 -read:requests 167937 -os linux - -type cdrom -size 721420288 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type installcd -status active -description ZenLive Linux is a live cd derived from Zenwalk distribution. Zenwalk Linux (formerly Minislack) is a Slackware-based Linux distribution with focus on Internet applications, multimedia and programming tools. ZenLive Linux LiveCD is a complete system with software for Internet browsing, mail, chat, multimedia and office, as well as for programming in C, Perl, Python and Ruby. The main objectives of Zenwalk Linux are to be simple and fast, provide one application per task, be a complete development and desktop environment and to be small enough to fit on a 400MB ISO image. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC. 
-volume 00065289-b9c8-4548-8d83-e1891f831f51 -host 000663ee-9fb6-4461-90f6-01327a4aff07 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 176129 -name Zenlive 6.4 Linux Install CD -url http://www.zenwalk.org/ -read:bytes 721424384 -claim:type shared -drive fcc2aa68-24ce-438e-8386-1d4e66336155 -write:bytes 721424384 -read:requests 176129 -os linux - -type cdrom -claimed 00059836-5512-4ce2-bf66-4daab2d994e4:guest:2e82c87e-61a1-443c-bc81-5c3167df5c11:ide:0:0 0014dbec-e818-4cf4-b467-d4f0dad10246:guest:3234b1fc-415f-4019-ada1-27781aea8750:ide:0:0 -size 4198498304 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type installcd -status active -description CentOS as a group is a community of open source contributors and users. Typical CentOS users are organisations and individuals that do not need strong commercial support in order to achieve successful operation. CentOS is 100% compatible rebuild of the Red Hat Enterprise Linux, in full compliance with Red Hat's redistribution requirements. CentOS is for people who need an enterprise class operating system stability without the cost of certification and support. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
-volume 00065289-b9c8-4548-8d83-e1891f831f51 -host 000663ee-9fb6-4461-90f6-01327a4aff07 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 1025025 -name CentOS 5.5 Linux 32bit Install DVD -url http://www.centos.org -read:bytes 16706375680 -claim:type shared -drive 6e0e2282-c29a-4d19-97e6-7ddb7cdf0dd2 -write:bytes 4198502400 -read:requests 4078705 -os linux - -type cdrom -size 1073741824 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type installcd,livecd -status active -description Ubuntu is a complete desktop Linux operating system, freely available with both community and professional support. The Ubuntu community is built on the ideas enshrined in the Ubuntu Manifesto: that software should be available free of charge, that software tools should be usable by people in their local language and despite any disabilities, and that people should have the freedom to customise and alter their software in whatever way they see fit."Ubuntu&quot; is an ancient African word, meaning"humanity to others&quot;. The Ubuntu distribution brings the spirit of Ubuntu to the software world. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
-volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 -host 0008d252-5102-43a0-82c6-18e8e2dd2bff -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 179201 -name Ubuntu 10.04 Linux 32bit Install CD -url http://www.ubuntu.com/ -read:bytes 1298436608 -claim:type shared -drive 0e305bb9-f512-4d4a-894c-4a733cae570f -write:bytes 734007296 -read:requests 295036 -os linux - -type cdrom -size 1073741824 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type installcd,livecd -status active -description Chakra is a user-friendly and powerful distribution and live CD based on Arch Linux. It features a graphical installer, automatic hardware detection and configuration, the latest KDE desktop, and a variety of tools and extras. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend using it as an installation CD and not just a live CD.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
-volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 -host 0008d252-5102-43a0-82c6-18e8e2dd2bff -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 204801 -name Chakra Alpha 5 Linux 64bit Install and Live CD -url http://chakra-project.org/ -read:bytes 185200640 -claim:type shared -drive c0856590-c2b1-4725-9448-bba7c74d35dc -write:bytes 838864896 -read:requests 45215 -os linux - -type cdrom -claimed 00043e69-ac57-45b1-8692-75db24064fb9:guest:4c014a4e-615e-489e-b22a-bf966bce83d7:ide:0:0 00016115-af87-452b-a3bf-3affc8a7d934:guest:0a9d4833-fc5f-4825-9626-5a3e6555d329:ide:0:0 0014dbec-e818-4cf4-b467-d4f0dad10246:guest:71d09667-fd6b-491a-949f-6a7ab9c70907:ide:0:0 0008d252-5102-43a0-82c6-18e8e2dd2bff:guest:c8264872-67a1-4452-a736-8dc6ef9eb07d:ide:0:0 00016115-af87-452b-a3bf-3affc8a7d934:guest:6efe92c3-0126-4ddb-9140-73706c804c3b:ide:0:0 000932a7-c74f-4de3-bfc4-227435f78998:guest:158c515f-1649-44f0-895c-f0de39575a1c:ide:0:0 00079b57-1b29-4a89-a8d0-1d648fc20804:guest:7d62f26e-2062-469e-846a-b926dffb00b1:ide:0:0 -size 4697620480 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type installcd -status active -description - -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
-volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 -host 0008d252-5102-43a0-82c6-18e8e2dd2bff -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 1146881 -name Debian Linux 5.0 Install CD -url http://www.debian.org/ -read:bytes 4612921344 -claim:type shared -drive 794a068d-228c-4758-81f0-e1bc955a6cce -write:bytes 4697624576 -read:requests 985768 -os linux - -type cdrom -size 2751463424 -use dev,general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type livecd -status active -description Scientific Linux is a recompiled Red Hat Enterprise Linux put together by various labs and universities around the world. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC.\n -volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 -host 0008d252-5102-43a0-82c6-18e8e2dd2bff -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 674125 -name Scientific Linux 5.5 64bit Live CD -url https://www.scientificlinux.org/ -read:bytes 10903552 -claim:type shared -drive 7aa74ca3-4c64-4b08-9972-eddeb38a650d -write:bytes 2761216000 -read:requests 2662 -os linux - -type cdrom -size 612368384 -use networking,other -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type installcd -status active -description Nexenta OS is a free and open source operating system combining the OpenSolaris kernel with GNU application userland. Nexenta OS runs on Intel/AMD 32-/64-bit hardware and is distributed as a single installable CD. 
Upgrades and binary packages not included on the CD can be installed from Nexenta OS repository using Advanced Packaging Tool. In addition, source based software components can be downloaded from network repositories available at Debian/GNU Linux and Ubuntu Linux. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 -host 0008d252-5102-43a0-82c6-18e8e2dd2bff -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 149357 -name NexentaStor 3.0.3 Linux 32bit/64bit Install CD -url http://www.nexenta.org/ -read:bytes 2822144 -claim:type shared -drive 2c3369a5-22eb-4462-8137-35a62b7a93cf -write:bytes 611766272 -read:requests 689 -os other - -type cdrom -size 301989888 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type installcd -status active -description Zenwalk Linux (formerly Minislack) is a Slackware-based GNU/Linux operating system with a goal of being slim and fast by using only one application per task and with focus on graphical desktop and multimedia usage. Zenwalk features the latest Linux technology along with a complete programming environment and libraries to provide an ideal platform for application programmers. Zenwalk's modular approach also provides a simple way to convert Zenwalk Linux into a finely-tuned modern server (e.g. 
LAMP, messaging, file sharing). -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 -host 0008d252-5102-43a0-82c6-18e8e2dd2bff -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 73342 -name Zenwalk Core 6.4 Install CD -url http://yoper-linux.org/ -read:bytes 1576960 -claim:type shared -drive 3d58f1c6-9ec4-4963-917e-9917d39e5003 -write:bytes 300408832 -read:requests 385 -os linux - -type cdrom -size 67108864 -use general,security -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type preinstalled -status active -description IPFire is a linux-distribution that focusses on easy setup, good handling and a high niveau of security. It is operable via an intuitive webinterface, which offers a lot of playground for beginners and even experienced administrators. IPFire is maintained by experienced developers, who are really concerned about security and regulary updates to keep it secure. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. 
Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 -host 0008d252-5102-43a0-82c6-18e8e2dd2bff -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 16385 -name IPFire 2.7 Core 40 Linux 32bit Install CD -url http://www.ipfire.org/ -read:bytes 4096 -claim:type shared -drive 231aa9af-f2ef-407c-9374-76a1215b94d3 -write:bytes 67112960 -read:requests 1 -os linux - -type cdrom -size 734003200 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type installcd -status active -description Legacy OS (formerly TEENpup Linux) is a distribution based on Puppy Linux. Although the original concept was to create a flavour of Puppy Linux with more applications and a more appealing desktop aimed at teenage users, Legacy OS has now grown to become a general purpose distribution. It comes with a large number of applications, browser plugins and media codecs as standard software. Despite these enhancements Legacy OS is still perfectly suitable for installation on older and low-resource computers, as well as modern hardware. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC. 
-volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 -host 0008d252-5102-43a0-82c6-18e8e2dd2bff -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 179201 -name Legacy OS Linux 32bit Install CD -url http://pupweb.org/wikka/TeenPup -read:bytes 4096 -claim:type shared -drive 39f24226-dc6c-40e2-abc8-e8f2da976671 -write:bytes 734007296 -read:requests 1 -os linux - -type cdrom -size 209715200 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type installcd -status active -description Yohsuke Ooi has announced the release of Momonga Linux 7, a Japanese community distribution loosely modelled on Fedora. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
-volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 -host 0008d252-5102-43a0-82c6-18e8e2dd2bff -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 51201 -name Momonga 7 Linux 64bit Net Install CD -url http://www.momonga-linux.org/ -read:bytes 4096 -claim:type shared -drive f424888b-e66e-43f4-99c1-2991a5b82894 -write:bytes 209719296 -read:requests 1 -os linux - -type cdrom -size 713031680 -use general,security,systemrecovery -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type livecd -status active -description CAINE (Computer Aided INvestigative Environment) is an Ubuntu-based GNU/Linux live distribution created as a project of Digital Forensics for Interdepartmental Centre for Research on Security (CRIS), supported by the University of Modena and Reggio Emilia in Italy. The CAINE forensic framework contains a collection of tools wrapped up into a user-friendly environment. It introduces novel features - it aims to fill the interoperability gap across different forensic tools, it provides a homogeneous GUI that guides digital investigators during the acquisition and analysis of electronic evidence, and it offers a semi-automatic process for the documentation and report compilation. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC. 
-volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 -host 0008d252-5102-43a0-82c6-18e8e2dd2bff -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 174081 -name Caine 2.0 Linux 32bit Live CD -url http://www.caine-live.net/ -read:bytes 4096 -claim:type shared -drive 9768a0d1-e90c-44eb-8da7-06bca057cb93 -write:bytes 713035776 -read:requests 1 -os linux - -type cdrom -size 708837376 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type installcd -status active -description Kongoni GNU/Linux is a Slackware-based, desktop-oriented GNU/Linux distribution and live CD. Its main features include a graphical installer, a Kongoni Integrated Setup System (KISS), and an easy-to-use Ports Installation GUI (PIG). The distribution's package management borrows its main concepts from BSD ports, with an intuitive graphical package installer that compiles and installs programs from source code on the user's system. Kongoni, which means gnu (also known as wildebeest) in Shona, includes only software that complies with Free Software Foundation's definition of software freedom. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
-volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 -host 0008d252-5102-43a0-82c6-18e8e2dd2bff -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 173057 -name Kongoni 1.12.3 Linux 32bit Live CD -url http://www.kongoni.org/ -read:bytes 4096 -claim:type shared -drive 6ac51b9d-a1db-44fc-b325-30bdefd0dd0a -write:bytes 708841472 -read:requests 1 -os linux - -type cdrom -size 1073741824 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type livecd -status active -description Debris Linux is a minimalist, desktop-oriented distribution and live CD based on Ubuntu. It includes the GNOME desktop and a small set of popular desktop applications, such as GNOME Office, Firefox web browser, Pidgin instant messenger, and ufw firewall manager. Debris Linux ships with a custom kernel, a custom system installer called DebI, and a script that makes it easy to save and restore any customisations made while in live mode. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC. 
-volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name Debris 2.0 Linux Live CD -url http://debrislinux.org/ -read:bytes 0 -claim:type shared -drive 258e1026-36bf-4368-ba7c-52836de4f757 -write:bytes 0 -read:requests 0 -os linux - -type cdrom -size 1887436800 -use systemrecovery,general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type livecd -status active -description Toorox is a Linux Live-DVD based on Gentoo that starts as a bootable media using KNOPPIX technology. While the system is booting, all necessary drivers will be included automatically (lshwd). Toorox is only using the memory and an existing swap partition at runtime, so your hard disks won't be touched by default. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. 
You will be able to complete the software installation via VNC and start using your new server.\n -volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name Toorox 05.2010 Linux 64bit Live CD -url http://toorox.de/ -read:bytes 0 -claim:type shared -drive 8fa3bc29-47e8-496a-89c6-02872a0d2642 -write:bytes 0 -read:requests 0 -os linux - -type cdrom -size 2516582400 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type installcd -status active -description FreeBSD is a UN*X-like operating system for the i386, IA-64, PC-98, Alpha/AXP, and UltraSPARC platforms based on U.C. Berkeley's"4.4BSD-Lite&quot; release, with some"4.4BSD-Lite2&quot; enhancements. It is also based indirectly on William Jolitz's port of U.C. Berkeley's"Net/2&quot; to the i386, known as"386BSD&quot;, though very little of the 386BSD code remains. FreeBSD is used by companies, Internet Service Providers, researchers, computer professionals, students and home users all over the world in their work, education and recreation. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
-volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name FreeBSD 7.3 Linux 64bit Install CD -url http://www.freebsd.org/ -read:bytes 13836288 -claim:type shared -drive 92444414-dc65-451d-9018-2b1ab8db4ceb -write:bytes 0 -read:requests 3378 -os other - -type cdrom -size 1073741824 -use systemrecovery,security -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type installcd,livecd -status active -description KANOTIX is a Linux distribution based on the latest stable release of Debian GNU/Linux. It is built on top of a latest kernel which is carefully patched with fixes and drivers for most modern hardware. Although it can be used as a live CD, it also includes a graphical installer for hard disk installation. The user-friendly nature of the distribution is further enhanced by a custom-built control centre and specialist scripts. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend using it as an installation CD and not just a live CD.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
-volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name Kanotix 4-2.6 Linux with KDE 64bit Install and Live CD -url http://www.kanotix.com/ -read:bytes 232169472 -claim:type shared -drive c7c33c07-5e28-42c8-9800-eb40e2aef287 -write:bytes 0 -read:requests 56682 -os linux - -type cdrom -size 1073741824 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type installcd -status active -description TinyMe is a Unity Linux-based mini-distribution. It exists to ease installation of Unity Linux on older computers, to provide a minimal installation for developers, and to deliver a fast Linux installation for where only the bare essentials are needed. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
-volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name TinyMe 2010 Linux Install CD -url http://tinymelinux.com/ -read:bytes 0 -claim:type shared -drive 87b3f98c-c95c-454d-a002-bef63f5bbc1a -write:bytes 0 -read:requests 0 -os linux - -type cdrom -size 1073741824 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type installcd -status active -description moonOS is a complete, Ubuntu-based distribution featuring the LXDE and Enlightenment 17 desktop managers and imaginative, original artwork. A project created and designed by Cambodian artist Chanrithy Thim, moonOS is intended as an operating system for any desktop, laptop or virtual machine. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
-volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name MoonOS 3 Linux 32bit Install CD -url http://www.moonos.org/ -read:bytes 0 -claim:type shared -drive d2651d5b-3760-41be-a8b0-6fe5ca208825 -write:bytes 0 -read:requests 0 -os linux - -type cdrom -size 1073741824 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type installcd -status active -description - -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name Yoper 2010 Linux with KDE3 Install CD -url http://yoper-linux.org/ -read:bytes 0 -claim:type shared -drive 50e0ca32-c04a-47e3-be37-1cd6f0ad9ff8 -write:bytes 0 -read:requests 0 -os linux - -type cdrom -size 1073741824 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type installcd -status active -description Easy Peasy is an Ubuntu based operating system for netbooks. It's optimized for netbooks and favors the best software available by delivering Firefox with Flash and Java, Skype, Google Picasa, Songbird etc. out of the box. 
-favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name EasyPeasy 1.6 Linux Install CD -url http://www.geteasypeasy.com/ -read:bytes 195153920 -claim:type shared -drive daac6531-8f59-4c96-baa0-6545350d5a5e -write:bytes 0 -read:requests 47645 -os linux - -type cdrom -size 1572864000 -use email,general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type installcd -status active -description Calculate Linux is a Gentoo-based family of three distinguished distributions. Calculate Directory Server (CDS) is a solution that supports Windows and Linux clients via LDAP + SAMBA, providing proxy, mail and Jabbers servers with streamlined user management. Calculate Linux Desktop (CLD) is a workstation and client distribution with KDE, GNOME or Xfce desktop that includes a wizard to configure a connection to Calculate Directory Server. Calculate Linux Scratch (CLS) is live CD with a build framework for creating a custom distribution. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. 
Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name Calculate 10.4 Linux 64bit Install CD -url http://www.calculate-linux.org/ -read:bytes 16932864 -claim:type shared -drive 20f5b0dd-5c63-40aa-97b8-5b34e5107a25 -write:bytes 0 -read:requests 4134 -os linux - -type cdrom -size 734003200 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type installcd -status active -description KahelOS is a Linux distribution based on Arch Linux. Its desktop edition comes with pre-configured GNOME as the default desktop environment, GNOME Office productivity suite, Epiphany web browser, GIMP image manipulation program, and other popular GTK+ and GNOME applications. Like Arch Linux, KahelOS maintains a rolling-release model of updating software packages using its parent's repositories. A server edition is also available. Both the desktop and server editions come in the form of installation CDs with text-based installers, but no live media. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. 
You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name KahelOS 05-01-2010 Linux 64bit Install CD -url http://www.kahelos.org/ -read:bytes 0 -claim:type shared -drive 1ddaedbf-ceb8-43b5-a587-e9e635d97f50 -write:bytes 0 -read:requests 0 -os linux - -type cdrom -size 524288000 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type installcd,livecd -status active -description PCLinuxOS is a user-friendly, Mandriva-based Linux distribution with out-of-the-box support for many popular graphics and sound cards, as well as other peripheral devices. The bootable live CD provides an easy-to-use graphical installer and the distribution sports a wide range of popular applications for the typical desktop user, including browser plugins and full multimedia playback. The intuitive system configuration tools include Synaptic for package management, Addlocale to add support to many languages, Getopenoffice to install the latest OpenOffice.org, and Mylivecd to create a customised live CD. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend using it as an installation CD and not just a live CD.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. 
Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name PCLinuxOS 2010.1 Linux with KDE Install and Live CD -url http://www.pc-os.org/ -read:bytes 0 -claim:type shared -drive 3e0f427e-10eb-4277-bc3b-48f054908a09 -write:bytes 0 -read:requests 0 -os linux - -type cdrom -size 524288000 -use multimedia,general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type livecd -status active -description Peppermint was designed for enhanced mobility, efficiency and ease of use. While other operating systems are taking 10 minutes to load, you are already connected, communicating and getting things done. And, unlike other operating systems, Peppermint is ready to use out of the box. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. 
You will be able to start using your new server via VNC.\n -volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name Peppermint 1.0 Linux Live CD -url http://peppermintos.com/ -read:bytes 0 -claim:type shared -drive 92ffa2f6-f663-49d9-98ec-dc0b474369c4 -write:bytes 0 -read:requests 0 -os linux - -type cdrom -size 419430400 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type livecd -status active -description PureOS and PureOSlight are GNU/Linux live CDs based on Debian's testing repository. These are desktop distributions that can be used as live media (CD or USB) or as full-featured operating systems installed on a hard disk. PureOS is a 700 MB live CD with KDE, Iceweasel, Icedove, OpenOffice.org, Songbird, VLC and K3B. PureOSlight is a small 300 MB live CD with Xfce, Iceweasel, Icedove, AbiWord, Gnumeric and Exaile. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. 
You will be able to start using your new server via VNC.\n -volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name PureOS 2.0 Linux Live CD -url http://www.pureos.org/ -read:bytes 100663296 -claim:type shared -drive ed6421b5-41c2-4ba3-a3c9-7c330d36e5b3 -write:bytes 0 -read:requests 24576 -os linux - -type cdrom -size 104857600 -use dev,general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type installcd -status active -description ReactOSĀ® is a free, modern operating system based on the design of WindowsĀ® XP/2003. Written completely from scratch, it aims to follow the Windows-NTĀ® architecture designed by Microsoft from the hardware level right through to the application level. This is not a Linux based system, and shares none of the unix architecture. The main goal of the ReactOS project is to provide an operating system which is binary compatible with Windows. This will allow your Windows applications and drivers to run as they would on your Windows system. Additionally, the look and feel of the Windows operating system is used, such that people accustomed to the familiar user interface of WindowsĀ® would find using ReactOS straightforward. The ultimate goal of ReactOS is to allow you to remove WindowsĀ® and install ReactOS without the end user noticing the change. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. 
You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name ReactOS 0.3.11 Alpha Install CD -url http://www.reactos.org/ -read:bytes 0 -claim:type shared -drive 327fd7dd-a2ca-4437-b87e-7610fccc3202 -write:bytes 0 -read:requests 0 -os other - -type cdrom -size 1887436800 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type installcd -status active -description Ubuntu Studio is a variant of Ubuntu aimed at the GNU/Linux audio, video and graphic enthusiast as well as professional. The distribution provides a collection of open-source applications available for multimedia creation. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
-volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name Ubuntu Studio 10.04 Linux 32bit Install CD -url http://www.ubuntu.com/ -read:bytes 499675136 -claim:type shared -drive c6a368d1-cae6-43d9-8af6-b42142aed4b9 -write:bytes 0 -read:requests 121991 -os linux - -type cdrom -size 1073741824 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -bits 32 -drive_type livecd -status active -description Vector Linux is a small, fast, Intel based Linux operating system for PC style computers. The creators of Vector Linux had a single credo: keep it simple, keep it small and let the end user decide what their operating system is going to be. What has evolved from this concept is perhaps the best little Linux operating system available anywhere. For the casual computer user you have a lightening fast desktop with graphical programs to handle your daily activities from web surfing, sending and receiving email, chatting on ICQ or IRC to running an ftp server. The power user will be pleased because all the tools are there to compile their own programs, use the system as a server or perhaps the gateway for their home or office computer network. Administrators will be equally as pleased because the small size and memory requirements of the operating system can be deployed on older machines maybe long forgotten. 
-favourite true -free true -volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name Vector 6.0 Linux with KDE Live CD -url http://www.vectorlinux.com/ -read:bytes 0 -claim:type shared -drive 0aa0b75d-ce40-4877-9882-8a81443911fe -write:bytes 0 -read:requests 0 -os linux - -type cdrom -size 713031680 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type installcd -status active -description Greenie Linux is an Ubuntu-based distribution customised for Slovak and Czech users. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name Greenie 7 Linux 32bit Install CD -url http://www.greenie.sk/ -read:bytes 0 -claim:type shared -drive bdddc973-e84f-4cbc-a2c9-a9fce73bc462 -write:bytes 0 -read:requests 0 -os linux - -type cdrom -size 67108864 -use networking,gateway -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type installcd,livecd -status active -description pfSense is a free, open source customized distribution ofĀ FreeBSDĀ tailored for use as a firewall and router. 
In addition to being a powerful, flexible firewalling and routing platform, it includes a long list of related features and a package system allowing further expandability without adding bloat and potential security vulnerabilities to the base distribution. pfSense is a popular project with more than 1 million downloads since its inception, and proven in countless installations ranging from small home networks protecting a PC and an Xbox to large corporations, universities and other organizations protecting thousands of network devices.Ā  -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend using it as an installation CD and not just a live CD.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name PfSense 1.2.3 Linux 32bit Live and Install CD -url http://www.pfsense.org -read:bytes 68657152 -claim:type shared -drive db46ea0d-26f3-4cd0-8a55-54da2af10363 -write:bytes 0 -read:requests 16762 -os linux - -type cdrom -size 46137344 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type installcd -status active -description Mandrake Linux was created in 1998 with the goal of making Linux easier to use for everyone. 
At that time, Linux was already well-known as a powerful and stable operating system that demanded strong technical knowledge and extensive use of the "command line"; MandrakeSoft saw this as an opportunity to integrate the best graphical desktop environments and contribute its own graphical configuration utilities and quickly became famous for setting the standard in ease-of-use and functionality. Mandriva Linux, formerly known as Mandrakelinux, is a friendly Linux Operating System which specializes in ease-of-use for both servers and the home/office. It is freely available in many languages throughout the world. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name Mandriva Spring 2010 Linux 64bit Net Install CD -url http://mandriva.com/ -read:bytes 19488768 -claim:type shared -drive 857456e4-e16c-4a6f-9bfc-f5be3e58bde5 -write:bytes 0 -read:requests 4758 -os linux - -type cdrom -size 1606418432 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type installcd -status active -description UHU-Linux is the leading distribution of Linux in Hungary. 
It is primarily intended for Hungarian users, thus special care is taken to support the Hungarian language as much as possible. Ease of installation and sane default settings both help new users of Linux and make veterans feel comfortable. Usability as the main goal involves having all the cutting-edge yet stable releases of Open Source packages, with dpkg as the package manager. Development is completely open and everyone is invited to join. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 392193 -name Uhu Linux 2.2 32bit Install CD -url http://uhulinux.hu/ -read:bytes 354873344 -claim:type shared -drive 9d99705b-818a-49f8-8c77-0cd4a42cdea6 -write:bytes 1606422528 -read:requests 86639 -os linux - -type cdrom -size 734003200 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type installcd -status active -description Chakra, a fast, user-friendly and extremely powerful Live CD and/or distrolet based on the award winning KDE Software Compilation and on the GNU/Linux distribution for connoisseurs: Arch Linux. Currently in alpha stage, it features a graphical installer, automatic hardware configuration, and of course some more tools and extras. 
-favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC. -volume 00106cda-0e17-40c8-a576-b516f0eb67bc -host 00109617-2c6b-424b-9cfa-5b572c17bafe -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 179201 -name Chakra 2.0 Linux Live CD -url http://www.chakra-project.org/ -read:bytes 4096 -claim:type shared -drive fdfa8104-05fb-4210-aba5-fe78c4e6ee8c -write:bytes 734007296 -read:requests 1 -os linux - -type cdrom -size 662700032 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 32 -drive_type installcd -status active -description UberStudent ("uber" meaning "productive" in Latin) is an Ubuntu-based distribution on a DVD designed for learning and teaching academic computing at the higher education and advanced secondary levels. UberStudent comes with software for everyday computing tasks, plus a core set of programs and features designed to teach and make easier the tasks and habits common to high-performing students of all academic disciplines. Lifelong learners, as well as any sort of knowledge worker, will equally benefit. UberStudent is supported by a free Moodle virtual learning environment. -favourite true -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. 
Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 0013fc75-b170-4d62-abaf-804b8fc466cc -host 001318df-35c6-439f-8e72-8d57c36ca86b -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 161793 -name UberStudent 1.0 Linux with LXDE 32bit Install CD -url http://www.uberstudent.org/ -read:bytes 4096 -claim:type shared -drive 854a9706-fb14-4868-80df-53d712f1531a -write:bytes 662704128 -read:requests 1 -os linux - -type disk -size 3221225472 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type preinstalled -status active -description This is a pre-installed ready Fedora system that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. The Fedora Project is an openly-developed project designed by Red Hat, open for general participation, led by a meritocracy, following a set of project objectives. The goal of The Fedora Project is to work with the Linux community to build a complete, general purpose operating system exclusively from open source software. Development will be done in a public forum. The project will produce time-based releases of Fedora about 2-3 times a year, with a public release schedule. The Red Hat engineering team will continue to participate in building Fedora and will invite and encourage more outside participation than in past releases. By using this more open process, we hope to provide an operating system more in line with the ideals of free software and more appealing to the open source community. 
-favourite true -install_notes ***You must update the default root/superuser password for Fedora 13 on first login.***\n\nPre-installed Fedora 13 64bit Linux on 24/07/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Complete the personalisation of your new server\n---------------------------------------------------------------------\nUpon first start-up you should be presented with the welcome screen after the initial boot process has completed. You will now have the opportunity to personalise your system installation. \na) Click the 'forward' button to get started.\n\nb) You will now see the license information for this system. Fedora 13 has an open source GNU license. Assuming this is acceptable you should click the 'forward' button again.\n\nc) You can now create your own user account, enter your name and set the password. Please note:\n\nTHIS IS NOT THE ADMINISTRATIVE ACCOUNT. YOU SHOULD RESET THE ROOT/SUPERUSER PASSWORD AS OUTLINED IN STEP 4 BELOW AFTER COMPLETING STEP 3.\n\nd) After clicking forward again you will have the opportunity to set the time servers that will set your servers time. 
You can just leave the default values unless you have some specific needs. Once you are happy please click the 'forward' button.\n\ne) Finally you have the option to submit your hardware profile to the Fedora Project to help with their development. This is entirely your personal choice. Either way once you are ready click the 'finish' button.\n\n4. Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: cloudsigma\nThe default root/superuser password is set to: cloudsigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\nsu root\n\nWhen prompted use the password 'cloudsigma' (don't type the quote marks). The prompt should change to show you now using the root/superuser account.\n\nc) Now type the command:\n\npasswd\n\nd) When prompted enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\ne) CoudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\nf) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n5. Expanding your drive\n--------------------------------\n\nThe system is fully installed and designed to expand to fill the initial size of the drive. After first login you should find that the system has used the full disk size as its partition.\n\n\n6. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n7. 
Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. -volume 0003ca60-6b03-4da9-a409-84d8d7afa738 -host 00031836-a624-4b22-bc7d-41ff8977087b -user 00000000-0000-0000-0000-000000000001 -autoexpanding true -write:requests 786433 -name Fedora 13 Linux 64bit Preinstalled System -url http://fedoraproject.org/ -read:bytes 40962080768 -claim:type shared -drive d18119ce-7afa-474a-9242-e0384b160220 -write:bytes 3221229568 -read:requests 10000508 -os linux - -type disk -size 4294967296 -use dbserver,webserver,email,general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type preinstalled -status active -favourite true -install_notes ***You must update the default root/superuser password for Debian 5.0 on first login.***\n\nPre-installed Debian 5.0 64bit Linux on 02/08/2010\n========================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n------------------------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. 
\n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Update your root/superuser password\n-------------------------------------------------------\n\nThe default accounta are: root and cloudsigma\nThe default passwords for both accounts is set to: cloudsigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\nc) When prompted enter the password 'cloudsigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nd) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ne) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. 
You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. -volume 0003ca60-6b03-4da9-a409-84d8d7afa738 -host 00031836-a624-4b22-bc7d-41ff8977087b -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 1048577 -name Debian 5.0 Preinstalled without X -url www.debian.org -read:bytes 35180666880 -claim:type shared -drive fd49670e-17e8-4b0e-b03e-d6a65c138445 -write:bytes 4294971392 -read:requests 8589030 -os linux - -type disk -size 21474836480 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type preinstalled -status active -description This is a pre-installed ready CentOS system that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. CentOS is an Enterprise-class Linux Distribution derived from sources freely provided to the public by a prominent North American Enterprise Linux vendor. CentOS conforms fully with the upstream vendors redistribution policy and aims to be 100% binary compatible. -favourite true -install_notes ***You must update the default root/superuser password for CentOS 5.5 on first login.***\n\nPre-installed CentOS 5.5 64bit Linux on 24/07/2010\n============================================================\n\n1. 
Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: root\nThe default root/superuser password is set to: CloudSigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\nc) When prompted enter the password 'CloudSigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nd) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ne) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. 
There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. -volume 000431a5-46d9-4a67-9c03-3c3402a41992 -host 00043e69-ac57-45b1-8692-75db24064fb9 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 5242881 -name CentOS 5.5 Linux 64bit Preinstalled System -url http://www.centos.org/ -read:bytes 251925499904 -claim:type shared -drive 1ea7dead-9d52-4e79-9a9b-435db7cc972c -write:bytes 21474840576 -read:requests 61505249 -os linux - -type disk -size 2684354560 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type preinstalled -status active -description This is a pre-installed ready Ubuntu system that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. 
Ubuntu Linux is a complete desktop Linux operating system, freely available with both community and professional support. The Ubuntu community is built on the ideas enshrined in the Ubuntu Manifesto: that software should be available free of charge, that software tools should be usable by people in their local language and despite any disabilities, and that people should have the freedom to customise and alter their software in whatever way they see fit. "Ubuntu" is an ancient African word, meaning "humanity to others". The Ubuntu Linux distribution brings the spirit of Ubuntu to the software world. -favourite true -install_notes ***You must update the default root/superuser password for Ubuntu 10.04 on first login.***\n\nPre-installed Ubuntu 10.04 64bit Linux on 24/07/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. 
Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: cloudsigma\nThe default root/superuser password is set to: cloudsigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\nsudo su\n\nWhen prompted use the password 'cloudsigma' (don't type the quote marks). The prompt should change to show you now using the root/superuser account.\n\nc) Now type the command:\n\npasswd\n\nd) When prompted enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\ne) CoudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\nf) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed and designed to expand to fill the initial size of the drive. After first login you should find that the system has used the full disk size as its partition.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. 
We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. -volume 00059deb-640a-464e-9509-6a3ec6cfd549 -host 00059836-5512-4ce2-bf66-4daab2d994e4 -user 00000000-0000-0000-0000-000000000001 -autoexpanding true -write:requests 655361 -name Ubuntu Linux 10.04 Desktop 64bit Preinstalled System -url http://www.ubuntu.com/ -read:bytes 24617140224 -claim:type shared -drive 99a75966-209f-41d5-817c-7a3916354540 -write:bytes 2684358656 -read:requests 6010044 -os linux - -type disk -size 8589934592 -use dbserver,webserver,email,general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type preinstalled -status active -favourite true -install_notes ***You must update the default root/superuser password for Ubuntu 10.04 on first login.***\n\nPre-installed Ubuntu 10.04 64bit Linux on 01/09/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. 
\n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: cloudsigma\nThe default root/superuser password is set to: cloudsigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\nsudo su\n\nWhen prompted use the password 'cloudsigma' (don't type the quote marks). The prompt should change to show you now using the root/superuser account.\n\nc) Now type the command:\n\npasswd\n\nd) When prompted enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\ne) CoudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\nf) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed and designed to expand to fill the initial size of the drive. After first login you should find that the system has used the full disk size as its partition.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. 
You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. -volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 -host 0008d252-5102-43a0-82c6-18e8e2dd2bff -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 2097153 -name Ubuntu 10.04 Server Edition Linux 64bit -url http://www.ubuntu.com/server -read:bytes 71391387648 -claim:type shared -drive 0b060e09-d98b-44cc-95a4-7e3a22ba1b53 -write:bytes 8589938688 -read:requests 17429538 -os linux - -type disk -size 21474836480 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type preinstalled -status active -description This is a pre-installed ready CentOS system including AppFirst monitoring software that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. CentOS is an Enterprise-class Linux Distribution derived from sources freely provided to the public by a prominent North American Enterprise Linux vendor. CentOS conforms fully with the upstream vendors redistribution policy and aims to be 100% binary compatible. 
-favourite true -install_notes ***You must update the default root/superuser password for CentOS 5.5 on first login.***\n\nPre-installed CentOS 5.5 64bit Linux with AppFirst Monitoring on 24/07/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: root\nThe default root/superuser password is set to: CloudSigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\ne) When prompted enter the password 'CloudSigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nf) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ng) Please note your root/superuser password is different from your VNC password. 
You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs.\n\n\n6. AppFirst\n-------------------------\nThis disk image includes AppFirst monitoring software already installed. This software is able to provide in-depth server and application performance feedback. In order to take advantage of this software you need to have an AppFirst account.\n\nFull details of AppFirst's services including a 14-day free trial are available at http://www.appfirst.com . 
-volume 00106cda-0e17-40c8-a576-b516f0eb67bc -host 00109617-2c6b-424b-9cfa-5b572c17bafe -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 5242881 -name CentOS 5.5 Linux 64bit Preinstalled System with AppFirst Monitoring -read:bytes 838707331072 -claim:type shared -drive c157e1eb-aa9c-4dd7-80b8-6fd4a238f2a9 -write:bytes 21474840576 -read:requests 204762532 -os linux - -type disk -size 8589934592 -use dbserver,webserver,email,general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free true -bits 64 -drive_type preinstalled -status active -favourite true -install_notes ***You must update the default root/superuser password for Debian 5.0 on first login.***\n\nPre-installed Debian 5.0 64bit Linux on 02/08/2010\n========================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n------------------------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. 
Update your root/superuser password\n-------------------------------------------------------\n\nThe default accounta are: root and cloudsigma\nThe default passwords for both accounts is set to: cloudsigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\nc) When prompted enter the password 'cloudsigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nd) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ne) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. 
We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. -volume 001118bb-dbdb-4ab0-b7db-d4cceb160098 -host 00115b1d-6fe9-40b2-a013-426a6a584ff7 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 2097153 -name Debian 5.0 Preinstalled -url www.debian.org -read:bytes 71179878400 -claim:type shared -drive 9b732c4e-32a3-4369-b5f7-9a0325195baa -write:bytes 8589938688 -read:requests 17377900 -os linux - -type cdrom -claimed 0002c6df-a1d2-4d1d-96f0-f95405a28183:guest:7055acf3-8d9a-4a99-a24f-dda1aaf37944:ide:0:0 00115b1d-6fe9-40b2-a013-426a6a584ff7:guest:0a486768-08c1-419d-ad9c-1c8143df3496:ide:0:0 -size 2248146944 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -bits 64 -email drives@elastichosts.com -drive_type installcd -status active -description - -favourite false -free false -volume 0007aee7-bd5b-4551-9d8f-a958051235a9 -host 00079b57-1b29-4a89-a8d0-1d648fc20804 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name Windows Web Server 2008 Trial Install CD -url http://www.microsoft.com -read:bytes 55097581056 -claim:type shared -drive 7aead6d3-c3e6-4940-85c7-f5ee61f6ef2b -write:bytes 0 -read:requests 22364695 -os windows - -type cdrom -claimed 0014dbec-e818-4cf4-b467-d4f0dad10246:guest:6100e29c-708d-4a5b-821b-6a9faa3ba013:ide:0:1 00031836-a624-4b22-bc7d-41ff8977087b:guest:fcde7569-e034-452c-9909-7c485f5d168f:ide:0:0 00154bbf-adf4-475d-9fe6-71c1e987d1bd:guest:1ac4641e-aa67-47f2-a77d-e9c5982d68b2:ide:0:0 0012c12d-72b1-4dfc-ae0f-aeab09881545:guest:300989f8-da5c-42a6-91f8-97e87b85b748:ide:0:1 
00016115-af87-452b-a3bf-3affc8a7d934:guest:f679b4ba-a4de-4254-90d1-27396aac8712:ide:0:0 00016115-af87-452b-a3bf-3affc8a7d934:guest:65e59c8b-579b-4977-b60c-b3b7eb404026:ide:0:0 00016115-af87-452b-a3bf-3affc8a7d934:guest:76eaf2fe-554a-4d3f-a3ef-a1214e878793:ide:0:0 00154bbf-adf4-475d-9fe6-71c1e987d1bd:guest:030cfdda-9c6c-4622-a68c-2e3588fbe828:ide:0:0 00109617-2c6b-424b-9cfa-5b572c17bafe:guest:64a5375a-31cc-414f-9e14-006b5c39b51f:ide:0:0 00059836-5512-4ce2-bf66-4daab2d994e4:guest:83da4fb5-037f-4985-a0f6-f696fa7ff727:ide:0:0 0014dbec-e818-4cf4-b467-d4f0dad10246:guest:90f4a2d3-9b76-4444-a1b2-72bbd06fe3e2:ide:0:0 0002c6df-a1d2-4d1d-96f0-f95405a28183:guest:cbb4ecc9-654f-4410-aeb4-b9ca602faa01:ide:0:0 0008d252-5102-43a0-82c6-18e8e2dd2bff:guest:e7ea14b2-aaa0-48b4-b1ac-7c8351c2edf4:ide:0:0 001318df-35c6-439f-8e72-8d57c36ca86b:guest:67f96fa3-8d41-4f8b-8199-4111617d3150:ide:0:1 000663ee-9fb6-4461-90f6-01327a4aff07:guest:245dd0b0-18eb-4e24-b219-9549bafdea87:ide:0:0 000663ee-9fb6-4461-90f6-01327a4aff07:guest:b52e106f-f14c-4312-8597-bcfedf4b0e70:ide:0:0 -size 2663383040 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free false -bits 64 -email drives@elastichosts.com -drive_type installcd -status active -description - -favourite false -install_notes pass:123456 -volume 0007aee7-bd5b-4551-9d8f-a958051235a9 -host 00079b57-1b29-4a89-a8d0-1d648fc20804 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name Windows Server 2008 Trial Install CD -url http://www.microsoft.com/windowsserver2008/en/us/default.aspx -read:bytes 78315713024 -claim:type shared -drive f89af28e-ff00-4fc9-a7ed-22e7fa5a88db -write:bytes 0 -read:requests 32289210 -os windows - -status active -name Gentoo Install Minimal amd64 20100408 -readers ffffffff-ffff-ffff-ffff-ffffffffffff -favourite none -read:bytes 437561856 -write:bytes 119558144 -claim:type shared -drive 73162606-78ca-4b0a-8f7a-70aa70563d90 -free none -volume 
00018aab-c080-4ed3-b52f-459933d34ec9 -host 00016115-af87-452b-a3bf-3affc8a7d934 -os linux -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 79760 -claimed 00154bbf-adf4-475d-9fe6-71c1e987d1bd:guest:d74c8d2b-a169-486c-adbd-89ca50dccafa:ide:0:1 -type cdrom -write:requests 29189 -size 209715200 - -status active -name Peppermint Ice Linux 32bit Install CD -readers ffffffff-ffff-ffff-ffff-ffffffffffff -favourite none -read:bytes 1986560 -description Peppermint OS is an Ubuntu-based Linux distribution that aims to be lightning fast and easy on system resources. By employing Mozilla's Prism technology Peppermint integrates seamlessly with Cloud and web-based applications. The distribution's other features include automatic updates, easy step-by-step installation, sleek and user-friendly interface, and increased mobility by integrating directly with Cloud-based applications. -write:bytes 437698560 -claim:type shared -drive f9d92afc-27ff-4139-84c7-ac6655e6f6f1 -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
-volume 00018aab-c080-4ed3-b52f-459933d34ec9 -host 00016115-af87-452b-a3bf-3affc8a7d934 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 485 -free none -type cdrom -write:requests 106860 -size 436207616 - -status active -name Super Gamer Linux 32bit and 64bit Install CD -readers ffffffff-ffff-ffff-ffff-ffffffffffff -favourite none -read:bytes 137039872 -description SuperGamer is a live DVD based on VectorLinux, intended to showcase gaming on Linux. The distribution is optimised for a gaming computer environment, with some tweaks to help speed up running from the live DVD. Extra games are added along with some demo editions of proprietary games. All games are native Linux games, but users wishing to run Windows games may install WINE or a related emulator, such as Cedega. -write:bytes 8446324736 -claim:type shared -drive d72701b2-01b9-4ac3-9afa-d0afdb6bcf2f -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
-volume 00018aab-c080-4ed3-b52f-459933d34ec9 -host 00016115-af87-452b-a3bf-3affc8a7d934 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 33457 -free none -type cdrom -write:requests 2062091 -size 8413773824 - -status active -name ZeroShell 1.3 Linux Install CD -readers ffffffff-ffff-ffff-ffff-ffffffffffff -favourite none -read:bytes 856064 -description Zeroshell is a Linux distribution for servers and embedded devices aimed at providing the main network services a LAN requires. It is available in the form of Live CD or Compact Flash image and you can configure and administer it using your web browser. -write:bytes 153247744 -claim:type shared -drive 44358ce4-0f30-4e48-86d1-e93330961a8a -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC.\n -volume 00023324-4c49-4567-a017-c85c8a6b8313 -host 0002c6df-a1d2-4d1d-96f0-f95405a28183 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 209 -free none -type cdrom -write:requests 37414 -size 155189248 - -status active -name Astaro Security Gateway Firewall Server 8.0 Linux Install CD -readers ffffffff-ffff-ffff-ffff-ffffffffffff -favourite none -read:bytes 365871104 -description Astaro offers an integrated software solution that provides superior performance in an all-in-one firewall. Its hardened operating system, stateful packet inspection, content filtering (virus & surf protection), application proxies and IPsec based VPN provides a powerful solution to today's security issues. 
It is designed to maximize your networks security without compromising its performance enabling telecommuters, branch offices, customers and suppliers to safely share critical business information. Our proprietary user interface, WebAdmin allows ease of use and manageability of all open source firewall components, as well as the Up2Date service via the Internet. It is easy to install with all components on one CD achieving simple implementation and integration to existing network environments. -write:bytes 369696768 -claim:type shared -drive 916b0e39-b234-407b-89ab-e8108f05726f -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 00023324-4c49-4567-a017-c85c8a6b8313 -host 0002c6df-a1d2-4d1d-96f0-f95405a28183 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 89324 -claimed 000096ce-ff07-413d-912a-aa1a33963802:guest:20911753-98a6-4951-af34-89e157452c84:ide:0:0 00115b1d-6fe9-40b2-a013-426a6a584ff7:guest:75a96f35-c3fd-492a-a48b-34dcd10987d6:ide:0:0 -free none -type cdrom -write:requests 90258 -size 369098752 - -status active -name Chakra 0.2.2 Linux 64bit Install and Live CD -readers ffffffff-ffff-ffff-ffff-ffffffffffff -favourite none -read:bytes 5451776 -description Chakra GNU/Linux is a user-friendly and powerful distribution and live CD based on Arch Linux. 
It features a graphical installer, automatic hardware detection and configuration, the latest KDE desktop, and a variety of tools and extras. -write:bytes 724774912 -claim:type shared -drive 0e8c8ac2-f791-4764-a964-c6d2679ae49a -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend using it as an installation CD and not just a live CD.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 0000acbf-fa0a-44a6-8018-2f106f96a45f -host 000096ce-ff07-413d-912a-aa1a33963802 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 1331 -free none -type cdrom -write:requests 176947 -size 721420288 - -status active -name Clonezilla Live 1.2.6 64bit -readers ffffffff-ffff-ffff-ffff-ffffffffffff -favourite none -read:bytes 876544 -description Clonezilla Live is a Debian-based live CD containing Clonezilla, a partition and disk cloning software similar to Norton Ghost. It saves and restores only used blocks in hard drive. With Clonezilla, one can clone a 5 GB system to 40 clients in about 10 minutes. -write:bytes 134045696 -claim:type shared -drive cec8330f-59c7-4e20-9577-54df28d598e7 -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. 
You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC. -volume 0000acbf-fa0a-44a6-8018-2f106f96a45f -host 000096ce-ff07-413d-912a-aa1a33963802 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 214 -free none -type cdrom -write:requests 32726 -size 134217728 - -status active -name Absolute Linux 13.1.2 Install CD -readers ffffffff-ffff-ffff-ffff-ffffffffffff -favourite none -read:bytes 93573632 -description Absolute Linux is a light-weight modification of Slackware Linux. It includes several utilities that make configuration and maintenance easier and it has many common desktop and Internet applications installed and configured with tight integration of menus, applications and MIME types. Absolute Linux uses IceWM and ROX for its window and file managers. -write:bytes 728211456 -claim:type shared -drive b745638c-87ff-4836-8623-e48e67286494 -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
-volume 000431a5-46d9-4a67-9c03-3c3402a41992 -host 00043e69-ac57-45b1-8692-75db24064fb9 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 39232 -free none -type cdrom -write:requests 177786 -size 725614592 - -status active -name Sabayon_Linux_5.4_amd64_K.iso -readers ffffffff-ffff-ffff-ffff-ffffffffffff -favourite none -read:bytes 12877824 -write:bytes 2160496640 -claim:type shared -drive 75119285-7c20-43f4-9d3b-e6af3f1823e3 -free none -volume 000431a5-46d9-4a67-9c03-3c3402a41992 -host 00043e69-ac57-45b1-8692-75db24064fb9 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 3144 -type cdrom -write:requests 527465 -size 2151677952 - -status active -name FreeBSD 8.1 Linux 64bit Install CD -readers ffffffff-ffff-ffff-ffff-ffffffffffff -favourite none -read:bytes 60035072 -description About FreeBSD FreeBSD is a UN*X-like operating system for the i386, IA-64, PC-98, Alpha/AXP, and UltraSPARC platforms based on U.C. Berkeley's "4.4BSD-Lite" release, with some "4.4BSD-Lite2" enhancements. It is also based indirectly on William Jolitz's port of U.C. Berkeley's "Net/2" to the i386, known as "386BSD", though very little of the 386BSD code remains. FreeBSD is used by companies, Internet Service Providers, researchers, computer professionals, students and home users all over the world in their work, education and recreation. -write:bytes 2315309056 -claim:type shared -drive fb940d5b-b9a0-4f9c-8cb7-94c3378d1676 -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. 
You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 00059deb-640a-464e-9509-6a3ec6cfd549 -host 00059836-5512-4ce2-bf66-4daab2d994e4 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 14657 -free none -type cdrom -write:requests 565261 -size 2306867200 - -status active -name BackTrack 4 Release 1 Linux Live CD -readers ffffffff-ffff-ffff-ffff-ffffffffffff -favourite none -read:bytes 4008857600 -description A SLAX-based live CD with a comprehensive collection of security and forensics tools BackTrack 4 R1, a Linux-based penetration testing arsenal for security professionals. -write:bytes 2023919616 -claim:type shared -drive ef152c9c-1460-44f5-b192-8e0524909709 -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
-volume 00059deb-640a-464e-9509-6a3ec6cfd549 -host 00059836-5512-4ce2-bf66-4daab2d994e4 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 978725 -free none -type cdrom -write:requests 494121 -size 2017460224 - -status active -name Vector 6.0 Linux 32bit Install CD -readers ffffffff-ffff-ffff-ffff-ffffffffffff -favourite none -read:bytes 3035136 -description VECTORLINUX is a small, fast, Intel Linux operating system based on one of the original Linux distributions, Slackware. The enormously popular Slackware is the true "Unix" of Linux distributions and is used by major corporations, universities and home users alike. It's popularity stems from the fact that it is a robust, versatile and almost unbreakable system. Slackware has been traditionally known to be about as user friendly as a coiled rattlesnake and that's where Vector Linux comes into play. We have produced a bloat free, easy to install, configure and maintain Slackware based system that is second to none. -write:bytes 729059328 -claim:type shared -drive c2a757b9-dfd0-432c-bb29-b380b4dd6fb6 -free none -volume 00059deb-640a-464e-9509-6a3ec6cfd549 -host 00059836-5512-4ce2-bf66-4daab2d994e4 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 741 -type cdrom -write:requests 177993 -size 729808896 - -status active -name PCBSD 8.1 Linux 64bit Install CD -readers ffffffff-ffff-ffff-ffff-ffffffffffff -favourite none -read:bytes 15228928 -description PC-BSD has as its goals to be an easy-to-install-and-use desktop operating system, based on FreeBSD. To accomplish this, it currently has a graphical installation, which will enable even UNIX novices to easily install and get it running. It will also come with KDE pre-built, so that the desktop can be used immediately. Currently in development is a graphical software installation program, which will make installing pre-built software as easy as other popular operating systems. 
-write:bytes 3794726912 -claim:type shared -drive 802fbcab-2723-469c-b775-6fdeb21287da -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 00065289-b9c8-4548-8d83-e1891f831f51 -host 000663ee-9fb6-4461-90f6-01327a4aff07 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 3718 -free none -type cdrom -write:requests 926447 -size 3783262208 - -status active -name nst-2.13.0.x86_64.iso -readers ffffffff-ffff-ffff-ffff-ffffffffffff -favourite none -read:bytes 7503872 -write:bytes 1436717056 -claim:type shared -drive 9d04c648-712d-4076-bd99-70088d85fe01 -free none -volume 00065289-b9c8-4548-8d83-e1891f831f51 -host 000663ee-9fb6-4461-90f6-01327a4aff07 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 1832 -type cdrom -write:requests 350761 -size 1430257664 - -status active -name Peppermint-Ice-10012010.iso -readers ffffffff-ffff-ffff-ffff-ffffffffffff -favourite none -read:bytes 2613248 -write:bytes 452710400 -claim:type shared -drive 2e79eeee-b4ad-4dcf-a072-86dcede6af1b -free none -volume 00065289-b9c8-4548-8d83-e1891f831f51 -host 000663ee-9fb6-4461-90f6-01327a4aff07 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 638 -type cdrom -write:requests 110525 -size 452984832 - -status active -name Sabayon_Linux_5.4_amd64_K.iso -readers 
ffffffff-ffff-ffff-ffff-ffffffffffff -favourite none -read:bytes 14082048 -write:bytes 2161713152 -claim:type shared -drive 07e2a6df-8389-4130-a003-edacc19a9ee3 -free none -volume 00065289-b9c8-4548-8d83-e1891f831f51 -host 000663ee-9fb6-4461-90f6-01327a4aff07 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 3438 -type cdrom -write:requests 527762 -size 2151677952 - -type cdrom -claimed 00031836-a624-4b22-bc7d-41ff8977087b:guest:ffe02269-b653-47ad-ab21-a02805b24904:ide:0:0 000096ce-ff07-413d-912a-aa1a33963802:guest:1f378a18-1b59-40e7-8e9a-7f81d7eda6b8:ide:0:0 00079b57-1b29-4a89-a8d0-1d648fc20804:guest:8c13b69d-6d11-4151-975b-a2f084c7ada7:ide:0:0 00166b98-6431-40ad-94b0-244881ff87d5:guest:1705b116-aac2-449a-b0de-3dd4ab7e765f:ide:0:0 000932a7-c74f-4de3-bfc4-227435f78998:guest:30d887ee-ed96-4c32-a1a8-5ab49abd2a7e:ide:0:1 00154bbf-adf4-475d-9fe6-71c1e987d1bd:guest:bcea8695-baeb-476e-8089-475ce8948646:ide:0:1 0002c6df-a1d2-4d1d-96f0-f95405a28183:guest:df1368af-05a3-4ad5-8017-54be3ea70232:ide:0:0 00109617-2c6b-424b-9cfa-5b572c17bafe:guest:3569d646-7ae5-410f-b66e-64bba1381cba:ide:0:0 -size 2663383040 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -bits 64 -drive_type installcd -status active -description - -favourite false -free false -volume 0009c669-9ea6-4825-b788-b40902bb1902 -host 000932a7-c74f-4de3-bfc4-227435f78998 -encryption:cipher aes-xts-plain -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 0 -name Windows Server 2008 Trial Install CD -url http://www.microsoft.com/windowsserver2008/en/us/default.aspx -read:bytes 5261708288 -claim:type shared -drive 7e23b099-dd35-446b-8d90-2953643b664f -write:bytes 0 -read:requests 1883649 -os windows - -status active -name Unity Linux 64bit Install and Live CD -readers ffffffff-ffff-ffff-ffff-ffffffffffff -favourite none -read:bytes 147034112 -description The community-oriented Unity Linux is a minimalist distribution and live CD based on Mandriva Linux. 
The project's main goal is to create a base operating system from which more complete, user-oriented distribution can easily be built - either by other distribution projects or by the users themselves. Unity Linux uses Openbox as the default window manager. Its package management is handled via Smart and RPM 5 which can download and install additional software packages from the project's online repository. -write:bytes 290488320 -claim:type shared -drive d235dada-407c-4105-b4ef-636eb7604404 -install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. -volume 00106cda-0e17-40c8-a576-b516f0eb67bc -host 00109617-2c6b-424b-9cfa-5b572c17bafe -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -read:requests 35897 -free none -type cdrom -write:requests 70920 -size 289406976 - -type disk -licenses msft_p73_04837 msft_tfa_00009 -size 21474836480 -use dbserver,general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free false -bits 64 -drive_type preinstalled -status active -description Please refer to the install notes for a full guide to initial configuration. 
-favourite false -install_notes ***You must update the default Administrator password for Windows Server Standard 2008 and the Super Administrator password (sa) for SQL Server Standard 2008***\n\nPre-installed Windows Server 2008 Standard R2 64bit English on 15/07/2010\n=========================================================================\n\n1. Minimum Hardware Requirements\n--------------------------------\n\nThe recommended minimum hardware requirements for the use of SQL Server Standard 2008 R2 with Windows Server Standard 2008 R2 as publishes by Microsoft can be found through the following link:\n\nhttp://msdn.microsoft.com/en-us/library/ms143506.aspx\n\n\n2. Update your administrator password\n-------------------------------------\n\nThe default administrator password is set to: CloudSigma1\n\nPlease CHANGE this IMMEDIATELY after first logging on.\n\na) Open the "Control Panel" and select "User Accounts"\n\nb) Select "Change your Windows Password"\n\nc) The Administrator user icon will be shown on the right, select again "Change your Password", and on this screen update your details accordingly\n\n\n3. Expanding your drive\n-----------------------\n\nThe system is fully installed, but you will need to extend the\ndisk partition to cover the whole of your drive. To do this:\n\na) Open the "Computer Management" tool from "Administrative Tools" on the "Start" menu.\n\nb) Select "Storage" then "Disk Management" in the left hand pane\n\nc) Right-click on the 12.90 GB NTFS primary partition, and select "Extend Volume"\n\nd) Enter the amount of disk space that you want to increase the Volume size by (the default will be the maximum available)\n\nYou will need to repeat this procedure if you ever resize this drive in the future.\n\n\n4. 
Enabling Remote Access\n-------------------------\n\nAfter logging in to VNC for the first time to configure your new Windows server, we recommend that if you are logging in from a Windows Desktop that you enable Remote Desktop for better access performance. To do this, follow these instructions:\n\na) Select "Start" | "Applications" | "Control Panel"\n\nb) Select "System and Security"\n\nc) Under "System" select "Allow Remote Access"\n\nd) Select the corresponding connection according to your Security Configuration\n\n\n5. Pinging Service\n------------------\n\nThe Pinging service has been turned on by default, if you wish to disable it please follow these instructions:\n\na) Select the "Windows Firewall with Advanced Security" tool from "Administrative Tools" on the "Start" menu.\n\nb) On the left hand pane, select "Inbound Rules"\n\nc) On the middle pane, select the rules "File and Printer Sharing (Echo Request - ICMPv4-In)" and "File and Printer Sharing (Echo Request - ICMPv6-In)"\n\nd) From the right-hand pane, select "Disable Rules"\n\n\nSQL Server 2008 R2 on 15/07/2010\n================================\n\n1. Change the Super Administrator Password (sa). \n--------------------------------------------------------------------\n\nThe default password has been set to "CloudSigma1"\n\na) Open "Microsoft SQL Server Management Studio"\n\nb) Connect to the Server Using "Windows Indentificaiton"\n\nc) From the Object Explorer select "Security" then "Longins"\n\nd) Right-click on sa and select "Properties"\n\ne) Enter the new password into "Password" and "Confirm Password" and press "OK"\n\n\n2. 
The following features were installed:\n-----------------------------------------------------\n\na) Main features\n\n-Database Engine Services\n-SQL Server Replication\n-Full-Text Search\n-Analysis Services\n-Reporting Services\n\nb) Shared Features\n\n-Business Intelligengce Development Studio\n-Client Tools Connectivity\n-Integration Services\n-Clinet Tools Backwards Compatibility\n-Clinet Tools SDK\n-SQL Server Books Online\n-Mangement Tools - Basic\n-Management Tools - Complete\n-SQL Client Connectivity SDK\n-Microsoft Sync Framework\n\n3 The following services were configured:\n--------------------------------------------------------\n\n\nService: SQL Server Agent\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Manual\n\nService: SQL Server Database Engine\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Automatic\n\nService: SQL Server Analysis Services\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Disabled\n\nService: SQL Server Reporting Services\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Disabled\n\nService: SQL Server Integration Services 10.1\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Disabled\n \nService: SQL Full-text filter Daemon Lanuch\nUser: NT AUTHORITY\LOCALSERVICE\nStartup Type: Disabled\n\nService: SQL Server Browser\nUser: NT AUTHORITY\LOCALSERVICE\nStartup Type: Disabled\n\nFor detailed server installation configuration refer to the following installation log files on the system:\nC:\Program Files\Microsoft SQL Server\100\Setup Bootstrap\Log\20100716_162426\Summary_WIN-K0F21FV1C1V_20100716_162426.txt\n -volume 00023324-4c49-4567-a017-c85c8a6b8313 -host 0002c6df-a1d2-4d1d-96f0-f95405a28183 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 5242881 -name SQL Server Standard 2008 R2 - Windows Server Standard 2008 R2 - 64bit English pub -url http://www.microsoft.com/sqlserver/2008/en/us/ -read:bytes 49172439040 -claim:type shared -drive 7b013f8c-dd4c-4701-b1ca-936506dc37ca -write:bytes 
21474840576 -read:requests 12004990 -os windows - -type disk -licenses msft_lwa_00135 -size 13958643712 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free false -bits 64 -drive_type preinstalled -status active -description Please refer to the install notes for a full guide to initial configuration. -favourite false -install_notes ***You must update the default Administrator password for Windows Web Server 2008***\n\nPre-installed Windows Server 2008 Web R2 64bit English on 24/07/2010\n============================================================\n\n1. Connecting to your server via VNC\n--------------------------------------------------\n\na) Having installed a compatible VNC client, open a VNC connection to your server.\n\nb) Enter your IP address and VNC password as displayed on your Server Summary Window.\n\nc) Start to configure your server.\n\n\n2. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements as publishes by Microsoft can be found through the following link:\n\nhttp://msdn.microsoft.com/en-us/windowsserver/cc196364.aspx\n\nWe recommend specifying a higher level of RAM for a better user experience.\n\n\n3. Update your administrator password\n----------------------------------------------------\n\nThe default administrator password is set to: CloudSigma1\n\nPlease CHANGE this IMMEDIATELY after first logging on.\n\na) Open the "Control Panel" from the "Start" menu and select "User Accounts"\n\nb) Select "Change your Windows Password"\n\nc) The Administrator user icon will be shown on the right, select again "Change your Password", and on this screen update your details accordingly\n\n\n4. Configuring your Networking\n------------------------------------------\n\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. 
Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs.\n\n\n5. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive. To do this:\n\na) Open the "Computer Management" tool from "Administrative Tools" on the "Start" menu.\n\nb) Select "Storage" then "Disk Management" in the left hand pane\n\nc) Right-click on the 12.90 GB NTFS primary partition, and select "Extend Volume"\n\nd) Enter the amount of disk space that you want to increase the Volume size by (the default will be the maximum available)\n\nYou will need to repeat this procedure if you ever resize this drive in the future.\n\n\n6. Enabling Remote Access\n--------------------------------------\n\nAfter logging in to VNC for the first time to configure your new Windows server, we recommend that if you are logging in from a Windows Desktop that you enable Remote Desktop for better access performance. To do this, follow these instructions:\n\na) Select "Start" | "Applications" | "Control Panel"\n\nb) Select "System and Security"\n\nc) Under "System" select "Allow Remote Access"\n\nd) Select the corresponding connection type according to your Security Configuration\n\n\n7. 
Pinging Service\n-------------------------\n\nThe Pinging service has been turned on by default, if you wish to disable it please follow these instructions:\n\na) Select the "Windows Firewall with Advanced Security" tool from "Administrative Tools" on the "Start" menu.\n\nb) On the left hand pane, select "Inbound Rules"\n\nc) On the middle pane, select the rules "File and Printer Sharing (Echo Request - ICMPv4-In)" and "File and Printer Sharing (Echo Request - ICMPv6-In)"\n\nd) From the right-hand pane, select "Disable Rules" -volume 00120946-d7a4-486e-867e-8348bebe0b95 -host 0012c12d-72b1-4dfc-ae0f-aeab09881545 -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 3407873 -name Windows Server Web 2008 R2 64bit English -url http://www.microsoft.com/windowsserver2008/en/us/default.aspx -read:bytes 145252270080 -claim:type shared -drive 71697799-c611-41b9-93be-f79152aefbe5 -write:bytes 13958647808 -read:requests 35461980 -os windows - -type disk -licenses msft_p73_04837 -size 13958643712 -use general -readers ffffffff-ffff-ffff-ffff-ffffffffffff -free false -bits 64 -drive_type preinstalled -status active -description Please refer to the install notes for a full guide to initial configuration. -favourite false -install_notes ***You must update the default Administrator password for Windows Server Standard 2008***\n\nPre-installed Windows Server 2008 Standard R2 64bit English on 24/07/2010\n============================================================\n\n1. Connecting to your server via VNC\n--------------------------------------------------\n\na) Having installed a compatible VNC client, open a VNC connection to your server.\n\nb) Enter your IP address and VNC password as displayed on your Server Summary Window.\n\nc) Start to configure your server.\n\n\n2. 
Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements as publishes by Microsoft can be found through the following link:\n\nhttp://msdn.microsoft.com/en-us/windowsserver/cc196364.aspx\n\nWe recommend specifying a higher level of RAM for a better user experience.\n\n\n3. Update your administrator password\n----------------------------------------------------\n\nThe default administrator password is set to: CloudSigma1\n\nPlease CHANGE this IMMEDIATELY after first logging on.\n\na) Open the "Control Panel" from the "Start" menu and select "User Accounts"\n\nb) Select "Change your Windows Password"\n\nc) The Administrator user icon will be shown on the right, select again "Change your Password", and on this screen update your details accordingly\n\n\n4. Configuring your Networking\n------------------------------------------\n\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs.\n\n\n5. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive. 
To do this:\n\na) Open the "Computer Management" tool from "Administrative Tools" on the "Start" menu.\n\nb) Select "Storage" then "Disk Management" in the left hand pane\n\nc) Right-click on the 12.90 GB NTFS primary partition, and select "Extend Volume"\n\nd) Enter the amount of disk space that you want to increase the Volume size by (the default will be the maximum available)\n\nYou will need to repeat this procedure if you ever resize this drive in the future.\n\n\n6. Enabling Remote Access\n--------------------------------------\n\nAfter logging in to VNC for the first time to configure your new Windows server, we recommend that if you are logging in from a Windows Desktop that you enable Remote Desktop for better access performance. To do this, follow these instructions:\n\na) Select "Start" | "Applications" | "Control Panel"\n\nb) Select "System and Security"\n\nc) Under "System" select "Allow Remote Access"\n\nd) Select the corresponding connection type according to your Security Configuration\n\n\n7. 
Pinging Service\n-------------------------\n\nThe Pinging service has been turned on by default, if you wish to disable it please follow these instructions:\n\na) Select the "Windows Firewall with Advanced Security" tool from "Administrative Tools" on the "Start" menu.\n\nb) On the left hand pane, select "Inbound Rules"\n\nc) On the middle pane, select the rules "File and Printer Sharing (Echo Request - ICMPv4-In)" and "File and Printer Sharing (Echo Request - ICMPv6-In)"\n\nd) From the right-hand pane, select "Disable Rules" -volume 0013fc75-b170-4d62-abaf-804b8fc466cc -host 001318df-35c6-439f-8e72-8d57c36ca86b -user 00000000-0000-0000-0000-000000000001 -autoexpanding false -write:requests 3407873 -name Windows Server Standard 2008 R2 64bit English -url http://www.microsoft.com/windowsserver2008/en/us/default.aspx -read:bytes 257073537024 -claim:type shared -drive 0611be3f-0607-4b3c-8bad-a0af392d928a -write:bytes 13958647808 -read:requests 62762094 -os windows diff -Nru libcloud-0.5.0/test/compute/fixtures/cloudsigma/resources_ip_create.txt libcloud-0.15.1/test/compute/fixtures/cloudsigma/resources_ip_create.txt --- libcloud-0.5.0/test/compute/fixtures/cloudsigma/resources_ip_create.txt 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/cloudsigma/resources_ip_create.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -resource 1.2.3.4 -netmask 255.255.255.0 -nameserver 91.203.56.1 -user f2e19d5c-eaa1-44e5-94aa-dc194594bd7b -type ip -gateway 91.203.56.1 - -resource 1.2.3.5 -netmask 255.255.255.0 -nameserver 91.203.56.1 -user f2e19d5c-eaa1-44e5-94aa-dc194594bd7b -type ip -gateway 91.203.56.1 diff -Nru libcloud-0.5.0/test/compute/fixtures/cloudsigma/resources_ip_list.txt libcloud-0.15.1/test/compute/fixtures/cloudsigma/resources_ip_list.txt --- libcloud-0.5.0/test/compute/fixtures/cloudsigma/resources_ip_list.txt 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/cloudsigma/resources_ip_list.txt 1970-01-01 
00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -ip 1.2.3.4 -ip 1.2.3.5 -ip 1.2.3.6 diff -Nru libcloud-0.5.0/test/compute/fixtures/cloudsigma/servers_create.txt libcloud-0.15.1/test/compute/fixtures/cloudsigma/servers_create.txt --- libcloud-0.5.0/test/compute/fixtures/cloudsigma/servers_create.txt 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/cloudsigma/servers_create.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -ide:0:0:write:requests 466 -rx 760681 -vnc:password testpass -ide:0:0 f0202f1c-0b4f-4cfc-8ae3-e30951d09ef0 -ide:0:0:read:requests 7467 -ide:0:0:read:bytes 165395968 -vnc:ip 178.22.66.28 -tx:packets 32 -tx 2568 -boot ide:0:0 -smp 1 -started 1286568422 -nic:0:model virtio -status active -mem 640 -rx:packets 12662 -user 93b34fd9-7986-4b25-8bfd-98a50383605d -ide:0:0:media disk -name cloudsigma node -persistent true -nic:0:block tcp/22 tcp/23 tcp/25 tcp/43594 tcp/43595 -server 62fe7cde-4fb9-4c63-bd8c-e757930066a0 -nic:0:dhcp 1.2.3.4 -nic:1:dhcp 1.2.3.5 -ide:0:0:write:bytes 7358464 -cpu 1100 diff -Nru libcloud-0.5.0/test/compute/fixtures/cloudsigma/servers_info.txt libcloud-0.15.1/test/compute/fixtures/cloudsigma/servers_info.txt --- libcloud-0.5.0/test/compute/fixtures/cloudsigma/servers_info.txt 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/cloudsigma/servers_info.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -ide:0:0:write:requests 466 -rx 760681 -vnc:password testpass -ide:0:0 f0202f1c-0b4f-4cfc-8ae3-e30951d09ef0 -ide:0:0:read:requests 7467 -ide:0:0:read:bytes 165395968 -vnc:ip 178.22.66.28 -tx:packets 32 -tx 2568 -boot ide:0:0 -smp 1 -started 1286568422 -nic:0:model virtio -status active -mem 640 -rx:packets 12662 -user 93b34fd9-7986-4b25-8bfd-98a50383605d -ide:0:0:media disk -name cloudsigma node -persistent true -nic:0:block tcp/22 tcp/23 tcp/25 tcp/43594 tcp/43595 -server 62fe7cde-4fb9-4c63-bd8c-e757930066a0 -nic:0:dhcp 1.2.3.4 -nic:1:dhcp 1.2.3.5 -ide:0:0:write:bytes 
7358464 -cpu 1100 diff -Nru libcloud-0.5.0/test/compute/fixtures/cloudsigma/servers_set.txt libcloud-0.15.1/test/compute/fixtures/cloudsigma/servers_set.txt --- libcloud-0.5.0/test/compute/fixtures/cloudsigma/servers_set.txt 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/cloudsigma/servers_set.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -ide:0:0:write:requests 466 -rx 760681 -vnc:password testpass -ide:0:0 f0202f1c-0b4f-4cfc-8ae3-e30951d09ef0 -ide:0:0:read:requests 7467 -ide:0:0:read:bytes 165395968 -vnc:ip 178.22.66.28 -tx:packets 32 -tx 2568 -boot ide:0:0 -smp 2 -started 1286568422 -nic:0:model virtio -status active -mem 640 -rx:packets 12662 -user 93b34fd9-7986-4b25-8bfd-98a50383605d -ide:0:0:media disk -name cloudsigma node -persistent true -nic:0:block tcp/22 tcp/23 tcp/25 tcp/43594 tcp/43595 -server 62fe7cde-4fb9-4c63-bd8c-e757930066a0 -nic:0:dhcp 1.2.3.4 -nic:1:dhcp 1.2.3.5 -ide:0:0:write:bytes 7358464 -cpu 1100 diff -Nru libcloud-0.5.0/test/compute/fixtures/ec2/create_tags.xml libcloud-0.15.1/test/compute/fixtures/ec2/create_tags.xml --- libcloud-0.5.0/test/compute/fixtures/ec2/create_tags.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ec2/create_tags.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ - - b001068a-ca0d-4f05-b622-28fe984f44be - true - diff -Nru libcloud-0.5.0/test/compute/fixtures/ec2/delete_tags.xml libcloud-0.15.1/test/compute/fixtures/ec2/delete_tags.xml --- libcloud-0.5.0/test/compute/fixtures/ec2/delete_tags.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ec2/delete_tags.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ - - 7a297da7-3ecb-4156-8bcb-3be73896cc14 - true - diff -Nru libcloud-0.5.0/test/compute/fixtures/ec2/describe_addresses_multi.xml libcloud-0.15.1/test/compute/fixtures/ec2/describe_addresses_multi.xml --- libcloud-0.5.0/test/compute/fixtures/ec2/describe_addresses_multi.xml 2011-03-24 
19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ec2/describe_addresses_multi.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - - - 1.2.3.4 - i-4382922a - - - 1.2.3.6 - i-4382922b - - - 1.2.3.5 - i-4382922b - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/ec2/describe_addresses_single.xml libcloud-0.15.1/test/compute/fixtures/ec2/describe_addresses_single.xml --- libcloud-0.5.0/test/compute/fixtures/ec2/describe_addresses_single.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ec2/describe_addresses_single.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - - - 1.2.3.4 - i-4382922a - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/ec2/describe_addresses.xml libcloud-0.15.1/test/compute/fixtures/ec2/describe_addresses.xml --- libcloud-0.5.0/test/compute/fixtures/ec2/describe_addresses.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ec2/describe_addresses.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - - - 1.2.3.4 - i-4382922a - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/ec2/describe_availability_zones.xml libcloud-0.15.1/test/compute/fixtures/ec2/describe_availability_zones.xml --- libcloud-0.5.0/test/compute/fixtures/ec2/describe_availability_zones.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ec2/describe_availability_zones.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ - - cc0dfb29-efef-451c-974f-341b3edfb28f - - - eu-west-1a - available - eu-west-1 - - - - eu-west-1b - available - eu-west-1 - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/ec2/describe_images.xml libcloud-0.15.1/test/compute/fixtures/ec2/describe_images.xml --- libcloud-0.5.0/test/compute/fixtures/ec2/describe_images.xml 2011-03-24 19:47:22.000000000 +0000 +++ 
libcloud-0.15.1/test/compute/fixtures/ec2/describe_images.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ - - - - ami-be3adfd7 - ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml - available - 206029621532 - false - i386 - machine - aki-4438dd2d - ari-4538dd2c - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/ec2/describe_instances.xml libcloud-0.15.1/test/compute/fixtures/ec2/describe_instances.xml --- libcloud-0.5.0/test/compute/fixtures/ec2/describe_instances.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ec2/describe_instances.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ - - 56d0fffa-8819-4658-bdd7-548f143a86d2 - - - r-07adf66e - 822272953071 - - - default - - - - - i-4382922a - ami-0d57b264 - - 0 - pending - - - - - 1.2.3.5 - 1.2.3.5 - 0 - - m1.small - 2009-08-07T05:47:04.000Z - - us-east-1a - - - disabled - - - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/ec2/describe_tags.xml libcloud-0.15.1/test/compute/fixtures/ec2/describe_tags.xml --- libcloud-0.5.0/test/compute/fixtures/ec2/describe_tags.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ec2/describe_tags.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ - - fa7e0e44-df5e-49a0-98d7-5d4d19a29f95 - - - i-4382922a - instance - tag - test one - - - i-4382922a - instance - owner - libcloud - - - i-4382922a - instance - stack - Production - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/ec2/modify_instance_attribute.xml libcloud-0.15.1/test/compute/fixtures/ec2/modify_instance_attribute.xml --- libcloud-0.5.0/test/compute/fixtures/ec2/modify_instance_attribute.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ec2/modify_instance_attribute.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - true - diff -Nru libcloud-0.5.0/test/compute/fixtures/ec2/reboot_instances.xml 
libcloud-0.15.1/test/compute/fixtures/ec2/reboot_instances.xml --- libcloud-0.5.0/test/compute/fixtures/ec2/reboot_instances.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ec2/reboot_instances.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ - - 76dabb7a-fb39-4ed1-b5e0-31a4a0fdf5c0 - true - diff -Nru libcloud-0.5.0/test/compute/fixtures/ec2/run_instances_idem_mismatch.xml libcloud-0.15.1/test/compute/fixtures/ec2/run_instances_idem_mismatch.xml --- libcloud-0.5.0/test/compute/fixtures/ec2/run_instances_idem_mismatch.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ec2/run_instances_idem_mismatch.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ - - - - - IdempotentParameterMismatch - - Arguments on this idempotent request are inconsistent with arguments used in previous request(s). - - - - 5dabd361-d2e0-4f79-937d-4b2852a3b719 - diff -Nru libcloud-0.5.0/test/compute/fixtures/ec2/run_instances_idem.xml libcloud-0.15.1/test/compute/fixtures/ec2/run_instances_idem.xml --- libcloud-0.5.0/test/compute/fixtures/ec2/run_instances_idem.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ec2/run_instances_idem.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ - - r-47a5402e - AIDADH4IGTRXXKCD - - - default - - - - - i-2ba64342 - ami-be3adfd7 - - 0 - pending - - - - example-key-name - 0 - m1.small - 2007-08-07T11:51:50.000Z - - us-east-1b - - - true - - testclienttoken - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/ec2/run_instances.xml libcloud-0.15.1/test/compute/fixtures/ec2/run_instances.xml --- libcloud-0.5.0/test/compute/fixtures/ec2/run_instances.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ec2/run_instances.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ - - r-47a5402e - AIDADH4IGTRXXKCD - - - default - - - - - i-2ba64342 - ami-be3adfd7 - - 0 - pending - - - - example-key-name - 0 - m1.small - 
2007-08-07T11:51:50.000Z - - us-east-1b - - - true - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/ec2/terminate_instances.xml libcloud-0.15.1/test/compute/fixtures/ec2/terminate_instances.xml --- libcloud-0.5.0/test/compute/fixtures/ec2/terminate_instances.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ec2/terminate_instances.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ - - fa63083d-e0f7-4933-b31a-f266643bdee8 - - - i-4382922a - - 32 - shutting-down - - - 16 - running - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/ecp/htemplate_list.json libcloud-0.15.1/test/compute/fixtures/ecp/htemplate_list.json --- libcloud-0.5.0/test/compute/fixtures/ecp/htemplate_list.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ecp/htemplate_list.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -{"templates": [ - -{"uuid": "1", "hypervisor_name": "kvm-hvm", "cpus": 1, "memory": 512, "arch": "i686", "id": 1, "name": "Small"}, - -{"uuid": "2", "hypervisor_name": "kvm-hvm", "cpus": 2, "memory": 1024, "arch": "i686", "id": 2, "name": "Medium"}, - -{"uuid": "3", "hypervisor_name": "kvm-hvm", "cpus": 3, "memory": 2048, "arch": "x86_64", "id": 3, "name": "Large"} - -], "errno": 0, "message": "Success"} \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/ecp/network_list.json libcloud-0.15.1/test/compute/fixtures/ecp/network_list.json --- libcloud-0.5.0/test/compute/fixtures/ecp/network_list.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ecp/network_list.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"errno": 0, "message": "Success", "networks": [{"uuid": "1", "vlan_id": null, "name": "Default"}]} \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/ecp/ptemplate_list.json libcloud-0.15.1/test/compute/fixtures/ecp/ptemplate_list.json --- libcloud-0.5.0/test/compute/fixtures/ecp/ptemplate_list.json 
2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ecp/ptemplate_list.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -{"errno": 0, "message": "Success", "packages": [ - -{"os": "unknown", "description": "AUTO import from /opt/enomalism2/repo/5d407a68-c76c-11de-86e5-000475cb7577.xvm2", "storage": 20480, "uuid": "1", "name": "centos54"}, - -{"os": "unknown", "description": "AUTO import from /opt/enomalism2/repo/5d407a68-c76c-11de-86e5-000475cb7577.xvm2", "storage": 20480, "uuid": "2", "name": "centos54 two"} -]} \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/ecp/vm_1_action_delete.json libcloud-0.15.1/test/compute/fixtures/ecp/vm_1_action_delete.json --- libcloud-0.5.0/test/compute/fixtures/ecp/vm_1_action_delete.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ecp/vm_1_action_delete.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"errno": 0, "message": "Success"} \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/ecp/vm_1_action_start.json libcloud-0.15.1/test/compute/fixtures/ecp/vm_1_action_start.json --- libcloud-0.5.0/test/compute/fixtures/ecp/vm_1_action_start.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ecp/vm_1_action_start.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -{"errno": 0, "message": "Success", "vm": -{"vnc_enabled": true, "uuid": 1, "tags": [], "ip_address": "42.78.124.75", "interfaces": [{"ip": "42.78.124.75", "mac": "00:16:e9:d6:40:c6", "network_name": "Default", "uuid": "479b9823-2ded-11df-94e8-0015174e564c", "network": "fc38963c-a9fa-11de-8c4b-001b63a56c51"}], "vnc_port": "5900", "name": "dummy-1", "state": "unkown", "trusted": null, "os": "unknown", "vnc_password": "jBs5UT00", "vnc_ip_address": "192.168.1.12", "hardware_profile_uuid": "bcaff710-2914-11de-836c-001a929face2"} -} \ No newline at end of file diff -Nru 
libcloud-0.5.0/test/compute/fixtures/ecp/vm_1_action_stop.json libcloud-0.15.1/test/compute/fixtures/ecp/vm_1_action_stop.json --- libcloud-0.5.0/test/compute/fixtures/ecp/vm_1_action_stop.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ecp/vm_1_action_stop.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -{"errno": 0, "message": "Success", "vm": -{"vnc_enabled": true, "uuid": 1, "tags": [], "ip_address": "42.78.124.75", "interfaces": [{"ip": "42.78.124.75", "mac": "00:16:e9:d6:40:c6", "network_name": "Default", "uuid": "479b9823-2ded-11df-94e8-0015174e564c", "network": "fc38963c-a9fa-11de-8c4b-001b63a56c51"}], "vnc_port": "5900", "name": "dummy-1", "state": "unkown", "trusted": null, "os": "unknown", "vnc_password": "jBs5UT00", "vnc_ip_address": "192.168.1.111", "hardware_profile_uuid": "bcaff710-2914-11de-836c-001a929face2"} -} \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/ecp/vm_1_get.json libcloud-0.15.1/test/compute/fixtures/ecp/vm_1_get.json --- libcloud-0.5.0/test/compute/fixtures/ecp/vm_1_get.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ecp/vm_1_get.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -{"errno": 0, "message": "Success", "vm": -{"vnc_enabled": true, "uuid": 1, "tags": [], "ip_address": "42.78.124.75", "interfaces": [{"ip": "42.78.124.75", "mac": "00:16:e9:d6:40:c6", "network_name": "Default", "uuid": "479b9823-2ded-11df-94e8-0015174e564c", "network": "fc38963c-a9fa-11de-8c4b-001b63a56c51"}], "vnc_port": "5900", "name": "dummy-1", "state": "off", "trusted": null, "os": "unknown", "vnc_password": "jBs5UT00", "vnc_ip_address": "192.168.1.111", "hardware_profile_uuid": "bcaff710-2914-11de-836c-001a929face2"} -} \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/ecp/vm_list.json libcloud-0.15.1/test/compute/fixtures/ecp/vm_list.json --- libcloud-0.5.0/test/compute/fixtures/ecp/vm_list.json 2011-03-24 
19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ecp/vm_list.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -{"errno": 0, "message": "Success", "vms": -[ -{"vnc_enabled": true, "uuid": 1, "tags": [], "ip_address": "42.78.124.75", "interfaces": [{"ip": "42.78.124.75", "mac": "00:16:e9:d6:40:c6", "network_name": "Default", "uuid": "479b9823-2ded-11df-94e8-0015174e564c", "network": "fc38963c-a9fa-11de-8c4b-001b63a56c51"}], "vnc_port": "5900", "name": "dummy-1", "state": "running", "trusted": null, "os": "unknown", "vnc_password": "jBs5UT00", "vnc_ip_address": "192.168.1.111", "hardware_profile_uuid": "bcaff710-2914-11de-836c-001a929face2"}, - -{"vnc_enabled": true, "uuid": 2, "tags": [], "ip_address": "42.78.124.75", "interfaces": [{"ip": "42.78.124.75", "mac": "00:16:72:b4:71:21", "network_name": "Default", "uuid": "c76edd61-2dfd-11df-84ca-0015174e564c", "network": "fc38963c-a9fa-11de-8c4b-001b63a56c51"}], "vnc_port": "5902", "name": "dummy-2", "state": "running", "trusted": null, "os": "unknown", "vnc_password": "zoiZW31T", "vnc_ip_address": "192.168.1.111", "hardware_profile_uuid": "bcaff710-2914-11de-836c-001a929face2"}, - -{"vnc_enabled": true, "uuid": 3, "tags": [], "ip_address": "42.78.124.75", "interfaces": [{"ip": "42.78.124.75", "mac": "00:16:e9:d6:40:c6", "network_name": "Default", "uuid": "479b9823-2ded-11df-94e8-0015174e564c", "network": "fc38963c-a9fa-11de-8c4b-001b63a56c51"}], "vnc_port": "5900", "name": "dummy-1", "state": "stopped", "trusted": null, "os": "unknown", "vnc_password": "jBs5UT00", "vnc_ip_address": "192.168.1.111", "hardware_profile_uuid": "bcaff710-2914-11de-836c-001a929face2"} - -] -} \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/ecp/vm_put.json libcloud-0.15.1/test/compute/fixtures/ecp/vm_put.json --- libcloud-0.5.0/test/compute/fixtures/ecp/vm_put.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ecp/vm_put.json 1970-01-01 00:00:00.000000000 
+0000 @@ -1 +0,0 @@ -{"errno": 0, "message": "Success", "txid": "fc38963c-a9fa-11de-8c4b-001baaa56c51", "machine_id": "1234"} \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/elastichosts/drives_create.json libcloud-0.15.1/test/compute/fixtures/elastichosts/drives_create.json --- libcloud-0.5.0/test/compute/fixtures/elastichosts/drives_create.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/elastichosts/drives_create.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -{ - "drive": "0012e24a-6eae-4279-9912-3432f698cec8", - "encryption:cipher": "aes-xts-plain", - "name": "test drive", - "read:bytes": "4096", - "read:requests": "1", - "size": 10737418240, - "status": "active", - "user": "2164ce57-591c-43ee-ade5-e2fe0ee13c3e", - "write:bytes": "4096", - "write:requests": "1" -} \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/elastichosts/drives_info.json libcloud-0.15.1/test/compute/fixtures/elastichosts/drives_info.json --- libcloud-0.5.0/test/compute/fixtures/elastichosts/drives_info.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/elastichosts/drives_info.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -{ - "drive": "0012e24a-6eae-4279-9912-3432f698cec8", - "encryption:cipher": "aes-xts-plain", - "name": "test drive", - "read:bytes": "4096", - "read:requests": "1", - "size": 10737418240, - "status": "active", - "user": "2164ce57-591c-43ee-ade5-e2fe0ee13c3e", - "write:bytes": "4096", - "write:requests": "1" -} \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/elastichosts/servers_create.json libcloud-0.15.1/test/compute/fixtures/elastichosts/servers_create.json --- libcloud-0.5.0/test/compute/fixtures/elastichosts/servers_create.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/elastichosts/servers_create.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -{ - "boot": 
"ide:0:0", - "cpu": 2000, - "ide:0:0": "b6049e7a-aa1b-47f9-b21d-cdf2354e28d3", - "ide:0:0:read:bytes": "299696128", - "ide:0:0:read:requests": "73168", - "ide:0:0:write:bytes": "321044480", - "ide:0:0:write:requests": "78380", - "mem": 1024, - "name": "test api node", - "nic:0:block": "tcp/21 tcp/22 tcp/23 tcp/25", - "nic:0:dhcp": ["1.2.3.4", "1.2.3.5"], - "nic:0:model": "virtio", - "rx": 679560, - "rx:packets": 644, - "server": "b605ca90-c3e6-4cee-85f8-a8ebdf8f9903", - "smp": 1, - "started": 1280723696, - "status": "active", - "tx": 21271, - "tx:packets": "251", - "user": "2164ce57-591a-43ee-ade5-e2fe0ee13c3f", - "vnc:ip": "216.151.208.174", - "vnc:password": "testvncpass" -} \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/elastichosts/servers_info.json libcloud-0.15.1/test/compute/fixtures/elastichosts/servers_info.json --- libcloud-0.5.0/test/compute/fixtures/elastichosts/servers_info.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/elastichosts/servers_info.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -[ - { - "boot": "ide:0:0", - "cpu": 2000, - "ide:0:0": "b6049e7a-aa1b-47f9-b21d-cdf2354e28d3", - "ide:0:0:read:bytes": "299696128", - "ide:0:0:read:requests": "73168", - "ide:0:0:write:bytes": "321044480", - "ide:0:0:write:requests": "78380", - "mem": 1024, - "name": "test api node", - "nic:0:block": "tcp/21 tcp/22 tcp/23 tcp/25", - "nic:0:dhcp": ["1.2.3.4", "1.2.3.5"], - "nic:0:model": "virtio", - "rx": 679560, - "rx:packets": 644, - "server": "b605ca90-c3e6-4cee-85f8-a8ebdf8f9903", - "smp": 1, - "started": 1280723696, - "status": "active", - "tx": 21271, - "tx:packets": "251", - "user": "2164ce57-591a-43ee-ade5-e2fe0ee13c3f", - "vnc:ip": "216.151.208.174", - "vnc:password": "testvncpass" - } -] \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/gandi/account_info.xml libcloud-0.15.1/test/compute/fixtures/gandi/account_info.xml --- 
libcloud-0.5.0/test/compute/fixtures/gandi/account_info.xml 2011-03-24 19:47:42.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gandi/account_info.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,317 +0,0 @@ - - - - - - -handle -AB3917-GANDI - - -products - - - -errors_for_updating - -product_name_does_not_match -no_action_on_free_product - - - -can_release -1 - - -date_end - - - -product_name -shares_fixed - - -autorenew - - - -errors_for_removing - - - - -errors_for_releasing - -no_action_on_free_product -not_available_resource - - - -is_in_redemption - - - -errors_for_autorenewing - -no_action_on_free_product - - - -duration -1y - - -date_created -20101028T12:38:17 - - -quantity -12 - - -errors_for_renewing - -no_action_on_free_product - - - -id -11153 - - -redemption -7 - - - - -errors_for_updating - -no_action_on_free_product - - - -can_release -0 - - -date_end - - - -product_name -ips - - -autorenew - - - -errors_for_removing - - - - -errors_for_releasing - -no_action_on_free_product -db_can_not_release - - - -is_in_redemption - - - -errors_for_autorenewing - -no_action_on_free_product - - - -duration -1m - - -date_created -20110124T11:42:35 - - -quantity -4 - - -errors_for_renewing - -no_action_on_free_product - - - -id -11196 - - -redemption -7 - - - - - -share_definition - - -servers -1 - - -bandwidth -5120.0 - - -memory -256 - - -cores -0.25 - - -slots -0.66666666666666663 - - -disk -8192 - - - - -fullname -Aymeric Barantal - - -id -58757 - - -resources - - -available - - -shares -12 - - -servers -8 - - -ips -4 - - -bandwidth -51200.0 - - -memory -2560 - - -cores -3.0 - - -slots -4.0 - - -disk -89088 - - - - -granted - - -shares -12 - - -servers -12 - - -ips -8 - - -bandwidth -61440 - - -memory -3072 - - -cores -5.0 - - -slots -8.0 - - -disk -98304 - - - - -used - - -servers -4 - - -ips -4 - - -bandwidth -10240.0 - - -memory -512 - - -cores -2.0 - - -slots -4 - - -disk -9216 - - - - -expired - - - - - - - - - \ No newline at end of file diff -Nru 
libcloud-0.5.0/test/compute/fixtures/gandi/datacenter_list.xml libcloud-0.15.1/test/compute/fixtures/gandi/datacenter_list.xml --- libcloud-0.5.0/test/compute/fixtures/gandi/datacenter_list.xml 2011-03-24 19:47:42.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gandi/datacenter_list.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ - - - - - - - - - - - country - France - - - iso - FR - - - id - 1 - - - name - Equinix Paris - - - - - - - country - United States of America - - - iso - US - - - id - 2 - - - name - Level3 Baltimore - - - - - - - - - \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/gandi/image_list_dc0.xml libcloud-0.15.1/test/compute/fixtures/gandi/image_list_dc0.xml --- libcloud-0.5.0/test/compute/fixtures/gandi/image_list_dc0.xml 2011-03-24 19:47:42.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gandi/image_list_dc0.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,493 +0,0 @@ - - - - - - - - - - - date_updated - 20100928T10:41:38 - - - disk_id - 34198 - - - label - GandiOS - - - datacenter_id - 1 - - - visibility - all - - - os_arch - x86-32 - - - date_created - 20070101T00:00:00 - - - author_id - 248842 - - - id - 2 - - - - - - - date_updated - 20100811T16:30:06 - - - disk_id - 11233 - - - label - Mandriva 2008.0 - - - datacenter_id - 1 - - - visibility - all - - - os_arch - x86-32 - - - date_created - 20070101T00:00:00 - - - author_id - 248842 - - - id - 3 - - - - - - - date_updated - 20100811T16:30:06 - - - disk_id - 11235 - - - label - Centos 5 - - - datacenter_id - 1 - - - visibility - all - - - os_arch - x86-32 - - - date_created - 20070101T00:00:00 - - - author_id - 248842 - - - id - 4 - - - - - - - date_updated - 20100811T16:30:06 - - - disk_id - 11236 - - - label - Fedora Core 7 - - - datacenter_id - 1 - - - visibility - all - - - os_arch - x86-32 - - - date_created - 20070101T00:00:00 - - - author_id - 248842 - - - id - 5 - - - - - - - date_updated - 20100811T16:30:06 - - - disk_id - 11237 
- - - label - Open SUSE 10.3 - - - datacenter_id - 1 - - - visibility - all - - - os_arch - x86-32 - - - date_created - 20070101T00:00:00 - - - author_id - 248842 - - - id - 6 - - - - - - - date_updated - 20100811T16:30:06 - - - disk_id - 11238 - - - label - Debian 4 - - - datacenter_id - 1 - - - visibility - all - - - os_arch - x86-32 - - - date_created - 20070101T00:00:00 - - - author_id - 248842 - - - id - 7 - - - - - - - date_updated - 20100811T16:30:06 - - - disk_id - 11239 - - - label - Fedora Core 8 - - - datacenter_id - 1 - - - visibility - all - - - os_arch - x86-32 - - - date_created - 20080101T00:00:00 - - - author_id - 248842 - - - id - 8 - - - - - - - date_updated - 20100811T16:30:06 - - - disk_id - 11240 - - - label - Open SUSE 11.0 - - - datacenter_id - 1 - - - visibility - all - - - os_arch - x86-32 - - - date_created - 20080101T00:00:00 - - - author_id - 248842 - - - id - 9 - - - - - - - date_updated - 20100811T16:30:06 - - - disk_id - 11241 - - - label - Mandriva 2008.1 - - - datacenter_id - 1 - - - visibility - all - - - os_arch - x86-32 - - - date_created - 20080101T00:00:00 - - - author_id - 248842 - - - id - 10 - - - - - - - date_updated - 20100811T16:30:06 - - - disk_id - 11242 - - - label - Ubuntu 8.04 - - - datacenter_id - 1 - - - visibility - all - - - os_arch - x86-32 - - - date_created - 20080101T00:00:00 - - - author_id - 248842 - - - id - 11 - - - - - - - date_updated - 20100922T11:56:05 - - - disk_id - 23351 - - - label - Debian 5 - - - datacenter_id - 1 - - - visibility - all - - - os_arch - x86-32 - - - date_created - 20090101T00:00:00 - - - author_id - 248842 - - - id - 12 - - - - - - - date_updated - 20100811T16:30:06 - - - disk_id - 23352 - - - label - Ubuntu 9.04 - - - datacenter_id - 1 - - - visibility - all - - - os_arch - x86-32 - - - date_created - 20090101T00:00:00 - - - author_id - 248842 - - - id - 13 - - - - - - - - - \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/gandi/ip_list.xml 
libcloud-0.15.1/test/compute/fixtures/gandi/ip_list.xml --- libcloud-0.5.0/test/compute/fixtures/gandi/ip_list.xml 2011-03-24 19:47:42.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gandi/ip_list.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,261 +0,0 @@ - - - - - - - -reverse -xvm-6-186.ghst.net - - -iface_id -7857 - - -date_updated -20110120T14:57:55 - - -ip -10.5.6.186 - - -datacenter_id -1 - - -state -created - - -num -0 - - -version -4 - - -date_created -20101028T12:49:11 - - -id -9256 - - - - -reverse -xvm6-fe37-9f7b.ghst.net - - -iface_id -7857 - - -date_updated -20110120T14:58:44 - - -ip -2001:4b98:dc0:543:216:3eff:fe37:9f7b - - -datacenter_id -1 - - -state -created - - -num -1 - - -version -6 - - -date_created -20110120T14:58:44 - - -id -9294 - - - - -reverse -xvm-6-179.ghst.net - - -iface_id -7861 - - -date_updated -20110124T15:53:44 - - -ip -10.5.6.179 - - -datacenter_id -1 - - -state -created - - -num -0 - - -version -4 - - -date_created -20110124T11:43:17 - - -id -9298 - - - - -reverse -xvm6-fea8-3724.ghst.net - - -iface_id -7861 - - -date_updated -20110124T15:54:44 - - -ip -2001:4b98:dc0:543:216:3eff:fea8:3724 - - -datacenter_id -1 - - -state -created - - -num -1 - - -version -6 - - -date_created -20110124T15:54:44 - - -id -9301 - - - - -reverse - - - -iface_id - - - -date_updated -20110217T17:39:39 - - -ip - - - -datacenter_id -1 - - -state -being_created - - -num - - - -version -4 - - -date_created -20110217T17:39:39 - - -id -9323 - - - - -reverse -xvm-6-26.ghst.net - - -iface_id - - - -date_updated -20110225T11:59:55 - - -ip -10.5.6.26 - - -datacenter_id -1 - - -state -created - - -num -0 - - -version -4 - - -date_created -20110224T16:46:33 - - -id -9332 - - - - - - \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/gandi/operation_info.xml libcloud-0.15.1/test/compute/fixtures/gandi/operation_info.xml --- libcloud-0.5.0/test/compute/fixtures/gandi/operation_info.xml 2011-03-24 19:47:42.000000000 +0000 +++ 
libcloud-0.15.1/test/compute/fixtures/gandi/operation_info.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ - - - - - - -date_updated -20110324T15:49:50 - - -last_error - - - -date_start - - - -source -AB3917-GANDI - - -step -DONE - - -eta -39 - - -date_created -20110324T15:49:32 - - -type -vm_delete - - -id -637366 - - - - - \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/gandi/vm_create_from.xml libcloud-0.15.1/test/compute/fixtures/gandi/vm_create_from.xml --- libcloud-0.5.0/test/compute/fixtures/gandi/vm_create_from.xml 2011-03-24 19:47:42.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gandi/vm_create_from.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,147 +0,0 @@ - - - - - - - -iface_id - - - -date_updated -20110324T17:14:06 - - -type -disk_create - - -date_start - - - -disk_id -35170 - - -source -AB3917-GANDI - - -step -WAIT - - -ip_id - - - -date_created -20110324T17:14:06 - - -vm_id - - - -id -637370 - - - - -iface_id -8019 - - -date_updated -20110324T17:14:06 - - -vm_id - - - -date_start - - - -disk_id - - - -source -AB3917-GANDI - - -step -WAIT - - -ip_id -9298 - - -date_created -20110324T17:14:06 - - -type -iface_create - - -id -637371 - - - - -iface_id - - - -date_updated -20110324T17:14:07 - - -type -vm_create - - -date_start - - - -disk_id - - - -source -AB3917-GANDI - - -step -WAIT - - -ip_id - - - -date_created -20110324T17:14:07 - - -vm_id -250288 - - -id -637372 - - - - - - \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/gandi/vm_delete.xml libcloud-0.15.1/test/compute/fixtures/gandi/vm_delete.xml --- libcloud-0.5.0/test/compute/fixtures/gandi/vm_delete.xml 2011-03-24 19:47:42.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gandi/vm_delete.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ - - - - - - -iface_id - - - -date_updated -20110324T15:49:32 - - -vm_id -250136 - - -date_start - - - -disk_id - - - -source -AB3917-GANDI - - -step -WAIT - - -ip_id - - - 
-date_created -20110324T15:49:32 - - -type -vm_delete - - -id -637366 - - - - - \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/gandi/vm_info.xml libcloud-0.15.1/test/compute/fixtures/gandi/vm_info.xml --- libcloud-0.5.0/test/compute/fixtures/gandi/vm_info.xml 2011-04-04 00:09:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gandi/vm_info.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,330 +0,0 @@ - - - - - - -memory -256 - - -hostname -test2 - - -console -0 - - -description - - - -triggers - - - - -date_updated -20110120T15:25:07 - - -disks - - - -datacenter_id -1 - - -name -test2 - - -kernel_version -2.6.32 - - -can_snapshot - - - -kernel_cmdline - - -root -/dev/xvda1 - - -ro -1 - - -console -xvc0 - - -nosep -1 - - - - -visibility -private - - -label -Debian 5 - - -vms_id - -250133 - - - -source -23351 - - -state -running - - -is_boot_disk -1 - - -date_updated -20110120T15:02:01 - - -date_created -20110120T14:57:55 - - -type -data - - -id -34951 - - -size -3072 - - - - - -disks_id - -34951 - - - -datacenter_id -1 - - -state -running - - -flex_shares -0 - - -ai_active -0 - - -vm_max_memory -2048 - - -ifaces - - - -date_updated -20110120T14:58:44 - - -vm_id -250133 - - -bandwidth -5120.0 - - -datacenter_id -1 - - -ips - - - -reverse -xvm-6-186.ghst.net - - -iface_id -7857 - - -date_updated -20110120T14:57:55 - - -ip -10.5.6.186 - - -datacenter_id -1 - - -state -created - - -num -0 - - -version -4 - - -date_created -20101028T12:49:11 - - -id -9256 - - - - -reverse -xvm6-fe37-9f7b.ghst.net - - -iface_id -7857 - - -date_updated -20110120T14:58:44 - - -ip -2001:4b98:dc0:543:216:3eff:fe37:9f7b - - -datacenter_id -1 - - -state -created - - -num -1 - - -version -6 - - -date_created -20110120T14:58:44 - - -id -9294 - - - - - -state -used - - -num -0 - - -ips_id - -9256 -9294 - - - -date_created -20110120T14:57:55 - - -type -public - - -id -7857 - - - - - -cores -1 - - -ifaces_id - -7857 - - - -graph_urls - - -vcpu - 
-http://graph.dev.hosting.gandi.net:8080//?key=88a6b2a04f21c3b9c055d73310ee37ea47fe25c7&vm_id=379&dc_id=1&stats_target=vcpu&device_number=0 - - - -vdi - -http://graph.dev.hosting.gandi.net:8080//?key=88a6b2a04f21c3b9c055d73310ee37ea47fe25c7&vm_id=379&dc_id=1&stats_target=vdi&device_number=0 - - - -vif - -http://graph.dev.hosting.gandi.net:8080//?key=88a6b2a04f21c3b9c055d73310ee37ea47fe25c7&vm_id=379&dc_id=1&stats_target=vif&device_number=0 - - - - - -date_created -20110120T14:57:55 - - -id -250133 - - - - - \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/gandi/vm_list.xml libcloud-0.15.1/test/compute/fixtures/gandi/vm_list.xml --- libcloud-0.5.0/test/compute/fixtures/gandi/vm_list.xml 2011-03-24 19:47:42.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gandi/vm_list.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,141 +0,0 @@ - - - - - - - -memory -256 - - -console -0 - - -description - - - -date_updated -20110120T15:25:07 - - -hostname -test1 - - -disks_id - -34951 - - - -datacenter_id -1 - - -state -running - - -flex_shares -0 - - -ai_active -0 - - -vm_max_memory -2048 - - -cores -1 - - -ifaces_id - -7857 - - - -date_created -20110120T14:57:55 - - -id -250133 - - - - -memory -256 - - -console -0 - - -description - - - -date_updated -20110225T12:09:31 - - -hostname -test2 - - -disks_id - -34954 - - - -datacenter_id -1 - - -state -halted - - -flex_shares -0 - - -ai_active -0 - - -vm_max_memory -2048 - - -cores -1 - - -ifaces_id - -7861 - - - -date_created -20110124T15:53:44 - - -id -250136 - - - - - - \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/gandi/vm_reboot.xml libcloud-0.15.1/test/compute/fixtures/gandi/vm_reboot.xml --- libcloud-0.5.0/test/compute/fixtures/gandi/vm_reboot.xml 2011-04-04 00:09:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gandi/vm_reboot.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ - - - - - - -iface_id - - - -date_updated -20110325T13:18:27 - - -vm_id 
-250133 - - -date_start - - - -disk_id - - - -source -AB3917-GANDI - - -step -WAIT - - -ip_id - - - -date_created -20110325T13:18:27 - - -type -vm_reboot - - -id -637398 - - - - - \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/gandi/vm_stop.xml libcloud-0.15.1/test/compute/fixtures/gandi/vm_stop.xml --- libcloud-0.5.0/test/compute/fixtures/gandi/vm_stop.xml 2011-04-04 00:09:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gandi/vm_stop.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ - - - - - - -iface_id - - - -date_updated -20110325T13:19:52 - - -vm_id -250133 - - -date_start - - - -disk_id - - - -source -AB3917-GANDI - - -step -WAIT - - -ip_id - - - -date_created -20110325T13:19:52 - - -type -vm_stop - - -id -637399 - - - - - \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/gogrid/image_list.json libcloud-0.15.1/test/compute/fixtures/gogrid/image_list.json --- libcloud-0.5.0/test/compute/fixtures/gogrid/image_list.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gogrid/image_list.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,180 +0,0 @@ -{ - "list": [ - { - "billingtokens": [ - { - "id": 46, - "name": "CentOS 5.3 32bit", - "price": 0 - } - ], - "description": "CentOS 5.3 (32-bit) w/ None", - "friendlyName": "CentOS 5.3 (32-bit) w/ None", - "id": 1531, - "isActive": true, - "isPublic": true, - "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", - "name": "centos5.3_32_base", - "object": "serverimage", - "os": { - "description": "CentOS 5.3 (32-bit)", - "id": 16, - "name": "CentOS 5.3 (32-bit)", - "object": "option" - }, - "owner": { - "id": -1, - "name": "GoGrid", - "object": "customer" - }, - "price": 0, - "state": { - "description": "Image is available for adds", - "id": 2, - "name": "Available", - "object": "option" - }, - "type": { - "description": "Web or Application Server", - "id": 1, - "name": "Web Server", - "object": "option" - }, 
- "updatedTime": 1257789046453 - }, - { - "billingtokens": [ - { - "id": 47, - "name": "CentOS 5.3 64bit", - "price": 0 - } - ], - "description": "CentOS 5.3 (64-bit) w/ None", - "friendlyName": "CentOS 5.3 (64-bit) w/ None", - "id": 1532, - "isActive": true, - "isPublic": true, - "location": "gogrid/GSI-939ef909-84b8-4a2f-ad56-02ccd7da05ff.img", - "name": "centos5.3_64_base", - "object": "serverimage", - "os": { - "description": "CentOS 5.3 (64-bit)", - "id": 17, - "name": "CentOS 5.3 (64-bit)", - "object": "option" - }, - "owner": { - "id": -1, - "name": "GoGrid", - "object": "customer" - }, - "price": 0, - "state": { - "description": "Image is available for adds", - "id": 2, - "name": "Available", - "object": "option" - }, - "type": { - "description": "Web or Application Server", - "id": 1, - "name": "Web Server", - "object": "option" - }, - "updatedTime": 1257789076417 - }, - { - "billingtokens": [ - { - "id": 48, - "name": "RHEL 5.4 32bit", - "price": 0 - } - ], - "description": "RHEL 5.4 (32-bit) w/ None", - "friendlyName": "RHEL 5.4 (32-bit) w/ None", - "id": 1533, - "isActive": true, - "isPublic": true, - "location": "gogrid/GSI-4c88cb92-dd7b-4bb1-95b6-7cc93eb1d2aa.img", - "name": "rhel5.4_32_base", - "object": "serverimage", - "os": { - "description": "RHEL 5.4 (32-bit)", - "id": 18, - "name": "RHEL 5.4 (32-bit)", - "object": "option" - }, - "owner": { - "id": -1, - "name": "GoGrid", - "object": "customer" - }, - "price": 0, - "state": { - "description": "Image is available for adds", - "id": 2, - "name": "Available", - "object": "option" - }, - "type": { - "description": "Web or Application Server", - "id": 1, - "name": "Web Server", - "object": "option" - }, - "updatedTime": 1257789076417 - }, - { - "billingtokens": [ - { - "id": 49, - "name": "RHEL 5.4 64bit", - "price": 0 - } - ], - "description": "RHEL 5.4 (64-bit) w/ None", - "friendlyName": "RHEL 5.4 (64-bit) w/ None", - "id": 1534, - "isActive": true, - "isPublic": true, - "location": 
"gogrid/GSI-2bd8ddb3-cc53-4a76-8188-0dce7537a422.img", - "name": "rhel5.4_64_base", - "object": "serverimage", - "os": { - "description": "RHEL 5.4 (64-bit)", - "id": 19, - "name": "RHEL 5.4 (64-bit)", - "object": "option" - }, - "owner": { - "id": -1, - "name": "GoGrid", - "object": "customer" - }, - "price": 0, - "state": { - "description": "Image is available for adds", - "id": 2, - "name": "Available", - "object": "option" - }, - "type": { - "description": "Web or Application Server", - "id": 1, - "name": "Web Server", - "object": "option" - }, - "updatedTime": 1257789076417 - } - ], - "method": "/grid/image/list", - "status": "success", - "summary": { - "numpages": 0, - "returned": 59, - "start": 0, - "total": 59 - } -} diff -Nru libcloud-0.5.0/test/compute/fixtures/gogrid/image_save.json libcloud-0.15.1/test/compute/fixtures/gogrid/image_save.json --- libcloud-0.5.0/test/compute/fixtures/gogrid/image_save.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gogrid/image_save.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,62 +0,0 @@ -{ - "list": [ - { - "architecture": { - "description": "32 bit OS", - "id": 1, - "name": "32-bit", - "object": "option" - }, - "billingtokens": [ - { - "id": 46, - "name": "CentOS 5.3 32bit", - "object": "billingtoken", - "price": 0 - } - ], - "createdTime": 1289119839685, - "description": "", - "friendlyName": "testimage", - "id": 5050, - "isActive": true, - "isPublic": false, - "location": "123/GSI-3ee65927-f80d-43df-92df-6c7e352f009c.img", - "name": "GSI-3ee65927-f80d-43df-92df-6c7e352f009c", - "object": "serverimage", - "os": { - "description": "CentOS 5.3 (32-bit)", - "id": 16, - "name": "CentOS 5.3 (32-bit)", - "object": "option" - }, - "owner": { - "id": 123, - "name": "name", - "object": "customer" - }, - "price": 0, - "state": { - "description": "Image is being saved", - "id": 1, - "name": "Saving", - "object": "option" - }, - "type": { - "description": "Web or Application Server", - "id": 1, 
- "name": "Web Server", - "object": "option" - }, - "updatedTime": 1289119839685 - } - ], - "method": "/grid/image/save", - "status": "success", - "summary": { - "numpages": 0, - "returned": 1, - "start": 0, - "total": 1 - } -} diff -Nru libcloud-0.5.0/test/compute/fixtures/gogrid/ip_list_empty.json libcloud-0.15.1/test/compute/fixtures/gogrid/ip_list_empty.json --- libcloud-0.5.0/test/compute/fixtures/gogrid/ip_list_empty.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gogrid/ip_list_empty.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -{ - "list": [ - ], - "method": "/grid/ip/list", - "status": "success", - "summary": { - "numpages": 0, - "returned": 0, - "start": 0, - "total": 0 - } -} diff -Nru libcloud-0.5.0/test/compute/fixtures/gogrid/ip_list.json libcloud-0.15.1/test/compute/fixtures/gogrid/ip_list.json --- libcloud-0.5.0/test/compute/fixtures/gogrid/ip_list.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gogrid/ip_list.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,69 +0,0 @@ -{ - "list": [ - { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 5348099, - "ip": "192.168.75.66", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "192.168.75.64/255.255.255.240" - }, - { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 5348100, - "ip": "192.168.75.67", - "object": "ip", - "public": true, - "state": { - "description": "IP is reserved or in use", - "id": 2, - "name": "Assigned", - "object": "option" - }, - "subnet": "192.168.75.64/255.255.255.240" - }, - { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 5348101, - "ip": "192.168.75.68", - 
"object": "ip", - "public": false, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "192.168.75.64/255.255.255.240" - } - ], - "method": "/grid/ip/list", - "status": "success", - "summary": { - "numpages": 0, - "returned": 3, - "start": 0, - "total": 3 - } -} diff -Nru libcloud-0.5.0/test/compute/fixtures/gogrid/lookup_list_ip_datacenter.json libcloud-0.15.1/test/compute/fixtures/gogrid/lookup_list_ip_datacenter.json --- libcloud-0.5.0/test/compute/fixtures/gogrid/lookup_list_ip_datacenter.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gogrid/lookup_list_ip_datacenter.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -{ - "list": [ - { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - { - "description": "US East 1 Datacenter", - "id": 2, - "name": "US-East-1", - "object": "option" - } - ], - "method": "/common/lookup/list", - "status": "success", - "summary": { - "numpages": 0, - "returned": 2, - "start": 0, - "total": 2 - } -} diff -Nru libcloud-0.5.0/test/compute/fixtures/gogrid/password_list.json libcloud-0.15.1/test/compute/fixtures/gogrid/password_list.json --- libcloud-0.5.0/test/compute/fixtures/gogrid/password_list.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gogrid/password_list.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,102 +0,0 @@ -{ - "list": [ - { - "password": "bebebe", - "object": "password", - "username": "root", - "server": { - "id": 90967, - "image": { - "billingtokens": [ - { - "id": 46, - "name": "CentOS 5.3 32bit", - "price": 0 - } - ], - "description": "CentOS 5.3 (32-bit) w/ None", - "friendlyName": "CentOS 5.3 (32-bit) w/ None", - "id": 1531, - "isActive": true, - "isPublic": true, - "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", - "name": "centos5.3_32_base", - "object": "serverimage", - "os": { - 
"description": "CentOS 5.3 (32-bit)", - "id": 16, - "name": "CentOS 5.3 (32-bit)", - "object": "option" - }, - "owner": { - "id": -1, - "name": "GoGrid", - "object": "customer" - }, - "price": 0, - "state": { - "description": "Image is available for adds", - "id": 2, - "name": "Available", - "object": "option" - }, - "type": { - "description": "Web or Application Server", - "id": 1, - "name": "Web Server", - "object": "option" - }, - "updatedTime": 1257789046453 - }, - "ip": { - "id": 1659927, - "ip": "192.168.0.202", - "object": "ip", - "public": true, - "state": { - "description": "IP is reserved or in use", - "id": 2, - "name": "Assigned", - "object": "option" - }, - "subnet": "192.168.0.192/255.255.255.240" - }, - "isSandbox": false, - "name": "test1", - "object": "server", - "os": { - "description": "CentOS 5.3 (32-bit)", - "id": 16, - "name": "CentOS 5.3 (32-bit)", - "object": "option" - }, - "ram": { - "description": "Server with 512MB RAM", - "id": 1, - "name": "512MB", - "object": "option" - }, - "state": { - "description": "Server is in active state.", - "id": 1, - "name": "On", - "object": "option" - }, - "type": { - "description": "Web or Application Server", - "id": 1, - "name": "Web Server", - "object": "option" - } - } - } - ], - "method": "/grid/server/list", - "status": "success", - "summary": { - "numpages": 0, - "returned": 1, - "start": 0, - "total": 1 - } -} diff -Nru libcloud-0.5.0/test/compute/fixtures/gogrid/server_add.json libcloud-0.15.1/test/compute/fixtures/gogrid/server_add.json --- libcloud-0.5.0/test/compute/fixtures/gogrid/server_add.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gogrid/server_add.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,96 +0,0 @@ -{ - "list": [ - { - "image": { - "billingtokens": [ - { - "id": 46, - "name": "CentOS 5.3 32bit", - "price": 0 - } - ], - "description": "CentOS 5.3 (32-bit) w/ None", - "friendlyName": "CentOS 5.3 (32-bit) w/ None", - "id": 1531, - "isActive": 
true, - "isPublic": true, - "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", - "name": "centos5.3_32_base", - "object": "serverimage", - "os": { - "description": "CentOS 5.3 (32-bit)", - "id": 16, - "name": "CentOS 5.3 (32-bit)", - "object": "option" - }, - "owner": { - "id": -1, - "name": "GoGrid", - "object": "customer" - }, - "price": 0, - "state": { - "description": "Image is available for adds", - "id": 2, - "name": "Available", - "object": "option" - }, - "type": { - "description": "Web or Application Server", - "id": 1, - "name": "Web Server", - "object": "option" - }, - "updatedTime": 1257789046453 - }, - "ip": { - "id": 1659927, - "ip": "192.168.0.202", - "object": "ip", - "public": true, - "state": { - "description": "IP is reserved or in use", - "id": 2, - "name": "Assigned", - "object": "option" - }, - "subnet": "192.168.0.192/255.255.255.240" - }, - "isSandbox": false, - "name": "test1", - "object": "server", - "os": { - "description": "CentOS 5.3 (32-bit)", - "id": 16, - "name": "CentOS 5.3 (32-bit)", - "object": "option" - }, - "ram": { - "description": "Server with 512MB RAM", - "id": 1, - "name": "512MB", - "object": "option" - }, - "state": { - "description": "Server is in active state.", - "id": 1, - "name": "On", - "object": "option" - }, - "type": { - "description": "Web or Application Server", - "id": 1, - "name": "Web Server", - "object": "option" - } - } - ], - "method": "/grid/server/add", - "status": "success", - "summary": { - "numpages": 0, - "returned": 1, - "start": 0, - "total": 1 - } -} diff -Nru libcloud-0.5.0/test/compute/fixtures/gogrid/server_delete.json libcloud-0.15.1/test/compute/fixtures/gogrid/server_delete.json --- libcloud-0.5.0/test/compute/fixtures/gogrid/server_delete.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gogrid/server_delete.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,97 +0,0 @@ -{ - "list": [ - { - "id": 90967, - "image": { - "billingtokens": [ - { - 
"id": 46, - "name": "CentOS 5.3 32bit", - "price": 0 - } - ], - "description": "CentOS 5.3 (32-bit) w/ None", - "friendlyName": "CentOS 5.3 (32-bit) w/ None", - "id": 1531, - "isActive": true, - "isPublic": true, - "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", - "name": "centos5.3_32_base", - "object": "serverimage", - "os": { - "description": "CentOS 5.3 (32-bit)", - "id": 16, - "name": "CentOS 5.3 (32-bit)", - "object": "option" - }, - "owner": { - "id": -1, - "name": "GoGrid", - "object": "customer" - }, - "price": 0, - "state": { - "description": "Image is available for adds", - "id": 2, - "name": "Available", - "object": "option" - }, - "type": { - "description": "Web or Application Server", - "id": 1, - "name": "Web Server", - "object": "option" - }, - "updatedTime": 1257789046453 - }, - "ip": { - "id": 1659927, - "ip": "192.168.0.202", - "object": "ip", - "public": true, - "state": { - "description": "IP is reserved or in use", - "id": 2, - "name": "Assigned", - "object": "option" - }, - "subnet": "192.168.0.192/255.255.255.240" - }, - "isSandbox": false, - "name": "test1", - "object": "server", - "os": { - "description": "CentOS 5.3 (32-bit)", - "id": 16, - "name": "CentOS 5.3 (32-bit)", - "object": "option" - }, - "ram": { - "description": "Server with 512MB RAM", - "id": 1, - "name": "512MB", - "object": "option" - }, - "state": { - "description": "Server is in active state.", - "id": 1, - "name": "On", - "object": "option" - }, - "type": { - "description": "Web or Application Server", - "id": 1, - "name": "Web Server", - "object": "option" - } - } - ], - "method": "/grid/server/delete", - "status": "success", - "summary": { - "numpages": 0, - "returned": 1, - "start": 0, - "total": 1 - } -} diff -Nru libcloud-0.5.0/test/compute/fixtures/gogrid/server_edit.json libcloud-0.15.1/test/compute/fixtures/gogrid/server_edit.json --- libcloud-0.5.0/test/compute/fixtures/gogrid/server_edit.json 2011-03-24 19:47:22.000000000 +0000 +++ 
libcloud-0.15.1/test/compute/fixtures/gogrid/server_edit.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,97 +0,0 @@ -{ - "list": [ - { - "id": 90967, - "image": { - "billingtokens": [ - { - "id": 46, - "name": "CentOS 5.3 32bit", - "price": 0 - } - ], - "description": "CentOS 5.3 (32-bit) w/ None", - "friendlyName": "CentOS 5.3 (32-bit) w/ None", - "id": 1531, - "isActive": true, - "isPublic": true, - "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", - "name": "centos5.3_32_base", - "object": "serverimage", - "os": { - "description": "CentOS 5.3 (32-bit)", - "id": 16, - "name": "CentOS 5.3 (32-bit)", - "object": "option" - }, - "owner": { - "id": -1, - "name": "GoGrid", - "object": "customer" - }, - "price": 0, - "state": { - "description": "Image is available for adds", - "id": 2, - "name": "Available", - "object": "option" - }, - "type": { - "description": "Web or Application Server", - "id": 1, - "name": "Web Server", - "object": "option" - }, - "updatedTime": 1257789046453 - }, - "ip": { - "id": 1659927, - "ip": "192.168.0.202", - "object": "ip", - "public": true, - "state": { - "description": "IP is reserved or in use", - "id": 2, - "name": "Assigned", - "object": "option" - }, - "subnet": "192.168.0.192/255.255.255.240" - }, - "isSandbox": false, - "name": "test1", - "object": "server", - "os": { - "description": "CentOS 5.3 (32-bit)", - "id": 16, - "name": "CentOS 5.3 (32-bit)", - "object": "option" - }, - "ram": { - "description": "Server with 512MB RAM", - "id": 1, - "name": "512MB", - "object": "option" - }, - "state": { - "description": "Server is in active state.", - "id": 1, - "name": "On", - "object": "option" - }, - "type": { - "description": "Web or Application Server", - "id": 1, - "name": "Web Server", - "object": "option" - } - } - ], - "method": "/grid/server/edit", - "status": "success", - "summary": { - "numpages": 0, - "returned": 1, - "start": 0, - "total": 1 - } -} diff -Nru 
libcloud-0.5.0/test/compute/fixtures/gogrid/server_list.json libcloud-0.15.1/test/compute/fixtures/gogrid/server_list.json --- libcloud-0.5.0/test/compute/fixtures/gogrid/server_list.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gogrid/server_list.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,97 +0,0 @@ -{ - "list": [ - { - "id": 90967, - "image": { - "billingtokens": [ - { - "id": 46, - "name": "CentOS 5.3 32bit", - "price": 0 - } - ], - "description": "CentOS 5.3 (32-bit) w/ None", - "friendlyName": "CentOS 5.3 (32-bit) w/ None", - "id": 1531, - "isActive": true, - "isPublic": true, - "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", - "name": "centos5.3_32_base", - "object": "serverimage", - "os": { - "description": "CentOS 5.3 (32-bit)", - "id": 16, - "name": "CentOS 5.3 (32-bit)", - "object": "option" - }, - "owner": { - "id": -1, - "name": "GoGrid", - "object": "customer" - }, - "price": 0, - "state": { - "description": "Image is available for adds", - "id": 2, - "name": "Available", - "object": "option" - }, - "type": { - "description": "Web or Application Server", - "id": 1, - "name": "Web Server", - "object": "option" - }, - "updatedTime": 1257789046453 - }, - "ip": { - "id": 1659927, - "ip": "192.168.0.202", - "object": "ip", - "public": true, - "state": { - "description": "IP is reserved or in use", - "id": 2, - "name": "Assigned", - "object": "option" - }, - "subnet": "192.168.0.192/255.255.255.240" - }, - "isSandbox": false, - "name": "test1", - "object": "server", - "os": { - "description": "CentOS 5.3 (32-bit)", - "id": 16, - "name": "CentOS 5.3 (32-bit)", - "object": "option" - }, - "ram": { - "description": "Server with 512MB RAM", - "id": 1, - "name": "512MB", - "object": "option" - }, - "state": { - "description": "Server is in active state.", - "id": 1, - "name": "On", - "object": "option" - }, - "type": { - "description": "Web or Application Server", - "id": 1, - "name": "Web Server", - 
"object": "option" - } - } - ], - "method": "/grid/server/list", - "status": "success", - "summary": { - "numpages": 0, - "returned": 1, - "start": 0, - "total": 1 - } -} diff -Nru libcloud-0.5.0/test/compute/fixtures/gogrid/server_power_fail.json libcloud-0.15.1/test/compute/fixtures/gogrid/server_power_fail.json --- libcloud-0.5.0/test/compute/fixtures/gogrid/server_power_fail.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gogrid/server_power_fail.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,97 +0,0 @@ -{ - "list": [ - { - "id": 90967, - "image": { - "billingtokens": [ - { - "id": 46, - "name": "CentOS 5.3 32bit", - "price": 0 - } - ], - "description": "CentOS 5.3 (32-bit) w/ None", - "friendlyName": "CentOS 5.3 (32-bit) w/ None", - "id": 1531, - "isActive": true, - "isPublic": true, - "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", - "name": "centos5.3_32_base", - "object": "serverimage", - "os": { - "description": "CentOS 5.3 (32-bit)", - "id": 16, - "name": "CentOS 5.3 (32-bit)", - "object": "option" - }, - "owner": { - "id": -1, - "name": "GoGrid", - "object": "customer" - }, - "price": 0, - "state": { - "description": "Image is available for adds", - "id": 2, - "name": "Available", - "object": "option" - }, - "type": { - "description": "Web or Application Server", - "id": 1, - "name": "Web Server", - "object": "option" - }, - "updatedTime": 1257789046453 - }, - "ip": { - "id": 1659927, - "ip": "192.168.0.202", - "object": "ip", - "public": true, - "state": { - "description": "IP is reserved or in use", - "id": 2, - "name": "Assigned", - "object": "option" - }, - "subnet": "192.168.0.192/255.255.255.240" - }, - "isSandbox": false, - "name": "test1", - "object": "server", - "os": { - "description": "CentOS 5.3 (32-bit)", - "id": 16, - "name": "CentOS 5.3 (32-bit)", - "object": "option" - }, - "ram": { - "description": "Server with 512MB RAM", - "id": 1, - "name": "512MB", - "object": "option" - }, - 
"state": { - "description": "Server is in active state.", - "id": 1, - "name": "On", - "object": "option" - }, - "type": { - "description": "Web or Application Server", - "id": 1, - "name": "Web Server", - "object": "option" - } - } - ], - "method": "/grid/server/power", - "status": "failure", - "summary": { - "numpages": 0, - "returned": 1, - "start": 0, - "total": 1 - } -} diff -Nru libcloud-0.5.0/test/compute/fixtures/gogrid/server_power.json libcloud-0.15.1/test/compute/fixtures/gogrid/server_power.json --- libcloud-0.5.0/test/compute/fixtures/gogrid/server_power.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/gogrid/server_power.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,97 +0,0 @@ -{ - "list": [ - { - "id": 90967, - "image": { - "billingtokens": [ - { - "id": 46, - "name": "CentOS 5.3 32bit", - "price": 0 - } - ], - "description": "CentOS 5.3 (32-bit) w/ None", - "friendlyName": "CentOS 5.3 (32-bit) w/ None", - "id": 1531, - "isActive": true, - "isPublic": true, - "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", - "name": "centos5.3_32_base", - "object": "serverimage", - "os": { - "description": "CentOS 5.3 (32-bit)", - "id": 16, - "name": "CentOS 5.3 (32-bit)", - "object": "option" - }, - "owner": { - "id": -1, - "name": "GoGrid", - "object": "customer" - }, - "price": 0, - "state": { - "description": "Image is available for adds", - "id": 2, - "name": "Available", - "object": "option" - }, - "type": { - "description": "Web or Application Server", - "id": 1, - "name": "Web Server", - "object": "option" - }, - "updatedTime": 1257789046453 - }, - "ip": { - "id": 1659927, - "ip": "192.168.0.202", - "object": "ip", - "public": true, - "state": { - "description": "IP is reserved or in use", - "id": 2, - "name": "Assigned", - "object": "option" - }, - "subnet": "192.168.0.192/255.255.255.240" - }, - "isSandbox": false, - "name": "test1", - "object": "server", - "os": { - "description": "CentOS 5.3 (32-bit)", 
- "id": 16, - "name": "CentOS 5.3 (32-bit)", - "object": "option" - }, - "ram": { - "description": "Server with 512MB RAM", - "id": 1, - "name": "512MB", - "object": "option" - }, - "state": { - "description": "Server is in active state.", - "id": 1, - "name": "On", - "object": "option" - }, - "type": { - "description": "Web or Application Server", - "id": 1, - "name": "Web Server", - "object": "option" - } - } - ], - "method": "/grid/server/power", - "status": "success", - "summary": { - "numpages": 0, - "returned": 1, - "start": 0, - "total": 1 - } -} diff -Nru libcloud-0.5.0/test/compute/fixtures/ibm_sbc/create.xml libcloud-0.15.1/test/compute/fixtures/ibm_sbc/create.xml --- libcloud-0.5.0/test/compute/fixtures/ibm_sbc/create.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ibm_sbc/create.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -28558128558RationalInsight4woodser@us.ibm.com11LARGEMyPublicKey02010-04-19T10:03:34.327-04:002010-04-26T10:03:43.610-04:00SUSE Linux Enterprise10 SP2OS diff -Nru libcloud-0.5.0/test/compute/fixtures/ibm_sbc/delete.xml libcloud-0.15.1/test/compute/fixtures/ibm_sbc/delete.xml --- libcloud-0.5.0/test/compute/fixtures/ibm_sbc/delete.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ibm_sbc/delete.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ - diff -Nru libcloud-0.5.0/test/compute/fixtures/ibm_sbc/images.xml libcloud-0.15.1/test/compute/fixtures/ibm_sbc/images.xml --- libcloud-0.5.0/test/compute/fixtures/ibm_sbc/images.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ibm_sbc/images.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ - -2fd2d0478b132490897526b9b4433a334Rational Build Forge Agent11SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2Rational Build Forge provides an adaptive process execution framework that automates, orchestrates, manages, and tracks all the processes between each handoff within the assembly line 
of software development, creating an automated software factory.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{A233F5A0-05A5-F21D-3E92-3793B722DFBD}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{A233F5A0-05A5-F21D-3E92-3793B722DFBD}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-04-25T00:00:00.000-04:00384e900960c3d4b648fa6d4670aed2cd1SUSE 10 SP211SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2SuSE v10.2 Base OS Imagehttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{07F112A1-84A7-72BF-B8FD-B36011E0E433}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{07F112A1-84A7-72BF-B8FD-B36011E0E433}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-04-25T00:00:00.000-04:0015a72d3e7bb1cb4942ab0da2968e2e77bbWebSphere Application Server and Rational Agent Controller11SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2WebSphere Application Server and Rational Agent Controller enables a performance based foundation to build, reuse, run, integrate and manage Service Oriented Architecture (SOA) applications and services.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{86E8E71D-29A3-86DE-8A26-792C5E839D92}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{86E8E71D-29A3-86DE-8A26-792C5E839D92}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-04-25T00:00:00.000-04:00117da905ba0fdf4d8b8f94e7f4ef43c1beRational Insight11SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2Rational Insight helps organizations reduce time to market, improve quality, and take greater control of software and systems development and delivery. 
It provides objective dashboards and best practice metrics to identify risks, status, and trends.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{4F774DCF-1469-EAAB-FBC3-64AE241CF8E8}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{4F774DCF-1469-EAAB-FBC3-64AE241CF8E8}/1.0/GettingStarted.htmlLARGE2009-04-25T00:00:00.000-04:0018edf7ad43f75943b1b0c0f915dba8d86cDB2 Express-C11SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2DB2 Express-C is an entry-level edition of the DB2 database server for the developer community. It has standard relational functionality and includes pureXML, and other features of DB2 for Linux, Unix, and Windows (LUW).https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{E69488DE-FB79-63CD-E51E-79505A1309BD}/2.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{E69488DE-FB79-63CD-E51E-79505A1309BD}/2.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-04-25T00:00:00.000-04:0021c03be6800bf043c0b44c584545e04099Informix Dynamic Server Developer Edition11SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2Informix Dynamic Server (IDS) Developer Edition is a development version of the IDS Enterprise Edition. 
IDS is designed to meet the database server needs of small-size to large-size enterprise businesses.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{9B0C8F66-9639-CA0A-0A94-7928D7DAD6CB}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{9B0C8F66-9639-CA0A-0A94-7928D7DAD6CB}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-04-25T00:00:00.000-04:00229b2b6482ba374a6ab4bb3585414a910aWebSphere sMash with AppBuilder11SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2WebSphere sMashĀ® provides a web platform that includes support for dynamic scripting in PHP and Groovy.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{88E74AC6-9CCB-2710-7E9B-936DA2CE496C}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{88E74AC6-9CCB-2710-7E9B-936DA2CE496C}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-04-25T00:00:00.000-04:001000150416662e71fae44bdba4d7bb502a09c5e7DB2 Enterprise V9.7 (32-bit, 90-day trial)11leonsp@ca.ibm.comPUBLICi386SuSE v10.2DB2 Enterprise V9.7 (32-bit, 90-day trial)https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{38F2AB86-9F03-E463-024D-A9ABC3AE3831}/2.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{38F2AB86-9F03-E463-024D-A9ABC3AE3831}/2.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-11-09T17:01:28.000-05:00100020639da8863714964624b8b13631642c785bRHEL 5.4 Base OS11youngdj@us.ibm.comPUBLICi386Redhat Enterprise Linux (32-bit)/5.4Red Hat Enterprise Linux 5.4 Base OShttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{34904879-E794-A2D8-2D7C-2E8D6AD6AE77}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{34904879-E794-A2D8-2D7C-2E8D6AD6AE77}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-11-18T13:51:12.000-05:0010002573e5f09a64667e4faeaf3ac661600ec6caRational Build Forge11leighw@us.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2Rational Build 
Forge provides an adaptive process execution framework that automates, orchestrates, manages, and tracks all the processes between each handoff within the assembly line of software development, creating an automated software factory.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{22E039C6-108E-B626-ECC9-E2C9B62479FF}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{22E039C6-108E-B626-ECC9-E2C9B62479FF}/1.0/GettingStarted.htmlMEDIUMLARGE2009-12-08T16:34:37.000-05:00100030563e276d758ed842caafe77770d60dedeaRational Asset Manager 7.2.0.111gmendel@us.ibm.comPUBLICi386Redhat Enterprise Linux (32-bit)/5.4Rational Asset Manager helps to create, modify, govern, find and reuse development assets, including SOA and systems development assets. It facilitates the reuse of all types of software development related assets, potentially saving development time.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{296C6DDF-B87B-327B-3E5A-F2C50C353A69}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{296C6DDF-B87B-327B-3E5A-F2C50C353A69}/1.0/GettingStarted.htmlMEDIUMLARGE2009-12-14T14:30:57.000-05:0010003854e3067f999edf4914932295cfb5f79d59WebSphere Portal/WCM 6.1.511mlamb@us.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2IBMĀ® WebSphereĀ® Portal Server enables you to quickly consolidate applications and content into role-based applications, complete with search, personalization, and security capabilities.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{279F3E12-A7EF-0768-135B-F08B66DF8F71}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{279F3E12-A7EF-0768-135B-F08B66DF8F71}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2010-01-12T18:06:29.000-05:00100038640112efd8f1e144998f2a70a165d00bd3Rational Quality Manager11brownms@gmail.comPUBLICi386Redhat Enterprise Linux (32-bit)/5.4Rational Quality Manager 
provides a collaborative application lifecycle management (ALM) environment for test planning, construction, and execution.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{9DA927BA-2CEF-1686-71B0-2BAC468B7445}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{9DA927BA-2CEF-1686-71B0-2BAC468B7445}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2010-01-15T09:40:12.000-05:00100038653fbf6936e5cb42b5959ad9837add054fIBM Mashup Center with IBM Lotus Widget Factory11mgilmore@us.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2IBM Mashup Center is an end-to-end enterprise mashup platform, supporting rapid assembly of dynamic web applications with the management, security, and governance capabilities.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{0F867D03-588B-BA51-4E18-4CE9D11AECFC}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{0F867D03-588B-BA51-4E18-4CE9D11AECFC}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2010-01-15T10:44:24.000-05:0010003780425e2dfef95647498561f98c4de356abRational Team Concert11sonia_dimitrov@ca.ibm.comPUBLICi386Redhat Enterprise Linux (32-bit)/5.4Rational Team Concert is a collaborative software delivery environment that empowers project teams to simplify, automate and govern software delivery.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{679CA6F5-1E8E-267B-0C84-F7B0B41DF1DC}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{679CA6F5-1E8E-267B-0C84-F7B0B41DF1DC}/1.0/GettingStarted.htmlMEDIUMLARGE2010-01-19T14:13:58.000-05:0010003785c4867b72f2fc43fe982e76c76c32efaaLotus Forms Turbo 3.5.111rlintern@ca.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2Lotus Forms Turbo requires no training and is designed to help customers address basic form software requirements such as surveys, applications, feedback, orders, request for submission, and more - without involvement from the IT 
department.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{846AD7D3-9A0F-E02C-89D2-BE250CAE2318}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{846AD7D3-9A0F-E02C-89D2-BE250CAE2318}/1.0/GettingStarted.htmlLARGE2010-01-22T13:27:08.000-05:0010005598Rational Requirements Composer11mutdosch@us.ibm.comPUBLICi386Redhat Enterprise Linux (32-bit)/5.4Rational Requirements Composer helps teams define and use requirements effectively across the project lifecycle.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{28C7B870-2C0A-003F-F886-B89F5B413B77}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{28C7B870-2C0A-003F-F886-B89F5B413B77}/1.0/GettingStarted.htmlMEDIUMLARGE2010-02-08T11:43:18.000-05:0010007509Rational Software Architecture11danberg@us.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2Rational Software Architect for WebSphere with the Cloud Client plug-ins created on 2/22/10 8:06 PMhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{2C6FB6D2-CB87-C4A0-CDE0-5AAF03E214B2}/1.1/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{2C6FB6D2-CB87-C4A0-CDE0-5AAF03E214B2}/1.1/GettingStarted.htmlLARGE2010-02-22T20:03:18.000-05:0010008319WebSphere Feature Pack for OSGi Apps and JPA 2.011radavenp@us.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2IBM WebSphere Application Server V7.0 Fix Pack 7, Feature Pack for OSGi Applications and Java Persistence API 2.0 Open Beta, and Feature Pack for Service Component Architecture (SCA) V1.0.1 Fix Pack V1.0.1.1https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{A397B7CD-A1C7-1956-7AEF-6AB495E37958}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{A397B7CD-A1C7-1956-7AEF-6AB495E37958}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2010-03-14T21:06:38.000-04:0010008273Rational Software Architect for 
WebSphere11danberg@us.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2Rational Software Architect for WebSphere with the Cloud Client plug-ins created on 3/15/10 12:21 PMhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{839D92BB-DEA5-9820-8E2E-AE5D0A6DEAE3}/1.1/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{839D92BB-DEA5-9820-8E2E-AE5D0A6DEAE3}/1.1/GettingStarted.htmlLARGE2010-03-15T12:17:26.000-04:0010008404Rational Application Developer11khiamt@ca.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2An Eclipse-based IDE with visual development features that helps Java developers rapidly design, develop, assemble, test, profile and deploy high quality Java/J2EE, Portal, Web/Web 2.0, Web services and SOA applications. (03/16/2010)https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{6A957586-A17A-4927-7C71-0FDE280DB66B}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{6A957586-A17A-4927-7C71-0FDE280DB66B}/1.0/GettingStarted.htmlMEDIUMLARGE2010-03-16T00:10:30.000-04:00 \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/ibm_sbc/instances_deleted.xml libcloud-0.15.1/test/compute/fixtures/ibm_sbc/instances_deleted.xml --- libcloud-0.5.0/test/compute/fixtures/ibm_sbc/instances_deleted.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ibm_sbc/instances_deleted.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -26557126557Insight Instancewoodser@us.ibm.com11LARGEPublic keyvm519.developer.ihost.com129.33.196.12852010-04-06T15:40:24.745-04:002010-04-19T04:00:00.000-04:00SUSE Linux Enterprise10 SP2OS28194128194RSAwoodser@us.ibm.com10007509LARGEasdff22010-04-15T15:23:04.753-04:002010-04-22T15:23:13.658-04:00SUSE Linux Enterprise10 SP2OS \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/ibm_sbc/instances.xml libcloud-0.15.1/test/compute/fixtures/ibm_sbc/instances.xml --- 
libcloud-0.5.0/test/compute/fixtures/ibm_sbc/instances.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ibm_sbc/instances.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -26557126557Insight Instancewoodser@us.ibm.com11LARGEPublic keyvm519.developer.ihost.com129.33.196.12852010-04-06T15:40:24.745-04:002010-04-19T04:00:00.000-04:00SUSE Linux Enterprise10 SP2OS28193128193RAD instancewoodser@us.ibm.com10008404MEDIUMasdff22010-04-15T15:20:10.317-04:002010-04-22T15:20:19.564-04:00SUSE Linux Enterprise10 SP2OS28194128194RSAwoodser@us.ibm.com10007509LARGEasdff22010-04-15T15:23:04.753-04:002010-04-22T15:23:13.658-04:00SUSE Linux Enterprise10 SP2OS \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/ibm_sbc/locations.xml libcloud-0.15.1/test/compute/fixtures/ibm_sbc/locations.xml --- libcloud-0.5.0/test/compute/fixtures/ibm_sbc/locations.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ibm_sbc/locations.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -1US North East: Poughkeepsie, NYPOK50100200ext3SMALLMEDIUMLARGE \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/ibm_sbc/reboot_active.xml libcloud-0.15.1/test/compute/fixtures/ibm_sbc/reboot_active.xml --- libcloud-0.5.0/test/compute/fixtures/ibm_sbc/reboot_active.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ibm_sbc/reboot_active.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ - diff -Nru libcloud-0.5.0/test/compute/fixtures/ibm_sbc/sizes.xml libcloud-0.15.1/test/compute/fixtures/ibm_sbc/sizes.xml --- libcloud-0.5.0/test/compute/fixtures/ibm_sbc/sizes.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/ibm_sbc/sizes.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ - diff -Nru libcloud-0.5.0/test/compute/fixtures/meta/helloworld.txt libcloud-0.15.1/test/compute/fixtures/meta/helloworld.txt --- 
libcloud-0.5.0/test/compute/fixtures/meta/helloworld.txt 2011-03-24 19:47:21.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/meta/helloworld.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -Hello, World! \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/opennebula/computes.xml libcloud-0.15.1/test/compute/fixtures/opennebula/computes.xml --- libcloud-0.5.0/test/compute/fixtures/opennebula/computes.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opennebula/computes.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/opennebula/compute.xml libcloud-0.15.1/test/compute/fixtures/opennebula/compute.xml --- libcloud-0.5.0/test/compute/fixtures/opennebula/compute.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opennebula/compute.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ - - - 5 - MyCompute - ACTIVE - - - - - - - - - small - \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/opennebula/disk.xml libcloud-0.15.1/test/compute/fixtures/opennebula/disk.xml --- libcloud-0.5.0/test/compute/fixtures/opennebula/disk.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opennebula/disk.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ - - - 1 - UbuntuServer9.04-Contextualized - 5120 - file:///Users/oneuser/ubuntu-server-9.04/ubuntu-server-9.04.img - \ No newline at end of file diff -Nru libcloud-0.5.0/test/compute/fixtures/opennebula/storage.xml libcloud-0.15.1/test/compute/fixtures/opennebula/storage.xml --- libcloud-0.5.0/test/compute/fixtures/opennebula/storage.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opennebula/storage.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff -Nru 
libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml --- libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ - - - - NA1 - US - East - Ashburn - Virginia - US - https://opsource-na1.cloud-vpn.net/ - true - - diff -Nru libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml --- libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ - - - - 53b4c05b-341e-4ac3-b688-bdd74e53ca9b - test-net1 - test-net1 description - NA1 - 10.162.1.0 - false - - diff -Nru libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml --- libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ - - - Delete Server - ERROR - Operation in progress on Server with Id 11 - REASON_392 - diff 
-Nru libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml --- libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ - - - Delete Server - SUCCESS - Server "Delete" issued - REASON_0 - diff -Nru libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml --- libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ - - - Power Off Server - ERROR - Operation in progress on Server with Id 11 - REASON_392 - diff -Nru libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml --- libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ - - - Power Off Server - SUCCESS - Server "Power Off" issued - REASON_0 - diff -Nru 
libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml --- libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ - - - Restart Server - ERROR - Operation in progress on Server with Id 11 - REASON_392 - diff -Nru libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml --- libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ - - Restart Server - SUCCESS - Server "Restart" issued - REASON_0 - diff -Nru libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml --- libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ - - - Graceful Shutdown Server - ERROR - Operation in progress on Server with Id 
11 - REASON_392 - diff -Nru libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml --- libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ - - Graceful Shutdown Server - SUCCESS - Server "Graceful Shutdown" issued - REASON_0 - diff -Nru libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml --- libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ - - - Start Server - ERROR - Operation in progress on Server with Id 11 - REASON_392 - diff -Nru libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml --- libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ - - - Start Server - SUCCESS - Server "Start" issued - REASON_0 - diff -Nru 
libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml --- libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ - - - - abadbc7e-9e10-46ca-9d4a-194bcc6b6c16 - testnode01 - this is testnode01 description - - 2 - 2048 - 10 - 20 - - UNIX - REDHAT5/64 - - - 44ed8b72-ebea-11df-bdc1-001517c46384 - 53b4c05b-341e-4ac3-b688-bdd78e43ca9e - 10.162.1.1 - 10-162-1-1 - true - 2011-03-02T17:16:09.882Z - - - dbadbc8e-9e10-56ca-5d4a-155bcc5b5c15 - testnode02 - this is testnode02 description - - 4 - 4096 - 10 - 20 - - UNIX - REDHAT5/64 - - - 44ed8b72-ebea-11df-bdc1-001517c46384 - 53b4c05b-341e-4ac3-b688-bdd78e43ca9e - 10.162.1.2 - 10-162-1-2 - true - 2011-03-02T17:16:10.882Z - - diff -Nru libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml --- libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ - - - - e75ead52-692f-4314-8725-c8a4f4d13a87 - test2 - test2 node - - 1 - 2048 - 10 - 0 - - UNIX - REDHAT5/64 - - - 52ed8b72-ebea-11df-bdc1-001517c46384 - 52f4c05b-341e-4ac3-b688-bdd78e43ca9e - 10.162.151.11 - - DEPLOY_SERVER - 2011-03-20T22:32:23.000Z - copia - - - diff -Nru 
libcloud-0.5.0/test/compute/fixtures/opsource/_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml libcloud-0.15.1/test/compute/fixtures/opsource/_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml --- libcloud-0.5.0/test/compute/fixtures/opsource/_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opsource/_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ - - Deploy Server - SUCCESS - Server "Deploy" issued - REASON_0 - diff -Nru libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_base_image.xml libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_base_image.xml --- libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_base_image.xml 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_base_image.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,339 +0,0 @@ - - - - 52ed8b72-ebea-11df-bdc1-001517c46384 - /oec/base/image/52ed8b72-ebea-11df-bdc1-001517c46384 - RedHat 5.5 64-bit 1 CPU - RedHat 5.5 Enterprise (Tikanga), 64-bit - - UNIX - REDHAT5/64 - - NA1 - 1 - 2048 - 10 - 0 - 1970-01-01T00:00:02.010Z - - - 52ed8dca-ebea-11df-bdc1-001517c46384 - /oec/base/image/52ed8dca-ebea-11df-bdc1-001517c46384 - RedHat 5.5 64-bit 2 CPU - RedHat 5.5 Enterprise (Tikanga), 64-bit - - UNIX - REDHAT5/64 - - NA1 - 2 - 4096 - 10 - 0 - 1970-01-01T00:00:02.010Z - - - 52ed8ed8-ebea-11df-bdc1-001517c46384 - /oec/base/image/52ed8ed8-ebea-11df-bdc1-001517c46384 - RedHat 5.5 64-bit 4 CPU - RedHat 5.5 Enterprise (Tikanga), 64-bit - - UNIX - REDHAT5/64 - - NA1 - 4 - 6144 - 10 - 0 - 1970-01-01T00:00:02.010Z - - - 6fc040ae-3605-11e0-bfb5-001517c46384 - /oec/base/image/6fc040ae-3605-11e0-bfb5-001517c46384 - RedHat 5.5 32-bit 1 CPU - RedHat 5.5 Enterprise (Tikanga), 32-bit - - UNIX - REDHAT5/32 - - NA1 - 1 - 2048 - 10 - 0 - 2011-02-11T17:36:19.000Z - - - 52ed92d4-ebea-11df-bdc1-001517c46384 - 
/oec/base/image/52ed92d4-ebea-11df-bdc1-001517c46384 - Ubuntu 8.04.4 2 CPU - Ubuntu 8.04.4 LTS, 64-bit - - UNIX - UBUNTU8/64 - - NA1 - 2 - 4096 - 10 - 0 - 1970-01-01T00:00:02.010Z - - - 52ed876c-ebea-11df-bdc1-001517c46384 - /oec/base/image/52ed876c-ebea-11df-bdc1-001517c46384 - Win2008 Ent 64-bit R2 2 CPU - Windows 2008 Enterprise R2 64-bit - - WINDOWS - WIN2008R2E/64 - - NA1 - 2 - 4096 - 50 - 0 - 1970-01-01T00:00:02.010Z - - - 52ed8a5a-ebea-11df-bdc1-001517c46384 - /oec/base/image/52ed8a5a-ebea-11df-bdc1-001517c46384 - Win2008 Ent 64-bit R2 4 CPU - Windows 2008 Enterprise R2 64-bit - - WINDOWS - WIN2008R2E/64 - - NA1 - 4 - 8192 - 50 - 0 - 1970-01-01T00:00:02.010Z - - - 52ed865e-ebea-11df-bdc1-001517c46384 - /oec/base/image/52ed865e-ebea-11df-bdc1-001517c46384 - Win2008 Std 64-bit R2 2 CPU - Windows 2008 Standard R2 64-bit - - WINDOWS - WIN2008R2S/64 - - NA1 - 2 - 4096 - 50 - 0 - 1970-01-01T00:00:02.010Z - - - 52ed7b96-ebea-11df-bdc1-001517c46384 - /oec/base/image/52ed7b96-ebea-11df-bdc1-001517c46384 - Win2008 Std 32-bit 1 CPU - Windows 2008 Standard SP2 32-bit - - WINDOWS - WIN2008S/32 - - NA1 - 1 - 2048 - 50 - 0 - 1970-01-01T00:00:02.010Z - - - 52ed7cb8-ebea-11df-bdc1-001517c46384 - /oec/base/image/52ed7cb8-ebea-11df-bdc1-001517c46384 - Win2008 Std 32-bit 2 CPU - Windows 2008 Standard SP2 32-bit - - WINDOWS - WIN2008S/32 - - NA1 - 2 - 4096 - 50 - 0 - 1970-01-01T00:00:02.010Z - - - 52ed7da8-ebea-11df-bdc1-001517c46384 - /oec/base/image/52ed7da8-ebea-11df-bdc1-001517c46384 - Win2008 Std 32-bit 4 CPU - Windows 2008 Standard SP2 32-bit - - WINDOWS - WIN2008S/32 - - NA1 - 4 - 4096 - 50 - 0 - 1970-01-01T00:00:02.010Z - - - 52ed7ea2-ebea-11df-bdc1-001517c46384 - /oec/base/image/52ed7ea2-ebea-11df-bdc1-001517c46384 - Win2008 Ent 32-bit 2 CPU - Windows 2008 Enterprise SP2 32-bit - - WINDOWS - WIN2008E/32 - - NA1 - 2 - 4096 - 50 - 0 - 1970-01-01T00:00:02.010Z - - - 52ed8fd2-ebea-11df-bdc1-001517c46384 - /oec/base/image/52ed8fd2-ebea-11df-bdc1-001517c46384 - Red Hat 4.8 
32-bit 1 CPU - Red Hat ES 4.8 (Nahant), 32-bit - - UNIX - REDHAT4/32 - - NA1 - 1 - 2048 - 10 - 0 - 1970-01-01T00:00:02.010Z - - - 52ed90cc-ebea-11df-bdc1-001517c46384 - /oec/base/image/52ed90cc-ebea-11df-bdc1-001517c46384 - CentOS 5.5 32-bit 1 CPU - CentOS release 5.5, 32-bit - - UNIX - CENTOS5/32 - - NA1 - 1 - 2048 - 10 - 0 - 1970-01-01T00:00:02.010Z - - - 52ed91da-ebea-11df-bdc1-001517c46384 - /oec/base/image/52ed91da-ebea-11df-bdc1-001517c46384 - CentOS 5.5 64-bit 1 CPU - CentOS release 5.5, 64-bit - - UNIX - CENTOS5/64 - - NA1 - 1 - 2048 - 10 - 0 - 1970-01-01T00:00:02.010Z - - - 52ed766e-ebea-11df-bdc1-001517c46384 - /oec/base/image/52ed766e-ebea-11df-bdc1-001517c46384 - Win2003 Ent 32-bit 1 CPU - Windows 2003 Enterprise SP2 32-bit - - WINDOWS - WIN2003E/32 - - NA1 - 1 - 2048 - 16 - 0 - 1970-01-01T00:00:02.010Z - - - 52ed7876-ebea-11df-bdc1-001517c46384 - /oec/base/image/52ed7876-ebea-11df-bdc1-001517c46384 - Win2003 Ent 32-bit 2 CPU - Windows 2003 Enterprise SP2 32-bit - - WINDOWS - WIN2003E/32 - - NA1 - 2 - 4096 - 16 - 0 - 1970-01-01T00:00:02.010Z - - - 52ed7984-ebea-11df-bdc1-001517c46384 - /oec/base/image/52ed7984-ebea-11df-bdc1-001517c46384 - Win2003 Ent 32-bit 4 CPU - Windows 2003 Enterprise SP2 32-bit - - WINDOWS - WIN2003E/32 - - NA1 - 4 - 4096 - 16 - 0 - 1970-01-01T00:00:02.010Z - - - 52ed7a88-ebea-11df-bdc1-001517c46384 - /oec/base/image/52ed7a88-ebea-11df-bdc1-001517c46384 - Win2003 Std 64-bit 2 CPU - Windows 2003 Standard x64 SP2, 64-bit - - WINDOWS - WIN2003S/64 - - NA1 - 2 - 4096 - 16 - 0 - 1970-01-01T00:00:02.010Z - - - 0c231ef0-2a42-11e0-bfb5-001517c46384 - /oec/base/image/0c231ef0-2a42-11e0-bfb5-001517c46384 - RedHat 64-bit 2 CPU with MySQL - RedHat 5.5 Enterprise with MySQL 5.5 installed - - UNIX - REDHAT5/64 - - NA1 - 2 - 8192 - 10 - 0 - 2011-01-27T18:19:58.000Z - - - 2fb5261a-2a42-11e0-bfb5-001517c46384 - /oec/base/image/2fb5261a-2a42-11e0-bfb5-001517c46384 - RedHat 64-bit 2 CPU with PostgreSQL - RedHat 5.5 Enterprise with PostgreSQL 9.0 
installed - - UNIX - REDHAT5/64 - - NA1 - 2 - 8192 - 10 - 0 - 2011-01-27T18:20:57.000Z - - diff -Nru libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_myaccount.xml libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_myaccount.xml --- libcloud-0.5.0/test/compute/fixtures/opsource/oec_0_9_myaccount.xml 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/opsource/oec_0_9_myaccount.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ - - - testuser - Test User - Test - User - test@example.com - 8a8f6abc-2745-4d8a-9cbc-8dabe5a7d0e4 - - - create image - - - reports - - - server - - - primary administrator - - - network - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_flavors_detail.xml libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_flavors_detail.xml --- libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_flavors_detail.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_flavors_detail.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ - - - - - - - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_images_detail.xml libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_images_detail.xml --- libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_images_detail.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_images_detail.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ - - - - - - - - - - - - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_images_post.xml libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_images_post.xml --- libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_images_post.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_images_post.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_limits.xml 
libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_limits.xml --- libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_limits.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_limits.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ - - - - - - - - - - - - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_servers_detail_empty.xml libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_servers_detail_empty.xml --- libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_servers_detail_empty.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_servers_detail_empty.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ - - diff -Nru libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_servers_detail_metadata.xml libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_servers_detail_metadata.xml --- libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_servers_detail_metadata.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_servers_detail_metadata.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ - - - - - somevalue - - - - - - - - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_servers_detail.xml libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_servers_detail.xml --- libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_servers_detail.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_servers_detail.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ - - - - - - - - - - - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_servers_ips.xml libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_servers_ips.xml --- libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_servers_ips.xml 2011-03-24 19:47:22.000000000 +0000 +++ 
libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_servers_ips.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ - - - - - - - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_servers_metadata.xml libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_servers_metadata.xml --- libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_servers_metadata.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_servers_metadata.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ - - - - b - d - - - - - - - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_servers.xml libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_servers.xml --- libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_servers.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_servers.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ - - - - - - - - - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_shared_ip_groups_detail.xml libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_shared_ip_groups_detail.xml --- libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_shared_ip_groups_detail.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_shared_ip_groups_detail.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ - - - - - - - - - - - - - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_shared_ip_groups.xml libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_shared_ip_groups.xml --- libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_shared_ip_groups.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_shared_ip_groups.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ - - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_shared_ip_group.xml 
libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_shared_ip_group.xml --- libcloud-0.5.0/test/compute/fixtures/rackspace/v1_slug_shared_ip_group.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/rackspace/v1_slug_shared_ip_group.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ - - - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/rimuhosting/r_distributions.json libcloud-0.15.1/test/compute/fixtures/rimuhosting/r_distributions.json --- libcloud-0.5.0/test/compute/fixtures/rimuhosting/r_distributions.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/rimuhosting/r_distributions.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -{ "get_distros_response" : - { "status_message" : null - , "status_code" : 200 - , "error_info" : null - , "response_type" : "OK" - , "human_readable_message" : "Here are the distros we are offering on new orders." - , "response_display_duration_type" : "REGULAR" - , "distro_infos" : - [{ "distro_code" : "lenny" - , "distro_description" : "Debian 5.0 (aka Lenny, RimuHosting recommended distro)"} - , { "distro_code" : "centos5" - , "distro_description" : "Centos5"} - , { "distro_code" : "ubuntu904" - , "distro_description" : "Ubuntu 9.04 (Jaunty Jackalope, from 2009-04)"} - , { "distro_code" : "ubuntu804" - , "distro_description" : "Ubuntu 8.04 (Hardy Heron, 5 yr long term support (LTS))"} - , { "distro_code" : "ubuntu810" - , "distro_description" : "Ubuntu 8.10 (Intrepid Ibex, from 2008-10)"} - , { "distro_code" : "fedora10" - , "distro_description" : "Fedora 10"}] - } -} diff -Nru libcloud-0.5.0/test/compute/fixtures/rimuhosting/r_orders.json libcloud-0.15.1/test/compute/fixtures/rimuhosting/r_orders.json --- libcloud-0.5.0/test/compute/fixtures/rimuhosting/r_orders.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/rimuhosting/r_orders.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -{ "get_orders_response" : - 
{ "status_message" : null - , "status_code" : 200 - , "error_info" : null - , "response_type" : "OK" - , "human_readable_message" : "Found 15 orders" - , "response_display_duration_type" : "REGULAR", - "about_orders" : - [{ "order_oid" : 88833465 - , "domain_name" : "api.ivan.net.nz" - , "slug" : "order-88833465-api-ivan-net-nz" - , "billing_oid" : 96122465 - , "is_on_customers_own_physical_server" : false - , "vps_parameters" : { "memory_mb" : 160 - , "disk_space_mb" : 4096 - , "disk_space_2_mb" : 0} - , "host_server_oid" : "764" - , "server_type" : "VPS" - , "data_transfer_allowance" : { "data_transfer_gb" : 30 - , "data_transfer" : "30"} - , "billing_info" : { "monthly_recurring_fee": 19.99 } - , "allocated_ips" : { "primary_ip" : "1.2.3.4" - , "secondary_ips" : ["1.2.3.5","1.2.3.6"]} - , "running_state" : "RUNNING" - }] - } -} diff -Nru libcloud-0.5.0/test/compute/fixtures/rimuhosting/r_orders_new_vps.json libcloud-0.15.1/test/compute/fixtures/rimuhosting/r_orders_new_vps.json --- libcloud-0.5.0/test/compute/fixtures/rimuhosting/r_orders_new_vps.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/rimuhosting/r_orders_new_vps.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,62 +0,0 @@ -{ "post_new_vps_response" : - { "status_message" : null - , "status_code" : 200 - , "error_info" : null - , "response_type" : "OK" - , "human_readable_message" : null - , "response_display_duration_type" : "REGULAR" - , "setup_messages" : - ["Using user-specified billing data: Wire Transfer" , "Selected user as the owner of the billing details: Ivan Meredith" - , "No VPS paramters provided, using default values."] - , "about_order" : - { "order_oid" : 52255865 - , "domain_name" : "api.ivan.net.nz" - , "slug" : "order-52255865-api-ivan-net-nz" - , "billing_oid" : 96122465 - , "is_on_customers_own_physical_server" : false - , "vps_parameters" : - { "memory_mb" : 160 - , "disk_space_mb" : 4096 - , "disk_space_2_mb" : 0} - , "host_server_oid" : "764" - , 
"server_type" : "VPS" - , "data_transfer_allowance" : - { "data_transfer_gb" : 30 , "data_transfer" : "30"} - , "billing_info" : { "monthly_recurring_fee" : 19.99 } - , "allocated_ips" : - { "primary_ip" : "74.50.57.80", "secondary_ips" : []} - , "running_state" : "RUNNING"} - , "new_order_request" : - { "billing_oid" : 96122465 - , "user_oid" : 0 - , "host_server_oid" : null - , "vps_order_oid_to_clone" : 0 - , "ip_request" : - { "num_ips" : 1, "extra_ip_reason" : ""} - , "vps_parameters" : - { "memory_mb" : 160 - , "disk_space_mb" : 4096 - , "disk_space_2_mb" : 0} - , "pricing_plan_code" : "MIRO1B" - , "instantiation_options" : - { "control_panel" : "webmin" - , "domain_name" : "api.ivan.net.nz" - , "password" : "aruxauce27" - , "distro" : "lenny"}} - , "running_vps_info" : - { "pings_ok" : true - , "current_kernel" : "default" - , "current_kernel_canonical" : "2.6.30.5-xenU.i386" - , "last_backup_message" : "" - , "is_console_login_enabled" : false - , "console_public_authorized_keys" : null - , "is_backup_running" : false - , "is_backups_enabled" : true - , "next_backup_time" : - { "ms_since_epoch": 1256446800000, "iso_format" : "2009-10-25T05:00:00Z", "users_tz_offset_ms" : 46800000} - , "vps_uptime_s" : 31 - , "vps_cpu_time_s" : 6 - , "running_state" : "RUNNING" - , "is_suspended" : false} - } -} diff -Nru libcloud-0.5.0/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps.json libcloud-0.15.1/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps.json --- libcloud-0.5.0/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -{ "delete_server_response" : - { "status_message" : null - , "status_code" : 200 - , "error_info" : null - , "response_type" : "OK" - , "human_readable_message" : "Server 
removed" - , "response_display_duration_type" : "REGULAR" - , "cancel_messages" : - ["api.ivan.net.nz is being shut down." - , "A $7.98 credit has been added to your account." - , "If you need to un-cancel the server please contact our support team."] - } -} diff -Nru libcloud-0.5.0/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps_running_state.json libcloud-0.15.1/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps_running_state.json --- libcloud-0.5.0/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps_running_state.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps_running_state.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ -{ "put_running_state_response" : - { "status_message" : null - , "status_code" : 200 - , "error_info" : null - , "response_type" : "OK" - , "human_readable_message" : "api.ivan.net.nz restarted. After the reboot api.ivan.net.nz is pinging OK." 
- , "response_display_duration_type" : "REGULAR" - , "is_restarted" : true - , "is_pinging" : true - , "running_vps_info" : - { "pings_ok" : true - , "current_kernel" : "default" - , "current_kernel_canonical" : "2.6.30.5-xenU.i386" - , "last_backup_message" : "" - , "is_console_login_enabled" : false - , "console_public_authorized_keys" : null - , "is_backup_running" : false - , "is_backups_enabled" : true - , "next_backup_time" : - { "ms_since_epoch": 1256446800000, "iso_format" : "2009-10-25T05:00:00Z", "users_tz_offset_ms" : 46800000} - , "vps_uptime_s" : 19 - , "vps_cpu_time_s" : 5 - , "running_state" : "RUNNING" - , "is_suspended" : false} - , "host_server_info" : { "is_host64_bit_capable" : true - , "default_kernel_i386" : "2.6.30.5-xenU.i386" - , "default_kernel_x86_64" : "2.6.30.5-xenU.x86_64" - , "cpu_model_name" : "Intel(R) Xeon(R) CPU E5506 @ 2.13GHz" - , "host_num_cores" : 1 - , "host_xen_version" : "3.4.1" - , "hostload" : [1.45 - , 0.56 - , 0.28] - , "host_uptime_s" : 3378276 - , "host_mem_mb_free" : 51825 - , "host_mem_mb_total" : 73719 - , "running_vpss" : 34} - , "running_state_messages" : null - } -} diff -Nru libcloud-0.5.0/test/compute/fixtures/rimuhosting/r_pricing_plans.json libcloud-0.15.1/test/compute/fixtures/rimuhosting/r_pricing_plans.json --- libcloud-0.5.0/test/compute/fixtures/rimuhosting/r_pricing_plans.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/rimuhosting/r_pricing_plans.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -{"get_pricing_plans_response" : - { "status_message" : null - , "status_code" : 200 - , "error_info" : null - , "response_type" : "OK" - , "human_readable_message" : "Here some pricing plans we are offering on new orders.  Note we offer most disk and memory sizes.  So if you setup a new server feel free to vary these (e.g. different memory, disk, etc) and we will just adjust the pricing to suit.  Pricing is in USD.  
If you are an NZ-based customer then we would need to add GST." - , "response_display_duration_type" : "REGULAR" - , "pricing_plan_infos" : - [{ "pricing_plan_code" : "MiroVPSLowContention" - , "pricing_plan_description" : "MiroVPS Semi-Dedicated Server (Dallas)" - , "monthly_recurring_fee" : 32.54 - , "monthly_recurring_amt" : { - "amt" : 35.0 - , "currency" : "CUR_AUD" - , "amt_usd" : 32.54} - , "minimum_memory_mb" : 950 - , "minimum_disk_gb" : 20 - , "minimum_data_transfer_allowance_gb" : 75 - , "see_also_url" : "http://rimuhosting.com/order/serverdetails.jsp?plan=MiroVPSLowContention" - , "server_type" : "VPS" - , "offered_at_data_center" : - { "data_center_location_code" : "DCDALLAS" - , "data_center_location_name" : "Dallas" } - }] - } -} diff -Nru libcloud-0.5.0/test/compute/fixtures/slicehost/flavors.xml libcloud-0.15.1/test/compute/fixtures/slicehost/flavors.xml --- libcloud-0.5.0/test/compute/fixtures/slicehost/flavors.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/slicehost/flavors.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ - - - - 1 - 256 slice - 2000 - 256 - - - 2 - 512 slice - 3800 - 512 - - - 3 - 1GB slice - 7000 - 1024 - - - 4 - 2GB slice - 13000 - 2048 - - - 5 - 4GB slice - 25000 - 4096 - - - 6 - 8GB slice - 45000 - 8192 - - - 7 - 15.5GB slice - 80000 - 15872 - - diff -Nru libcloud-0.5.0/test/compute/fixtures/slicehost/images.xml libcloud-0.15.1/test/compute/fixtures/slicehost/images.xml --- libcloud-0.5.0/test/compute/fixtures/slicehost/images.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/slicehost/images.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,47 +0,0 @@ - - - - CentOS 5.2 - 2 - - - Gentoo 2008.0 - 3 - - - Debian 5.0 (lenny) - 4 - - - Fedora 10 (Cambridge) - 5 - - - CentOS 5.3 - 7 - - - Ubuntu 9.04 (jaunty) - 8 - - - Arch 2009.02 - 9 - - - Ubuntu 8.04.2 LTS (hardy) - 10 - - - Ubuntu 8.10 (intrepid) - 11 - - - Red Hat EL 5.3 - 12 - - - Fedora 11 (Leonidas) - 
13 - - diff -Nru libcloud-0.5.0/test/compute/fixtures/slicehost/slices_1_reboot_forbidden.xml libcloud-0.15.1/test/compute/fixtures/slicehost/slices_1_reboot_forbidden.xml --- libcloud-0.5.0/test/compute/fixtures/slicehost/slices_1_reboot_forbidden.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/slicehost/slices_1_reboot_forbidden.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ - - Permission denied - diff -Nru libcloud-0.5.0/test/compute/fixtures/slicehost/slices_1_reboot.xml libcloud-0.15.1/test/compute/fixtures/slicehost/slices_1_reboot.xml --- libcloud-0.5.0/test/compute/fixtures/slicehost/slices_1_reboot.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/slicehost/slices_1_reboot.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ - - libcloud-test - 10 - -
174.143.212.229
-
10.176.164.199
-
- 100 - 70507 - 0.0 - 0.0 - 1 - reboot - 174.143.212.229 -
diff -Nru libcloud-0.5.0/test/compute/fixtures/slicehost/slices_errors.xml libcloud-0.15.1/test/compute/fixtures/slicehost/slices_errors.xml --- libcloud-0.5.0/test/compute/fixtures/slicehost/slices_errors.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/slicehost/slices_errors.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ - - - Slice parameters are not properly nested - diff -Nru libcloud-0.5.0/test/compute/fixtures/slicehost/slices_get.xml libcloud-0.15.1/test/compute/fixtures/slicehost/slices_get.xml --- libcloud-0.5.0/test/compute/fixtures/slicehost/slices_get.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/slicehost/slices_get.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ - - - libcloud-foo - 10 - -
174.143.212.229
-
10.176.164.199
-
- 0 - 1 - 0.0 - 0.0 - 1 - build - 174.143.212.229 -
-
diff -Nru libcloud-0.5.0/test/compute/fixtures/slicehost/slices_post.xml libcloud-0.15.1/test/compute/fixtures/slicehost/slices_post.xml --- libcloud-0.5.0/test/compute/fixtures/slicehost/slices_post.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/slicehost/slices_post.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ - - slicetest - 11 - -
10.176.168.15
-
67.23.20.114
-
- fooadfa1231 - 0 - 71907 - 0.0 - 0.0 - 1 - build - 10.176.168.15 -
diff -Nru libcloud-0.5.0/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests.xml libcloud-0.15.1/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests.xml --- libcloud-0.5.0/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,1066 +0,0 @@ - - - - - - - - - - privateNetworkOnlyFlag - - 0 - - - - id - - 1832 - - - - accountId - - 11111 - - - - statusId - - 1001 - - - - uuid - - eaa9aaa2-8e2e-d6e0-ce11-6f01e765779c - - - - hostname - - test1 - - - - domain - - libcloud.org - - - - maxCpu - - 2 - - - - maxCpuUnits - - CORE - - - - maxMemory - - 2048 - - - - startCpus - - 2 - - - - createDate - - 2009-09-04T14:49:45-05:00 - - - - modifyDate - - 2010-04-22T13:08:47-05:00 - - - - metricPollDate - - 2010-04-22T13:08:00-05:00 - - - - dedicatedAccountHostOnlyFlag - - 0 - - - - powerState - - - - name - - Running - - - - keyName - - RUNNING - - - - - - - softwareComponents - - - - - - - id - - 191115 - - - - hardwareId - - - - - - manufacturerLicenseInstance - - - - - - passwords - - - - - - - id - - 166980 - - - - softwareId - - 191115 - - - - username - - root - - - - password - - TEST - - - - createDate - - 2009-09-04T14:49:51-05:00 - - - - modifyDate - - 2009-09-04T14:49:51-05:00 - - - - port - - - - - - - - - - - - - - - - - - primaryIpAddress - - 67.254.254.254 - - - - primaryBackendIpAddress - - 10.254.254.254 - - - - - - - - privateNetworkOnlyFlag - - 0 - - - - id - - 13402 - - - - accountId - - 11111 - - - - statusId - - 1001 - - - - uuid - - 9e9e9e99-4ed9-4645-19f3-55ee4e404d56 - - - - hostname - - test2 - - - - domain - - libcloud.org - - - - maxCpu - - 1 - - - - maxCpuUnits - - CORE - - - - maxMemory - - 1024 - - - - startCpus - - 1 - - - - createDate - - 2010-03-23T17:06:13-05:00 - - - - modifyDate - - 2010-04-22T13:08:43-05:00 - - - - 
metricPollDate - - 2010-04-22T13:08:00-05:00 - - - - dedicatedAccountHostOnlyFlag - - 0 - - - - powerState - - - - name - - Running - - - - keyName - - RUNNING - - - - - - - softwareComponents - - - - - - - id - - 257314 - - - - hardwareId - - - - - - manufacturerLicenseInstance - - - - - - passwords - - - - - - - id - - 235268 - - - - softwareId - - 257314 - - - - username - - root - - - - password - - TEST - - - - createDate - - 2010-03-23T17:06:17-05:00 - - - - modifyDate - - 2010-03-23T17:06:17-05:00 - - - - port - - - - - - - - - - - - - - - - - - primaryIpAddress - - 174.254.254.254 - - - - primaryBackendIpAddress - - 10.254.254.254 - - - - - - - - privateNetworkOnlyFlag - - 0 - - - - id - - 19293 - - - - accountId - - 11111 - - - - statusId - - 1001 - - - - uuid - - 9f99e19b-2c61-9cd5-2081-67b57fd7977b - - - - hostname - - test3 - - - - domain - - libcloud.org - - - - maxCpu - - 2 - - - - maxCpuUnits - - CORE - - - - maxMemory - - 1024 - - - - startCpus - - 2 - - - - createDate - - 2010-04-22T12:38:53-05:00 - - - - modifyDate - - 2010-04-22T13:08:01-05:00 - - - - metricPollDate - - 2010-04-22T13:08:00-05:00 - - - - dedicatedAccountHostOnlyFlag - - 1 - - - - powerState - - - - name - - Running - - - - keyName - - RUNNING - - - - - - - softwareComponents - - - - - - - id - - 277185 - - - - hardwareId - - - - - - manufacturerLicenseInstance - - - - - - passwords - - - - - - - id - - 250826 - - - - softwareId - - 277185 - - - - username - - root - - - - password - - TEST - - - - createDate - - 2010-04-22T12:38:57-05:00 - - - - modifyDate - - 2010-04-22T12:38:57-05:00 - - - - port - - - - - - - - - - - - - - - - - - primaryIpAddress - - 174.254.254.254 - - - - primaryBackendIpAddress - - 10.254.254.254 - - - - - - - - privateNetworkOnlyFlag - - 0 - - - - id - - 19288 - - - - accountId - - 11111 - - - - statusId - - 1001 - - - - uuid - - 999f77d9-679b-c47d-136d-04cd302384ec - - - - hostname - - test4 - - - - domain - - libcloud.org - - - - maxCpu - - 2 - - - - 
maxCpuUnits - - CORE - - - - maxMemory - - 1024 - - - - startCpus - - 2 - - - - createDate - - 2010-04-22T12:15:24-05:00 - - - - modifyDate - - 2010-04-22T13:08:31-05:00 - - - - metricPollDate - - 2010-04-22T13:08:00-05:00 - - - - dedicatedAccountHostOnlyFlag - - 1 - - - - powerState - - - - name - - Running - - - - keyName - - RUNNING - - - - - - - softwareComponents - - - - - - - id - - 277171 - - - - hardwareId - - - - - - manufacturerLicenseInstance - - - - - - passwords - - - - - - - id - - 250815 - - - - softwareId - - 277171 - - - - username - - root - - - - password - - TEST - - - - createDate - - 2010-04-22T12:15:26-05:00 - - - - modifyDate - - 2010-04-22T12:15:26-05:00 - - - - port - - - - - - - - - - - - - - - - - - primaryIpAddress - - 174.254.254.254 - - - - primaryBackendIpAddress - - 10.254.254.254 - - - - - - - - privateNetworkOnlyFlag - - 0 - - - - id - - 19284 - - - - accountId - - 11111 - - - - statusId - - 1001 - - - - uuid - - f3c73738-7731-1372-f3c3-e6808082f824 - - - - hostname - - test5 - - - - domain - - libcloud.org - - - - maxCpu - - 2 - - - - maxCpuUnits - - CORE - - - - maxMemory - - 1024 - - - - startCpus - - 2 - - - - createDate - - 2010-04-22T12:11:23-05:00 - - - - modifyDate - - 2010-04-22T13:08:31-05:00 - - - - metricPollDate - - 2010-04-22T13:08:00-05:00 - - - - dedicatedAccountHostOnlyFlag - - 1 - - - - powerState - - - - name - - Running - - - - keyName - - RUNNING - - - - - - - softwareComponents - - - - - - - id - - 277167 - - - - hardwareId - - - - - - manufacturerLicenseInstance - - - - - - passwords - - - - - - - id - - 250811 - - - - softwareId - - 277167 - - - - username - - root - - - - password - - TEST - - - - createDate - - 2010-04-22T12:11:27-05:00 - - - - modifyDate - - 2010-04-22T12:11:27-05:00 - - - - port - - - - - - - - - - - - - - - - - - primaryIpAddress - - 174.254.254.254 - - - - primaryBackendIpAddress - - 10.254.254.254 - - - - - - - - - diff -Nru 
libcloud-0.5.0/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getDatacenters.xml libcloud-0.15.1/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getDatacenters.xml --- libcloud-0.5.0/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getDatacenters.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getDatacenters.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,99 +0,0 @@ - - - - - - - - - - id - - 2 - - - - name - - dal00 - - - - longName - - Corporate HQ - - - - - - - - id - - 3 - - - - name - - dal01 - - - - longName - - Dallas - - - - - - - - id - - 18171 - - - - name - - sea01 - - - - longName - - Seattle - - - - - - - - id - - 37473 - - - - name - - wdc01 - - - - longName - - Washington, DC - - - - - - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_catalogItem_5.xml libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_catalogItem_5.xml --- libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_catalogItem_5.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_catalogItem_5.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ - - - - - 0 - diff -Nru libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_login.xml libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_login.xml --- libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_login.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_login.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_org_240.xml libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_org_240.xml --- libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_org_240.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_org_240.xml 1970-01-01 00:00:00.000000000 
+0000 @@ -1,5 +0,0 @@ - - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_task_10496.xml libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_task_10496.xml --- libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_task_10496.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_task_10496.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_task_11001.xml libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_task_11001.xml --- libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_task_11001.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_task_11001.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_vapp_14031_action_deploy.xml libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_vapp_14031_action_deploy.xml --- libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_vapp_14031_action_deploy.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_vapp_14031_action_deploy.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_vapp_14031_get.xml libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_vapp_14031_get.xml --- libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_vapp_14031_get.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_vapp_14031_get.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,132 +0,0 @@ - - - - - - - 10.112.78.69 - - - - The kind of installed guest operating system - Red Hat Enterprise Linux 5 (32-bit) - - - Virtual Hardware - - - - - - - - - - - - - Virtual Hardware Family - 0 - - - - - - testerpart2 - vmx-07 - - -
- - hertz * 10^6 - - - - - Number of Virtual CPUs - 2 virtual CPU(s) - 1 - - - - - - - - 3 - 2 - count - - - -
- - byte * 2^20 - - - - - Memory Size - 512MB of memory - 2 - - - - - - - - 4 - 512 - byte * 2^20 - - - -
0
- - - - - - - SCSI Controller - SCSI Controller 0 - 3 - - - - - - - lsilogic - 6 - - - -
- -
- 0 - - - - - - - Hard Disk 1 - 10485760 - 9 - - - - 3 - - - - 17 - 10485760 - - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_poweroff.xml libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_poweroff.xml --- libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_poweroff.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_poweroff.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_powerOn.xml libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_powerOn.xml --- libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_powerOn.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_powerOn.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_reset.xml libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_reset.xml --- libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_reset.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_reset.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_vapp_14031.xml libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_vapp_14031.xml --- libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_vapp_14031.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_vapp_14031.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,132 +0,0 @@ - - - - - - - 10.112.78.69 - - - - The kind of installed guest 
operating system - Red Hat Enterprise Linux 5 (32-bit) - - - Virtual Hardware - - - - - - - - - - - - - Virtual Hardware Family - 0 - - - - - - testerpart2 - vmx-07 - - -
- - hertz * 10^6 - - - - - Number of Virtual CPUs - 2 virtual CPU(s) - 1 - - - - - - - - 3 - 2 - count - - - -
- - byte * 2^20 - - - - - Memory Size - 512MB of memory - 2 - - - - - - - - 4 - 512 - byte * 2^20 - - - -
0
- - - - - - - SCSI Controller - SCSI Controller 0 - 3 - - - - - - - lsilogic - 6 - - - -
- -
- 0 - - - - - - - Hard Disk 1 - 10485760 - 9 - - - - 3 - - - - 17 - 10485760 - - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_vdc_224_action_instantiateVAppTemplate.xml libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_vdc_224_action_instantiateVAppTemplate.xml --- libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_vdc_224_action_instantiateVAppTemplate.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_vdc_224_action_instantiateVAppTemplate.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_vdc_224_catalog.xml libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_vdc_224_catalog.xml --- libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_vdc_224_catalog.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_vdc_224_catalog.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ - - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_vdc_224.xml libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_vdc_224.xml --- libcloud-0.5.0/test/compute/fixtures/terremark/api_v0_8_vdc_224.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/terremark/api_v0_8_vdc_224.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ - - - - - - - - - - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/voxel/create_node.xml libcloud-0.15.1/test/compute/fixtures/voxel/create_node.xml --- libcloud-0.5.0/test/compute/fixtures/voxel/create_node.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/voxel/create_node.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ - - - 1234 - 1235386846 - QUEUED - - diff -Nru libcloud-0.5.0/test/compute/fixtures/voxel/failure.xml libcloud-0.15.1/test/compute/fixtures/voxel/failure.xml --- libcloud-0.5.0/test/compute/fixtures/voxel/failure.xml 2011-03-24 
19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/voxel/failure.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ - diff -Nru libcloud-0.5.0/test/compute/fixtures/voxel/images.xml libcloud-0.15.1/test/compute/fixtures/voxel/images.xml --- libcloud-0.5.0/test/compute/fixtures/voxel/images.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/voxel/images.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ - - - - - 32 - Linux - 2.6.18 - CentOS - 5.1 - root - - - ext3 - 880 - - Base install of CentOS 5.1 i386. - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/voxel/locations.xml libcloud-0.15.1/test/compute/fixtures/voxel/locations.xml --- libcloud-0.5.0/test/compute/fixtures/voxel/locations.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/voxel/locations.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ - - - - Amsterdam - - - New York - - - diff -Nru libcloud-0.5.0/test/compute/fixtures/voxel/nodes.xml libcloud-0.15.1/test/compute/fixtures/voxel/nodes.xml --- libcloud-0.5.0/test/compute/fixtures/voxel/nodes.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/voxel/nodes.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ - - - - Z100.12 - Virtual Server - Voxrox Intel Platform - - LGA7 - XO / 12th Floor - Private cage - primary - Row Z - Rack 100 - 12 - - - - 172.x.x.x - - - - - user - - - root - - -
zz.zz.us.voxel.net
- 55555 - user -
-
- Voxel TruManaged Server Configuration 1 -
- ... -
-
diff -Nru libcloud-0.5.0/test/compute/fixtures/voxel/success.xml libcloud-0.15.1/test/compute/fixtures/voxel/success.xml --- libcloud-0.5.0/test/compute/fixtures/voxel/success.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/voxel/success.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ - diff -Nru libcloud-0.5.0/test/compute/fixtures/voxel/unauthorized.xml libcloud-0.15.1/test/compute/fixtures/voxel/unauthorized.xml --- libcloud-0.5.0/test/compute/fixtures/voxel/unauthorized.xml 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/fixtures/voxel/unauthorized.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ - - - - voxel.devices.list - - 2010-02-10T23:39:25.808107+0000 - authshouldfail - ae069bb835e998622caaddaeff8c98e0 - - YOUR_SECRETtimestamp2010-02-10T23:39:25.808107+0000methodvoxel.devices.listkeyauthshouldfail - diff -Nru libcloud-0.5.0/test/compute/__init__.py libcloud-0.15.1/test/compute/__init__.py --- libcloud-0.5.0/test/compute/__init__.py 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,72 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from libcloud.compute.base import Node, NodeImage, NodeLocation - -class TestCaseMixin(object): - - def test_list_nodes_response(self): - nodes = self.driver.list_nodes() - self.assertTrue(isinstance(nodes, list)) - for node in nodes: - self.assertTrue(isinstance(node, Node)) - - def test_list_sizes_response(self): - sizes = self.driver.list_sizes() - size = sizes[0] - self.assertTrue(isinstance(sizes, list)) - # Check that size values are ints or None - self.assertTrue(size.ram is None or isinstance(size.ram, int)) - self.assertTrue(size.disk is None or isinstance(size.disk, int)) - self.assertTrue(size.bandwidth is None or - isinstance(size.bandwidth, int)) - - def test_list_images_response(self): - images = self.driver.list_images() - self.assertTrue(isinstance(images, list)) - for image in images: - self.assertTrue(isinstance(image, NodeImage)) - - - def test_list_locations_response(self): - locations = self.driver.list_locations() - self.assertTrue(isinstance(locations, list)) - for dc in locations: - self.assertTrue(isinstance(dc, NodeLocation)) - - def test_create_node_response(self): - # should return a node object - size = self.driver.list_sizes()[0] - image = self.driver.list_images()[0] - node = self.driver.create_node(name='node-name', - image=image, - size=size) - self.assertTrue(isinstance(node, Node)) - - def test_destroy_node_response(self): - # should return a node object - node = self.driver.list_nodes()[0] - ret = self.driver.destroy_node(node) - self.assertTrue(isinstance(ret, bool)) - - def test_reboot_node_response(self): - # should return a node object - node = self.driver.list_nodes()[0] - ret = self.driver.reboot_node(node) - self.assertTrue(isinstance(ret, bool)) - -if __name__ == "__main__": - import doctest - doctest.testmod() diff -Nru libcloud-0.5.0/test/compute/test_backward_compatibility.py libcloud-0.15.1/test/compute/test_backward_compatibility.py --- libcloud-0.5.0/test/compute/test_backward_compatibility.py 2011-04-26 
11:30:25.000000000 +0000 +++ libcloud-0.15.1/test/compute/test_backward_compatibility.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,54 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import sys -import unittest - -class BackwardCompatibilityTests(unittest.TestCase): - def test_all_the_old_paths_works(self): - # Common - from libcloud.types import InvalidCredsError - from libcloud.base import Node, NodeImage, NodeSize, NodeLocation - from libcloud.types import NodeState - from libcloud.types import LibcloudError - - from libcloud.base import Response - from libcloud.base import ConnectionKey, ConnectionUserAndKey - from libcloud.base import NodeAuthPassword - - # Driver specific - from libcloud.drivers.brightbox import BrightboxNodeDriver - from libcloud.drivers.cloudsigma import CloudSigmaZrhNodeDriver - from libcloud.drivers.rimuhosting import RimuHostingNodeDriver - from libcloud.drivers.elastichosts import ElasticHostsBaseNodeDriver - from libcloud.drivers.gogrid import GoGridNodeDriver - from libcloud.common.gogrid import GoGridIpAddress - from libcloud.drivers.linode import LinodeNodeDriver - from libcloud.drivers.vpsnet import VPSNetNodeDriver - from libcloud.drivers.opennebula import OpenNebulaNodeDriver - from 
libcloud.drivers.ibm_sbc import IBMNodeDriver as IBM - from libcloud.drivers.rackspace import RackspaceNodeDriver as Rackspace - from libcloud.drivers.ec2 import EC2NodeDriver, EC2APSENodeDriver - from libcloud.drivers.ec2 import EC2APNENodeDriver, IdempotentParamError - from libcloud.drivers.voxel import VoxelNodeDriver as Voxel - from libcloud.drivers.vcloud import TerremarkDriver - from libcloud.drivers.vcloud import VCloudNodeDriver - from libcloud.drivers.slicehost import SlicehostNodeDriver as Slicehost - from libcloud.drivers.softlayer import SoftLayerNodeDriver as SoftLayer - from libcloud.drivers.ecp import ECPNodeDriver - - from libcloud.drivers.cloudsigma import str2dicts, str2list, dict2str - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/compute/test_base.py libcloud-0.15.1/test/compute/test_base.py --- libcloud-0.5.0/test/compute/test_base.py 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/test_base.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import sys -import unittest - -from libcloud.common.base import Response -from libcloud.common.base import ConnectionKey, ConnectionUserAndKey -from libcloud.compute.base import Node, NodeSize, NodeImage, NodeDriver - -from test import MockResponse # pylint: disable-msg=E0611 - -class FakeDriver(object): - type = 0 - -class BaseTests(unittest.TestCase): - - def test_base_node(self): - Node(id=0, name=0, state=0, public_ip=0, private_ip=0, - driver=FakeDriver()) - - def test_base_node_size(self): - NodeSize(id=0, name=0, ram=0, disk=0, bandwidth=0, price=0, - driver=FakeDriver()) - - def test_base_node_image(self): - NodeImage(id=0, name=0, driver=FakeDriver()) - - def test_base_response(self): - Response(MockResponse(status=200, body='foo')) - - def test_base_node_driver(self): - NodeDriver('foo') - - def test_base_connection_key(self): - ConnectionKey('foo') - - def test_base_connection_userkey(self): - ConnectionUserAndKey('foo', 'bar') - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/compute/test_bluebox.py libcloud-0.15.1/test/compute/test_bluebox.py --- libcloud-0.5.0/test/compute/test_bluebox.py 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/test_bluebox.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,112 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -import sys -import unittest -import httplib - -from libcloud.compute.drivers.bluebox import BlueboxNodeDriver as Bluebox -from libcloud.compute.base import Node, NodeAuthPassword -from libcloud.compute.types import NodeState - - -from test import MockHttp -from test.file_fixtures import ComputeFileFixtures -from test.secrets import BLUEBOX_CUSTOMER_ID, BLUEBOX_API_KEY - -class BlueboxTest(unittest.TestCase): - - def setUp(self): - Bluebox.connectionCls.conn_classes = (None, BlueboxMockHttp) - self.driver = Bluebox(BLUEBOX_CUSTOMER_ID, BLUEBOX_API_KEY) - - def test_create_node(self): - node = self.driver.create_node( - name='foo', - size=self.driver.list_sizes()[0], - image=self.driver.list_images()[0], - auth=NodeAuthPassword("test123") - ) - self.assertTrue(isinstance(node, Node)) - self.assertEqual(node.state, NodeState.PENDING) - self.assertEqual(node.name, 'foo.apitest.blueboxgrid.com') - - def test_list_nodes(self): - node = self.driver.list_nodes()[0] - self.assertEqual(node.name, 'foo.apitest.blueboxgrid.com') - self.assertEqual(node.state, NodeState.RUNNING) - - def test_list_sizes(self): - sizes = self.driver.list_sizes() - self.assertEqual(len(sizes), 4) - - ids = [s.id for s in sizes] - - for size in sizes: - self.assertTrue(size.price > 0) - - self.assertTrue('94fd37a7-2606-47f7-84d5-9000deda52ae' in ids) - self.assertTrue('b412f354-5056-4bf0-a42f-6ddd998aa092' in ids) - self.assertTrue('0cd183d3-0287-4b1a-8288-b3ea8302ed58' in ids) - self.assertTrue('b9b87a5b-2885-4a2e-b434-44a163ca6251' in ids) - - def test_list_images(self): - images = self.driver.list_images() - image = images[0] - self.assertEqual(len(images), 10) - self.assertEqual(image.name, 'CentOS 5 (Latest Release)') - self.assertEqual(image.id, 'c66b8145-f768-45ef-9878-395bf8b1b7ff') - - def test_reboot_node(self): - node = self.driver.list_nodes()[0] - ret = 
self.driver.reboot_node(node) - self.assertTrue(ret) - - def test_destroy_node(self): - node = self.driver.list_nodes()[0] - ret = self.driver.destroy_node(node) - self.assertTrue(ret) - -class BlueboxMockHttp(MockHttp): - - fixtures = ComputeFileFixtures('bluebox') - - def _api_blocks_json(self, method, url, body, headers): - if method == "POST": - body = self.fixtures.load('api_blocks_json_post.json') - else: - body = self.fixtures.load('api_blocks_json.json') - return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - - def _api_block_products_json(self, method, url, body, headers): - body = self.fixtures.load('api_block_products_json.json') - return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - - def _api_block_templates_json(self, method, url, body, headers): - body = self.fixtures.load('api_block_templates_json.json') - return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - - def _api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json(self, method, url, body, headers): - if method == 'DELETE': - body = self.fixtures.load('api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json') - else: - body = self.fixtures.load('api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json') - return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - - def _api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json(self, method, url, body, headers): - body = self.fixtures.load('api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json') - return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/compute/test_brightbox.py libcloud-0.15.1/test/compute/test_brightbox.py --- libcloud-0.5.0/test/compute/test_brightbox.py 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/test_brightbox.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,132 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or 
more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import sys -import unittest -import httplib - -try: - import json -except ImportError: - import simplejson as json - -from libcloud.common.types import InvalidCredsError -from libcloud.compute.drivers.brightbox import BrightboxNodeDriver -from libcloud.compute.types import NodeState - -from test import MockHttp -from test.compute import TestCaseMixin -from test.file_fixtures import ComputeFileFixtures -from test.secrets import BRIGHTBOX_CLIENT_ID, BRIGHTBOX_CLIENT_SECRET - - -class BrightboxTest(unittest.TestCase, TestCaseMixin): - def setUp(self): - BrightboxNodeDriver.connectionCls.conn_classes = (None, BrightboxMockHttp) - BrightboxMockHttp.type = None - self.driver = BrightboxNodeDriver(BRIGHTBOX_CLIENT_ID, BRIGHTBOX_CLIENT_SECRET) - - def test_authentication(self): - BrightboxMockHttp.type = 'INVALID_CLIENT' - self.assertRaises(InvalidCredsError, self.driver.list_nodes) - - BrightboxMockHttp.type = 'UNAUTHORIZED_CLIENT' - self.assertRaises(InvalidCredsError, self.driver.list_nodes) - - def test_list_nodes(self): - nodes = self.driver.list_nodes() - self.assertEqual(len(nodes), 1) - self.assertTrue('109.107.42.129' in nodes[0].public_ip) - self.assertTrue('10.110.24.54' in nodes[0].private_ip) - self.assertEqual(nodes[0].state, NodeState.RUNNING) - - 
def test_list_sizes(self): - sizes = self.driver.list_sizes() - self.assertEqual(len(sizes), 1) - self.assertEqual(sizes[0].id, 'typ-4nssg') - self.assertEqual(sizes[0].name, 'Brightbox Nano Instance') - self.assertEqual(sizes[0].ram, 512) - - def test_list_images(self): - images = self.driver.list_images() - self.assertEqual(len(images), 1) - self.assertEqual(images[0].id, 'img-9vxqi') - self.assertEqual(images[0].name, 'Brightbox Lucid 32') - self.assertEqual(images[0].extra['arch'], '32-bit') - - def test_reboot_node_response(self): - node = self.driver.list_nodes()[0] - self.assertRaises(NotImplementedError, self.driver.reboot_node, [node]) - - def test_destroy_node(self): - node = self.driver.list_nodes()[0] - self.assertTrue(self.driver.destroy_node(node)) - - def test_create_node(self): - size = self.driver.list_sizes()[0] - image = self.driver.list_images()[0] - node = self.driver.create_node(name='Test Node', image=image, size=size) - self.assertEqual('srv-3a97e', node.id) - self.assertEqual('Test Node', node.name) - - -class BrightboxMockHttp(MockHttp): - fixtures = ComputeFileFixtures('brightbox') - - def _token(self, method, url, body, headers): - if method == 'POST': - return self.response(httplib.OK, self.fixtures.load('token.json')) - - def _token_INVALID_CLIENT(self, method, url, body, headers): - if method == 'POST': - return self.response(httplib.BAD_REQUEST, '{"error":"invalid_client"}') - - def _token_UNAUTHORIZED_CLIENT(self, method, url, body, headers): - if method == 'POST': - return self.response(httplib.UNAUTHORIZED, '{"error":"unauthorized_client"}') - - def _1_0_images(self, method, url, body, headers): - if method == 'GET': - return self.response(httplib.OK, self.fixtures.load('list_images.json')) - - def _1_0_servers(self, method, url, body, headers): - if method == 'GET': - return self.response(httplib.OK, self.fixtures.load('list_servers.json')) - elif method == 'POST': - body = json.loads(body) - - node = 
json.loads(self.fixtures.load('create_server.json')) - - node['name'] = body['name'] - - return self.response(httplib.ACCEPTED, json.dumps(node)) - - def _1_0_servers_srv_3a97e(self, method, url, body, headers): - if method == 'DELETE': - return self.response(httplib.ACCEPTED, '') - - def _1_0_server_types(self, method, url, body, headers): - if method == 'GET': - return self.response(httplib.OK, self.fixtures.load('list_server_types.json')) - - def _1_0_zones(self, method, url, body, headers): - if method == 'GET': - return self.response(httplib.OK, self.fixtures.load('list_zones.json')) - - def response(self, status, body): - return (status, body, {'content-type': 'application/json'}, httplib.responses[status]) - - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/compute/test_cloudsigma.py libcloud-0.15.1/test/compute/test_cloudsigma.py --- libcloud-0.5.0/test/compute/test_cloudsigma.py 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/test_cloudsigma.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,204 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import sys -import unittest -import httplib - -from libcloud.compute.base import Node -from libcloud.compute.drivers.cloudsigma import CloudSigmaZrhNodeDriver -from libcloud.utils import str2dicts, str2list, dict2str - -from test import MockHttp # pylint: disable-msg=E0611 -from test.compute import TestCaseMixin # pylint: disable-msg=E0611 -from test.file_fixtures import ComputeFileFixtures # pylint: disable-msg=E0611 - - -class CloudSigmaTestCase(unittest.TestCase, TestCaseMixin): - def setUp(self): - CloudSigmaZrhNodeDriver.connectionCls.conn_classes = (None, - CloudSigmaHttp) - self.driver = CloudSigmaZrhNodeDriver('foo', 'bar') - - def test_list_nodes(self): - nodes = self.driver.list_nodes() - self.assertTrue(isinstance(nodes, list)) - self.assertEqual(len(nodes), 1) - - node = nodes[0] - self.assertEqual(node.public_ip[0], "1.2.3.4") - self.assertEqual(node.extra['smp'], 1) - self.assertEqual(node.extra['cpu'], 1100) - self.assertEqual(node.extra['mem'], 640) - - def test_list_sizes(self): - images = self.driver.list_sizes() - self.assertEqual(len(images), 9) - - def test_list_images(self): - sizes = self.driver.list_images() - self.assertEqual(len(sizes), 10) - - def test_list_locations_response(self): - pass - - def test_start_node(self): - nodes = self.driver.list_nodes() - node = nodes[0] - self.assertTrue(self.driver.ex_start_node(node)) - - def test_shutdown_node(self): - nodes = self.driver.list_nodes() - node = nodes[0] - self.assertTrue(self.driver.ex_stop_node(node)) - self.assertTrue(self.driver.ex_shutdown_node(node)) - - def test_reboot_node(self): - node = self.driver.list_nodes()[0] - self.assertTrue(self.driver.reboot_node(node)) - - def test_destroy_node(self): - node = self.driver.list_nodes()[0] - self.assertTrue(self.driver.destroy_node(node)) - self.driver.list_nodes() - - def test_create_node(self): - size = self.driver.list_sizes()[0] - image = self.driver.list_images()[0] - node = self.driver.create_node( - name="cloudsigma node", 
image=image, size = size) - self.assertTrue(isinstance(node, Node)) - - def test_ex_static_ip_list(self): - ips = self.driver.ex_static_ip_list() - self.assertEqual(len(ips), 3) - - def test_ex_static_ip_create(self): - result = self.driver.ex_static_ip_create() - self.assertEqual(len(result), 2) - self.assertEqual(len(result[0].keys()), 6) - self.assertEqual(len(result[1].keys()), 6) - - def test_ex_static_ip_destroy(self): - result = self.driver.ex_static_ip_destroy('1.2.3.4') - self.assertTrue(result) - - def test_ex_drives_list(self): - result = self.driver.ex_drives_list() - self.assertEqual(len(result), 2) - - def test_ex_drive_destroy(self): - result = self.driver.ex_drive_destroy( - # @@TR: this should be soft-coded: - 'd18119ce_7afa_474a_9242_e0384b160220') - self.assertTrue(result) - - def test_ex_set_node_configuration(self): - node = self.driver.list_nodes()[0] - result = self.driver.ex_set_node_configuration(node, **{'smp': 2}) - self.assertTrue(result) - - def test_str2dicts(self): - string = 'mem 1024\ncpu 2200\n\nmem2048\cpu 1100' - result = str2dicts(string) - self.assertEqual(len(result), 2) - - def test_str2list(self): - string = 'ip 1.2.3.4\nip 1.2.3.5\nip 1.2.3.6' - result = str2list(string) - self.assertEqual(len(result), 3) - self.assertEqual(result[0], '1.2.3.4') - self.assertEqual(result[1], '1.2.3.5') - self.assertEqual(result[2], '1.2.3.6') - - def test_dict2str(self): - d = {'smp': 5, 'cpu': 2200, 'mem': 1024} - result = dict2str(d) - self.assertTrue(len(result) > 0) - self.assertTrue(result.find('smp 5') >= 0) - self.assertTrue(result.find('cpu 2200') >= 0) - self.assertTrue(result.find('mem 1024') >= 0) - -class CloudSigmaHttp(MockHttp): - fixtures = ComputeFileFixtures('cloudsigma') - - def _drives_standard_info(self, method, url, body, headers): - body = self.fixtures.load('drives_standard_info.txt') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _servers_62fe7cde_4fb9_4c63_bd8c_e757930066a0_start( - self, 
method, url, body, headers): - - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _servers_62fe7cde_4fb9_4c63_bd8c_e757930066a0_stop( - self, method, url, body, headers): - - return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.OK]) - - def _servers_62fe7cde_4fb9_4c63_bd8c_e757930066a0_destroy( - self, method, url, body, headers): - - return (httplib.NO_CONTENT, - body, {}, httplib.responses[httplib.NO_CONTENT]) - - def _drives_d18119ce_7afa_474a_9242_e0384b160220_clone( - self, method, url, body, headers): - - body = self.fixtures.load('drives_clone.txt') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _drives_a814def5_1789_49a0_bf88_7abe7bb1682a_info( - self, method, url, body, headers): - - body = self.fixtures.load('drives_single_info.txt') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _drives_info(self, method, url, body, headers): - body = self.fixtures.load('drives_info.txt') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _servers_create(self, method, url, body, headers): - body = self.fixtures.load('servers_create.txt') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _servers_info(self, method, url, body, headers): - body = self.fixtures.load('servers_info.txt') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _resources_ip_list(self, method, url, body, headers): - body = self.fixtures.load('resources_ip_list.txt') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _resources_ip_create(self, method, url, body, headers): - body = self.fixtures.load('resources_ip_create.txt') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _resources_ip_1_2_3_4_destroy(self, method, url, body, headers): - return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.OK]) - - def _drives_d18119ce_7afa_474a_9242_e0384b160220_destroy( - self, method, url, body, headers): - - return 
(httplib.NO_CONTENT, body, {}, httplib.responses[httplib.OK]) - - def _servers_62fe7cde_4fb9_4c63_bd8c_e757930066a0_set( - self, method, url, body, headers): - - body = self.fixtures.load('servers_set.txt') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/compute/test_deployment.py libcloud-0.15.1/test/compute/test_deployment.py --- libcloud-0.5.0/test/compute/test_deployment.py 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/test_deployment.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,82 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed to the Apache Software Foundation (ASF) under one or moreĀ§ -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import sys -import unittest - -from libcloud.compute.deployment import MultiStepDeployment, Deployment -from libcloud.compute.deployment import SSHKeyDeployment, ScriptDeployment -from libcloud.compute.base import Node -from libcloud.compute.types import NodeState -from libcloud.compute.ssh import BaseSSHClient -from libcloud.compute.drivers.ec2 import EC2NodeDriver - -class MockDeployment(Deployment): - def run(self, node, client): - return node - -class MockClient(BaseSSHClient): - def __init__(self, *args, **kwargs): - self.stdout = '' - self.stderr = '' - self.exit_status = 0 - - def put(self, path, contents, chmod=755): - return contents - - def run(self, name): - return self.stdout, self.stderr, self.exit_status - - def delete(self, name): - return True - -class DeploymentTests(unittest.TestCase): - - def setUp(self): - self.node = Node(id=1, name='test', state=NodeState.RUNNING, - public_ip=['1.2.3.4'], private_ip='1.2.3.5', - driver=EC2NodeDriver) - - def test_multi_step_deployment(self): - msd = MultiStepDeployment() - self.assertEqual(len(msd.steps), 0) - - msd.add(MockDeployment()) - self.assertEqual(len(msd.steps), 1) - - self.assertEqual(self.node, msd.run(node=self.node, client=None)) - - def test_ssh_key_deployment(self): - sshd = SSHKeyDeployment(key='1234') - - self.assertEqual(self.node, sshd.run(node=self.node, - client=MockClient(hostname='localhost'))) - - def test_script_deployment(self): - sd1 = ScriptDeployment(script='foobar', delete=True) - sd2 = ScriptDeployment(script='foobar', delete=False) - sd3 = ScriptDeployment(script='foobar', delete=False, name='foobarname') - - self.assertTrue(sd1.name.find('deployment') != '1') - self.assertEqual(sd3.name, 'foobarname') - - self.assertEqual(self.node, sd1.run(node=self.node, - client=MockClient(hostname='localhost'))) - self.assertEqual(self.node, sd2.run(node=self.node, - client=MockClient(hostname='localhost'))) - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru 
libcloud-0.5.0/test/compute/test_dreamhost.py libcloud-0.15.1/test/compute/test_dreamhost.py --- libcloud-0.5.0/test/compute/test_dreamhost.py 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/test_dreamhost.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,279 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import sys -import unittest -import httplib - -try: - import json -except: - import simplejson as json - -from libcloud.common.types import InvalidCredsError -from libcloud.compute.drivers.dreamhost import DreamhostNodeDriver -from libcloud.compute.types import NodeState - -from test import MockHttp -from test.compute import TestCaseMixin -from test.secrets import DREAMHOST_KEY - -class DreamhostTest(unittest.TestCase, TestCaseMixin): - - def setUp(self): - DreamhostNodeDriver.connectionCls.conn_classes = ( - None, - DreamhostMockHttp - ) - DreamhostMockHttp.type = None - DreamhostMockHttp.use_param = 'cmd' - self.driver = DreamhostNodeDriver(DREAMHOST_KEY) - - def test_invalid_creds(self): - """ - Tests the error-handling for passing a bad API Key to the DreamHost API - """ - DreamhostMockHttp.type = 'BAD_AUTH' - try: - self.driver.list_nodes() - self.assertTrue(False) # Above command should have thrown an InvalidCredsException - except InvalidCredsError: - self.assertTrue(True) - - - def test_list_nodes(self): - """ - Test list_nodes for DreamHost PS driver. 
Should return a list of two nodes: - - account_id: 000000 - ip: 75.119.203.51 - memory_mb: 500 - ps: ps22174 - start_date: 2010-02-25 - type: web - - account_id: 000000 - ip: 75.119.203.52 - memory_mb: 1500 - ps: ps22175 - start_date: 2010-02-25 - type: mysql - """ - - nodes = self.driver.list_nodes() - self.assertEqual(len(nodes), 2) - web_node = nodes[0] - mysql_node = nodes[1] - - # Web node tests - self.assertEqual(web_node.id, 'ps22174') - self.assertEqual(web_node.state, NodeState.UNKNOWN) - self.assertTrue('75.119.203.51' in web_node.public_ip) - self.assertTrue( - web_node.extra.has_key('current_size') and - web_node.extra['current_size'] == 500 - ) - self.assertTrue( - web_node.extra.has_key('account_id') and - web_node.extra['account_id'] == 000000 - ) - self.assertTrue( - web_node.extra.has_key('type') and - web_node.extra['type'] == 'web' - ) - # MySql node tests - self.assertEqual(mysql_node.id, 'ps22175') - self.assertEqual(mysql_node.state, NodeState.UNKNOWN) - self.assertTrue('75.119.203.52' in mysql_node.public_ip) - self.assertTrue( - mysql_node.extra.has_key('current_size') and - mysql_node.extra['current_size'] == 1500 - ) - self.assertTrue( - mysql_node.extra.has_key('account_id') and - mysql_node.extra['account_id'] == 000000 - ) - self.assertTrue( - mysql_node.extra.has_key('type') and - mysql_node.extra['type'] == 'mysql' - ) - - def test_create_node(self): - """ - Test create_node for DreamHost PS driver. - This is not remarkably compatible with libcloud. The DH API allows - users to specify what image they want to create and whether to move - all their data to the (web) PS. It does NOT accept a name, size, or - location. The only information it returns is the PS's context id - Once the PS is ready it will appear in the list generated by list_ps. 
- """ - new_node = self.driver.create_node( - image = self.driver.list_images()[0], - size = self.driver.list_sizes()[0], - movedata = 'no', - ) - self.assertEqual(new_node.id, 'ps12345') - self.assertEqual(new_node.state, NodeState.PENDING) - self.assertTrue( - new_node.extra.has_key('type') and - new_node.extra['type'] == 'web' - ) - - def test_destroy_node(self): - """ - Test destroy_node for DreamHost PS driver - """ - node = self.driver.list_nodes()[0] - self.assertTrue(self.driver.destroy_node(node)) - - def test_destroy_node_failure(self): - """ - Test destroy_node failure for DreamHost PS driver - """ - node = self.driver.list_nodes()[0] - - DreamhostMockHttp.type = 'API_FAILURE' - self.assertFalse(self.driver.destroy_node(node)) - - def test_reboot_node(self): - """ - Test reboot_node for DreamHost PS driver. - """ - node = self.driver.list_nodes()[0] - self.assertTrue(self.driver.reboot_node(node)) - - def test_reboot_node_failure(self): - """ - Test reboot_node failure for DreamHost PS driver - """ - node = self.driver.list_nodes()[0] - - DreamhostMockHttp.type = 'API_FAILURE' - self.assertFalse(self.driver.reboot_node(node)) - - def test_resize_node(self): - """ - Test resize_node for DreamHost PS driver - """ - node = self.driver.list_nodes()[0] - self.assertTrue(self.driver._resize_node(node, 400)) - - def test_resize_node_failure(self): - """ - Test reboot_node faliure for DreamHost PS driver - """ - node = self.driver.list_nodes()[0] - - DreamhostMockHttp.type = 'API_FAILURE' - self.assertFalse(self.driver._resize_node(node, 400)) - - def test_list_images(self): - """ - Test list_images for DreamHost PS driver. 
- """ - images = self.driver.list_images() - self.assertEqual(len(images), 2) - self.assertEqual(images[0].id, 'web') - self.assertEqual(images[0].name, 'web') - self.assertEqual(images[1].id, 'mysql') - self.assertEqual(images[1].name, 'mysql') - - def test_list_sizes(self): - sizes = self.driver.list_sizes() - self.assertEqual(len(sizes), 5) - - self.assertEqual(sizes[0].id, 'default') - self.assertEqual(sizes[0].bandwidth, None) - self.assertEqual(sizes[0].disk, None) - self.assertEqual(sizes[0].ram, 2300) - self.assertEqual(sizes[0].price, 115) - - def test_list_locations(self): - try: - self.driver.list_locations() - except NotImplementedError: - pass - - def test_list_locations_response(self): - self.assertRaises(NotImplementedError, self.driver.list_locations) - -class DreamhostMockHttp(MockHttp): - - def _BAD_AUTH_dreamhost_ps_list_ps(self, method, url, body, headers): - body = json.dumps({'data' : 'invalid_api_key', 'result' : 'error'}) - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _dreamhost_ps_add_ps(self, method, url, body, headers): - body = json.dumps({'data' : {'added_web' : 'ps12345'}, 'result' : 'success'}) - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _dreamhost_ps_list_ps(self, method, url, body, headers): - data = [{ - 'account_id' : 000000, - 'ip': '75.119.203.51', - 'memory_mb' : 500, - 'ps' : 'ps22174', - 'start_date' : '2010-02-25', - 'type' : 'web' - }, - { - 'account_id' : 000000, - 'ip' : '75.119.203.52', - 'memory_mb' : 1500, - 'ps' : 'ps22175', - 'start_date' : '2010-02-25', - 'type' : 'mysql' - }] - result = 'success' - body = json.dumps({'data' : data, 'result' : result}) - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _dreamhost_ps_list_images(self, method, url, body, headers): - data = [{ - 'description' : 'Private web server', - 'image' : 'web' - }, - { - 'description' : 'Private MySQL server', - 'image' : 'mysql' - }] - result = 'success' - body = 
json.dumps({'data' : data, 'result' : result}) - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _dreamhost_ps_reboot(self, method, url, body, headers): - body = json.dumps({'data' : 'reboot_scheduled', 'result' : 'success'}) - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _API_FAILURE_dreamhost_ps_reboot(self, method, url, body, headers): - body = json.dumps({'data' : 'no_such_ps', 'result' : 'error'}) - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _dreamhost_ps_set_size(self, method, url, body, headers): - body = json.dumps({'data' : {'memory-mb' : '500'}, 'result' : 'success'}) - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _API_FAILURE_dreamhost_ps_set_size(self, method, url, body, headers): - body = json.dumps({'data' : 'internal_error_setting_size', 'result' : 'error'}) - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _dreamhost_ps_remove_ps(self, method, url, body, headers): - body = json.dumps({'data' : 'removed_web', 'result' : 'success'}) - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _API_FAILURE_dreamhost_ps_remove_ps(self, method, url, body, headers): - body = json.dumps({'data' : 'no_such_ps', 'result' : 'error'}) - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - -if __name__ == '__main__': - sys.exit(unittest.main()) - diff -Nru libcloud-0.5.0/test/compute/test_ec2.py libcloud-0.15.1/test/compute/test_ec2.py --- libcloud-0.5.0/test/compute/test_ec2.py 2011-04-09 14:48:31.000000000 +0000 +++ libcloud-0.15.1/test/compute/test_ec2.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,344 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import sys -import unittest -import httplib - -from libcloud.compute.drivers.ec2 import EC2NodeDriver, EC2APSENodeDriver -from libcloud.compute.drivers.ec2 import NimbusNodeDriver -from libcloud.compute.drivers.ec2 import EC2APNENodeDriver, IdempotentParamError -from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation - -from test import MockHttp -from test.compute import TestCaseMixin -from test.file_fixtures import ComputeFileFixtures - -from test.secrets import EC2_ACCESS_ID, EC2_SECRET - -class EC2Tests(unittest.TestCase, TestCaseMixin): - - def setUp(self): - EC2NodeDriver.connectionCls.conn_classes = (None, EC2MockHttp) - EC2MockHttp.use_param = 'Action' - EC2MockHttp.type = None - self.driver = EC2NodeDriver(EC2_ACCESS_ID, EC2_SECRET) - - def test_create_node(self): - image = NodeImage(id='ami-be3adfd7', - name='ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml', - driver=self.driver) - size = NodeSize('m1.small', 'Small Instance', None, None, None, None, driver=self.driver) - node = self.driver.create_node(name='foo', image=image, size=size) - self.assertEqual(node.id, 'i-2ba64342') - - def test_create_node_idempotent(self): - EC2MockHttp.type = 'idempotent' - image = NodeImage(id='ami-be3adfd7', - name='ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml', - driver=self.driver) - size = NodeSize('m1.small', 'Small Instance', None, None, None, None, 
driver=self.driver) - token = 'testclienttoken' - node = self.driver.create_node(name='foo', image=image, size=size, - ex_clienttoken=token) - self.assertEqual(node.id, 'i-2ba64342') - self.assertEqual(node.extra['clienttoken'], token) - - # from: http://docs.amazonwebservices.com/AWSEC2/latest/DeveloperGuide/index.html?Run_Instance_Idempotency.html - - # If you repeat the request with the same client token, but change - # another request parameter, Amazon EC2 returns an - # IdempotentParameterMismatch error. - - # In our case, changing the parameter doesn't actually matter since we - # are forcing the error response fixture. - EC2MockHttp.type = 'idempotent_mismatch' - - idem_error = None - try: - self.driver.create_node(name='foo', image=image, size=size, - ex_mincount='2', ex_maxcount='2', # different count - ex_clienttoken=token) - except IdempotentParamError, e: - idem_error = e - self.assertTrue(idem_error is not None) - - def test_create_node_no_availability_zone(self): - image = NodeImage(id='ami-be3adfd7', - name='ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml', - driver=self.driver) - size = NodeSize('m1.small', 'Small Instance', None, None, None, None, - driver=self.driver) - node = self.driver.create_node(name='foo', image=image, size=size) - location = NodeLocation(0, 'Amazon US N. 
Virginia', 'US', self.driver) - self.assertEqual(node.id, 'i-2ba64342') - node = self.driver.create_node(name='foo', image=image, size=size, - location=location) - self.assertEqual(node.id, 'i-2ba64342') - - def test_list_nodes(self): - node = self.driver.list_nodes()[0] - public_ips = sorted(node.public_ip) - self.assertEqual(node.id, 'i-4382922a') - self.assertEqual(len(node.public_ip), 2) - - self.assertEqual(public_ips[0], '1.2.3.4') - self.assertEqual(public_ips[1], '1.2.3.5') - - def test_list_location(self): - locations = self.driver.list_locations() - self.assertTrue(len(locations) > 0) - self.assertTrue(locations[0].availability_zone != None) - - def test_reboot_node(self): - node = Node('i-4382922a', None, None, None, None, self.driver) - ret = self.driver.reboot_node(node) - self.assertTrue(ret) - - def test_destroy_node(self): - node = Node('i-4382922a', None, None, None, None, self.driver) - ret = self.driver.destroy_node(node) - self.assertTrue(ret) - - def test_list_sizes(self): - region_old = self.driver.region_name - - names = [ ('ec2_us_east', 'us-east-1'), - ('ec2_us_west', 'us-west-1'), - ('ec2_eu_west', 'eu-west-1'), - ('ec2_ap_southeast', 'ap-southeast-1'), - ('ec2_ap_northeast', 'ap-northeast-1') - ] - for api_name, region_name in names: - self.driver.api_name = api_name - self.driver.region_name = region_name - sizes = self.driver.list_sizes() - - ids = [s.id for s in sizes] - self.assertTrue('t1.micro' in ids) - self.assertTrue('m1.small' in ids) - self.assertTrue('m1.large' in ids) - self.assertTrue('m1.xlarge' in ids) - self.assertTrue('c1.medium' in ids) - self.assertTrue('c1.xlarge' in ids) - self.assertTrue('m2.xlarge' in ids) - self.assertTrue('m2.2xlarge' in ids) - self.assertTrue('m2.4xlarge' in ids) - - if region_name == 'us-east-1': - self.assertEqual(len(sizes), 11) - self.assertTrue('cg1.4xlarge' in ids) - self.assertTrue('cc1.4xlarge' in ids) - else: - self.assertEqual(len(sizes), 9) - - self.driver.region_name = region_old - - 
def test_list_images(self): - images = self.driver.list_images() - image = images[0] - self.assertEqual(len(images), 1) - self.assertEqual(image.name, 'ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml') - self.assertEqual(image.id, 'ami-be3adfd7') - - def test_ex_list_availability_zones(self): - availability_zones = self.driver.ex_list_availability_zones() - availability_zone = availability_zones[0] - self.assertTrue(len(availability_zones) > 0) - self.assertEqual(availability_zone.name, 'eu-west-1a') - self.assertEqual(availability_zone.zone_state, 'available') - self.assertEqual(availability_zone.region_name, 'eu-west-1') - - def test_ex_describe_tags(self): - node = Node('i-4382922a', None, None, None, None, self.driver) - tags = self.driver.ex_describe_tags(node) - - self.assertEqual(len(tags), 3) - self.assertTrue('tag' in tags) - self.assertTrue('owner' in tags) - self.assertTrue('stack' in tags) - - def test_ex_create_tags(self): - node = Node('i-4382922a', None, None, None, None, self.driver) - self.driver.ex_create_tags(node, {'sample': 'tag'}) - - def test_ex_delete_tags(self): - node = Node('i-4382922a', None, None, None, None, self.driver) - self.driver.ex_delete_tags(node, {'sample': 'tag'}) - - def test_ex_describe_addresses_for_node(self): - node1 = Node('i-4382922a', None, None, None, None, self.driver) - ip_addresses1 = self.driver.ex_describe_addresses_for_node(node1) - node2 = Node('i-4382922b', None, None, None, None, self.driver) - ip_addresses2 = sorted(self.driver.ex_describe_addresses_for_node(node2)) - node3 = Node('i-4382922g', None, None, None, None, self.driver) - ip_addresses3 = sorted(self.driver.ex_describe_addresses_for_node(node3)) - - self.assertEqual(len(ip_addresses1), 1) - self.assertEqual(ip_addresses1[0], '1.2.3.4') - - self.assertEqual(len(ip_addresses2), 2) - self.assertEqual(ip_addresses2[0], '1.2.3.5') - self.assertEqual(ip_addresses2[1], '1.2.3.6') - - self.assertEqual(len(ip_addresses3), 0) - - def 
test_ex_describe_addresses(self): - node1 = Node('i-4382922a', None, None, None, None, self.driver) - node2 = Node('i-4382922g', None, None, None, None, self.driver) - nodes_elastic_ips1 = self.driver.ex_describe_addresses([node1]) - nodes_elastic_ips2 = self.driver.ex_describe_addresses([node2]) - - self.assertEqual(len(nodes_elastic_ips1), 1) - self.assertTrue(node1.id in nodes_elastic_ips1) - self.assertEqual(nodes_elastic_ips1[node1.id], ['1.2.3.4']) - - self.assertEqual(len(nodes_elastic_ips2), 1) - self.assertTrue(node2.id in nodes_elastic_ips2) - self.assertEqual(nodes_elastic_ips2[node2.id], []) - - def test_ex_change_node_size_same_size(self): - size = NodeSize('m1.small', 'Small Instance', None, None, None, None, driver=self.driver) - node = Node('i-4382922a', None, None, None, None, self.driver, - extra={'instancetype': 'm1.small'}) - - try: - self.driver.ex_change_node_size(node=node, new_size=size) - except ValueError: - pass - else: - self.fail('Same size was passed, but an exception was not thrown') - - def test_ex_change_node_size(self): - size = NodeSize('m1.large', 'Small Instance', None, None, None, None, driver=self.driver) - node = Node('i-4382922a', None, None, None, None, self.driver, - extra={'instancetype': 'm1.small'}) - - result = self.driver.ex_change_node_size(node=node, new_size=size) - self.assertTrue(result) - -class EC2MockHttp(MockHttp): - - fixtures = ComputeFileFixtures('ec2') - - def _DescribeInstances(self, method, url, body, headers): - body = self.fixtures.load('describe_instances.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _DescribeAvailabilityZones(self, method, url, body, headers): - body = self.fixtures.load('describe_availability_zones.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _RebootInstances(self, method, url, body, headers): - body = self.fixtures.load('reboot_instances.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def 
_DescribeImages(self, method, url, body, headers): - body = self.fixtures.load('describe_images.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _RunInstances(self, method, url, body, headers): - body = self.fixtures.load('run_instances.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _idempotent_RunInstances(self, method, url, body, headers): - body = self.fixtures.load('run_instances_idem.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _idempotent_mismatch_RunInstances(self, method, url, body, headers): - body = self.fixtures.load('run_instances_idem_mismatch.xml') - return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.BAD_REQUEST]) - - def _TerminateInstances(self, method, url, body, headers): - body = self.fixtures.load('terminate_instances.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _DescribeTags(self, method, url, body, headers): - body = self.fixtures.load('describe_tags.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _CreateTags(self, method, url, body, headers): - body = self.fixtures.load('create_tags.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _DeleteTags(self, method, url, body, headers): - body = self.fixtures.load('delete_tags.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _DescribeAddresses(self, method, url, body, headers): - body = self.fixtures.load('describe_addresses_multi.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _ModifyInstanceAttribute(self, method, url, body, headers): - body = self.fixtures.load('modify_instance_attribute.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _idempotent_CreateTags(self, method, url, body, headers): - body = self.fixtures.load('create_tags.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - -class 
EC2APSETests(EC2Tests): - def setUp(self): - EC2APSENodeDriver.connectionCls.conn_classes = (None, EC2MockHttp) - EC2MockHttp.use_param = 'Action' - EC2MockHttp.type = None - self.driver = EC2APSENodeDriver(EC2_ACCESS_ID, EC2_SECRET) - -class EC2APNETests(EC2Tests): - def setUp(self): - EC2APNENodeDriver.connectionCls.conn_classes = (None, EC2MockHttp) - EC2MockHttp.use_param = 'Action' - EC2MockHttp.type = None - self.driver = EC2APNENodeDriver(EC2_ACCESS_ID, EC2_SECRET) - -class NimbusTests(EC2Tests): - def setUp(self): - NimbusNodeDriver.connectionCls.conn_classes = (None, EC2MockHttp) - EC2MockHttp.use_param = 'Action' - EC2MockHttp.type = None - self.driver = NimbusNodeDriver(EC2_ACCESS_ID, EC2_SECRET, - host="some.nimbuscloud.com") - - def test_ex_describe_addresses_for_node(self): - # overridden from EC2Tests -- Nimbus doesn't support elastic IPs. - node = Node('i-4382922a', None, None, None, None, self.driver) - ip_addresses = self.driver.ex_describe_addresses_for_node(node) - self.assertEqual(len(ip_addresses), 0) - - def test_ex_describe_addresses(self): - # overridden from EC2Tests -- Nimbus doesn't support elastic IPs. - node = Node('i-4382922a', None, None, None, None, self.driver) - nodes_elastic_ips = self.driver.ex_describe_addresses([node]) - - self.assertEqual(len(nodes_elastic_ips), 1) - self.assertEqual(len(nodes_elastic_ips[node.id]), 0) - - def test_list_sizes(self): - sizes = self.driver.list_sizes() - - ids = [s.id for s in sizes] - self.assertTrue('m1.small' in ids) - self.assertTrue('m1.large' in ids) - self.assertTrue('m1.xlarge' in ids) - - def test_list_nodes(self): - # overridden from EC2Tests -- Nimbus doesn't support elastic IPs. 
- node = self.driver.list_nodes()[0] - public_ips = node.public_ip - self.assertEqual(node.id, 'i-4382922a') - self.assertEqual(len(node.public_ip), 1) - self.assertEqual(public_ips[0], '1.2.3.5') - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/compute/test_ecp.py libcloud-0.15.1/test/compute/test_ecp.py --- libcloud-0.5.0/test/compute/test_ecp.py 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/test_ecp.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,128 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import sys -import unittest -import httplib - -from libcloud.compute.drivers.ecp import ECPNodeDriver -from libcloud.compute.types import NodeState - -from test import MockHttp -from test.compute import TestCaseMixin -from test.file_fixtures import ComputeFileFixtures - -from test.secrets import ECP_USER_NAME, ECP_PASSWORD - -class ECPTests(unittest.TestCase, TestCaseMixin): - - def setUp(self): - ECPNodeDriver.connectionCls.conn_classes = (None, - ECPMockHttp) - self.driver = ECPNodeDriver(ECP_USER_NAME, ECP_PASSWORD) - - - def test_list_nodes(self): - nodes = self.driver.list_nodes() - self.assertEqual(len(nodes),2) - node = nodes[0] - self.assertEqual(node.id, '1') - self.assertEqual(node.name, 'dummy-1') - self.assertEqual(node.public_ip[0], "42.78.124.75") - self.assertEqual(node.state, NodeState.RUNNING) - - - def test_list_sizes(self): - sizes = self.driver.list_sizes() - self.assertEqual(len(sizes),3) - size = sizes[0] - self.assertEqual(size.id,'1') - self.assertEqual(size.ram,512) - self.assertEqual(size.disk,0) - self.assertEqual(size.bandwidth,0) - self.assertEqual(size.price,0) - - def test_list_images(self): - images = self.driver.list_images() - self.assertEqual(len(images),2) - self.assertEqual(images[0].name,"centos54: AUTO import from /opt/enomalism2/repo/5d407a68-c76c-11de-86e5-000475cb7577.xvm2") - self.assertEqual(images[0].id, "1") - self.assertEqual(images[1].name,"centos54 two: AUTO import from /opt/enomalism2/repo/5d407a68-c76c-11de-86e5-000475cb7577.xvm2") - self.assertEqual(images[1].id, "2") - - def test_reboot_node(self): - # Raises exception on failure - node = self.driver.list_nodes()[0] - self.driver.reboot_node(node) - - def test_destroy_node(self): - # Raises exception on failure - node = self.driver.list_nodes()[0] - self.driver.destroy_node(node) - - def test_create_node(self): - # Raises exception on failure - size = self.driver.list_sizes()[0] - image = self.driver.list_images()[0] - node = 
self.driver.create_node(name="api.ivan.net.nz", image=image, size=size) - self.assertEqual(node.name, "api.ivan.net.nz") - self.assertEqual(node.id, "1234") - -class ECPMockHttp(MockHttp): - - fixtures = ComputeFileFixtures('ecp') - - def _modules_hosting(self, method, url, body, headers): - headers = {} - headers['set-cookie'] = 'vcloud-token=testtoken' - body = 'Anything' - return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - - def _rest_hosting_vm_1(self, method, url, body, headers): - if method == 'GET': - body = self.fixtures.load('vm_1_get.json') - if method == 'POST': - if body.find('delete',0): - body = self.fixtures.load('vm_1_action_delete.json') - if body.find('stop',0): - body = self.fixtures.load('vm_1_action_stop.json') - if body.find('start',0): - body = self.fixtures.load('vm_1_action_start.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _rest_hosting_vm(self, method, url, body, headers): - if method == 'PUT': - body = self.fixtures.load('vm_put.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _rest_hosting_vm_list(self, method, url, body, headers): - body = self.fixtures.load('vm_list.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _rest_hosting_htemplate_list(self, method, url, body, headers): - body = self.fixtures.load('htemplate_list.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _rest_hosting_network_list(self, method, url, body, headers): - body = self.fixtures.load('network_list.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _rest_hosting_ptemplate_list(self, method, url, body, headers): - body = self.fixtures.load('ptemplate_list.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/compute/test_elastichosts.py libcloud-0.15.1/test/compute/test_elastichosts.py --- 
libcloud-0.5.0/test/compute/test_elastichosts.py 2011-05-21 20:10:20.000000000 +0000 +++ libcloud-0.15.1/test/compute/test_elastichosts.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,172 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# Copyright 2009 RedRata Ltd - -import sys -import unittest -import httplib - -from libcloud.compute.base import Node -from libcloud.compute.drivers.elastichosts import \ - (ElasticHostsBaseNodeDriver as ElasticHosts, - ElasticHostsException) -from libcloud.common.types import InvalidCredsError, MalformedResponseError - -from test import MockHttp -from test.file_fixtures import ComputeFileFixtures - -class ElasticHostsTestCase(unittest.TestCase): - - def setUp(self): - ElasticHosts.connectionCls.conn_classes = (None, - ElasticHostsHttp) - ElasticHostsHttp.type = None - self.driver = ElasticHosts('foo', 'bar') - self.node = Node(id=72258, name=None, state=None, public_ip=None, - private_ip=None, driver=self.driver) - - def test_invalid_creds(self): - ElasticHostsHttp.type = 'UNAUTHORIZED' - try: - self.driver.list_nodes() - except InvalidCredsError, e: - self.assertEqual(True, isinstance(e, InvalidCredsError)) - else: - self.fail('test should have thrown') - - def test_malformed_response(self): - 
ElasticHostsHttp.type = 'MALFORMED' - try: - self.driver.list_nodes() - except MalformedResponseError: - pass - else: - self.fail('test should have thrown') - - def test_parse_error(self): - ElasticHostsHttp.type = 'PARSE_ERROR' - try: - self.driver.list_nodes() - except Exception, e: - self.assertTrue(str(e).find('X-Elastic-Error') != -1) - else: - self.fail('test should have thrown') - - def test_ex_set_node_configuration(self): - success = self.driver.ex_set_node_configuration(node=self.node, - name='name', - cpu='2') - - def test_ex_set_node_configuration_invalid_keys(self): - try: - self.driver.ex_set_node_configuration(node=self.node, foo='bar') - except ElasticHostsException: - pass - else: - self.fail('Invalid option specified, but an exception was not thrown') - - def test_list_nodes(self): - nodes = self.driver.list_nodes() - self.assertTrue(isinstance(nodes, list)) - self.assertEqual(len(nodes), 1) - - node = nodes[0] - self.assertEqual(node.public_ip[0], "1.2.3.4") - self.assertEqual(node.public_ip[1], "1.2.3.5") - self.assertEqual(node.extra['smp'], 1) - - def test_list_sizes(self): - images = self.driver.list_sizes() - self.assertEqual(len(images), 6) - image = [i for i in images if i.id == 'small'][0] - self.assertEqual(image.id, 'small') - self.assertEqual(image.name, 'Small instance') - self.assertEqual(image.cpu, 2000) - self.assertEqual(image.ram, 1700) - self.assertEqual(image.disk, 160) - self.assertTrue(isinstance(image.price, float)) - - def test_list_images(self): - sizes = self.driver.list_images() - self.assertEqual(len(sizes), 8) - size = [s for s in sizes if \ - s.id == '38df0986-4d85-4b76-b502-3878ffc80161'][0] - self.assertEqual(size.id, '38df0986-4d85-4b76-b502-3878ffc80161') - self.assertEqual(size.name, 'CentOS Linux 5.5') - - def test_reboot_node(self): - node = self.driver.list_nodes()[0] - self.assertTrue(self.driver.reboot_node(node)) - - def test_destroy_node(self): - node = self.driver.list_nodes()[0] - 
self.assertTrue(self.driver.destroy_node(node)) - - def test_create_node(self): - sizes = self.driver.list_sizes() - size = [s for s in sizes if \ - s.id == 'large'][0] - images = self.driver.list_images() - image = [i for i in images if \ - i.id == '38df0986-4d85-4b76-b502-3878ffc80161'][0] - - self.assertTrue(self.driver.create_node(name="api.ivan.net.nz", - image=image, size=size)) - -class ElasticHostsHttp(MockHttp): - - fixtures = ComputeFileFixtures('elastichosts') - - def _servers_info_UNAUTHORIZED(self, method, url, body, headers): - return (httplib.UNAUTHORIZED, body, {}, httplib.responses[httplib.NO_CONTENT]) - - def _servers_info_MALFORMED(self, method, url, body, headers): - body = "{malformed: '" - return (httplib.OK, body, {}, httplib.responses[httplib.NO_CONTENT]) - - def _servers_info_PARSE_ERROR(self, method, url, body, headers): - return (505, body, {}, httplib.responses[httplib.NO_CONTENT]) - - def _servers_b605ca90_c3e6_4cee_85f8_a8ebdf8f9903_reset(self, method, url, body, headers): - return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) - - def _servers_b605ca90_c3e6_4cee_85f8_a8ebdf8f9903_destroy(self, method, url, body, headers): - return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) - - def _drives_create(self, method, url, body, headers): - body = self.fixtures.load('drives_create.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _drives_0012e24a_6eae_4279_9912_3432f698cec8_image_38df0986_4d85_4b76_b502_3878ffc80161_gunzip(self, method, url, body, headers): - return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) - - def _drives_0012e24a_6eae_4279_9912_3432f698cec8_info(self, method, url, body, headers): - body = self.fixtures.load('drives_info.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _servers_create(self, method, url, body, headers): - body = self.fixtures.load('servers_create.json') - return (httplib.OK, 
body, {}, httplib.responses[httplib.OK]) - - def _servers_info(self, method, url, body, headers): - body = self.fixtures.load('servers_info.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _servers_72258_set(self, method, url, body, headers): - body = '{}' - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/compute/test_gandi.py libcloud-0.15.1/test/compute/test_gandi.py --- libcloud-0.5.0/test/compute/test_gandi.py 2011-05-21 15:42:52.000000000 +0000 +++ libcloud-0.15.1/test/compute/test_gandi.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,148 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest -import sys -import random -import string -import httplib -import xmlrpclib - -from libcloud.compute.drivers.gandi import GandiNodeDriver as Gandi -from libcloud.compute.types import NodeState - -from xml.etree import ElementTree as ET -from test import MockHttp -from test.file_fixtures import ComputeFileFixtures -from test.secrets import GANDI_USER - -class MockGandiTransport(xmlrpclib.Transport): - - def request(self, host, handler, request_body, verbose=0): - self.verbose = 0 - method = ET.XML(request_body).find('methodName').text - mock = GandiMockHttp(host, 80) - mock.request('POST', "%s/%s" % (handler, method)) - resp = mock.getresponse() - - if sys.version[0] == '2' and sys.version[2] == '7': - response = self.parse_response(resp) - else: - response = self.parse_response(resp.body) - return response - -class GandiTests(unittest.TestCase): - - node_name = 'test2' - def setUp(self): - Gandi.connectionCls.proxyCls.transportCls = [MockGandiTransport, MockGandiTransport] - self.driver = Gandi(GANDI_USER) - - def test_list_nodes(self): - nodes = self.driver.list_nodes() - self.assertTrue(len(nodes)>0) - - def test_list_locations(self): - loc = filter(lambda x: 'france' in x.country.lower(), self.driver.list_locations())[0] - self.assertEqual(loc.country, 'France') - - def test_list_images(self): - loc = filter(lambda x: 'france' in x.country.lower(), self.driver.list_locations())[0] - images = self.driver.list_images(loc) - self.assertTrue(len(images)>2) - - def test_list_sizes(self): - sizes = self.driver.list_sizes() - self.assertTrue(len(sizes)>=1) - - def test_destroy_node_running(self): - nodes = self.driver.list_nodes() - test_node = filter(lambda x: x.state == NodeState.RUNNING, nodes)[0] - self.assertTrue(self.driver.destroy_node(test_node)) - - def test_destroy_node_halted(self): - nodes = self.driver.list_nodes() - test_node = filter(lambda x: x.state == NodeState.TERMINATED, nodes)[0] - 
self.assertTrue(self.driver.destroy_node(test_node)) - - def test_reboot_node(self): - nodes = self.driver.list_nodes() - test_node = filter(lambda x: x.state == NodeState.RUNNING, nodes)[0] - self.assertTrue(self.driver.reboot_node(test_node)) - - def test_create_node(self): - login = 'libcloud' - passwd = ''.join(random.choice(string.letters + string.digits) for i in xrange(10)) - # Get france datacenter - loc = filter(lambda x: 'france' in x.country.lower(), self.driver.list_locations())[0] - # Get a debian image - images = self.driver.list_images(loc) - images = [x for x in images if x.name.lower().startswith('debian')] - img = filter(lambda x: '5' in x.name, images)[0] - # Get a configuration size - size = self.driver.list_sizes()[0] - node = self.driver.create_node(name=self.node_name,login=login,password=passwd,image=img,location=loc,size=size) - self.assertEqual(node.name, self.node_name) - -class GandiMockHttp(MockHttp): - - fixtures = ComputeFileFixtures('gandi') - - def _xmlrpc_2_0__datacenter_list(self, method, url, body, headers): - body = self.fixtures.load('datacenter_list.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc_2_0__image_list(self, method, url, body, headers): - body = self.fixtures.load('image_list_dc0.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc_2_0__vm_list(self, method, url, body, headers): - body = self.fixtures.load('vm_list.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc_2_0__ip_list(self, method, url, body, headers): - body = self.fixtures.load('ip_list.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc_2_0__account_info(self, method, url, body, headers): - body = self.fixtures.load('account_info.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc_2_0__vm_info(self, method, url, body, headers): - body = self.fixtures.load('vm_info.xml') - return 
(httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc_2_0__vm_delete(self, method, url, body, headers): - body = self.fixtures.load('vm_delete.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc_2_0__operation_info(self, method, url, body, headers): - body = self.fixtures.load('operation_info.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc_2_0__vm_create_from(self, method, url, body, headers): - body = self.fixtures.load('vm_create_from.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc_2_0__vm_reboot(self, method, url, body, headers): - body = self.fixtures.load('vm_reboot.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _xmlrpc_2_0__vm_stop(self, method, url, body, headers): - body = self.fixtures.load('vm_stop.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/compute/test_gogrid.py libcloud-0.15.1/test/compute/test_gogrid.py --- libcloud-0.5.0/test/compute/test_gogrid.py 2011-05-21 20:10:20.000000000 +0000 +++ libcloud-0.15.1/test/compute/test_gogrid.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,279 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -import httplib -import sys -import unittest -import urlparse - -from libcloud.compute.base import NodeState, NodeLocation -from libcloud.common.types import LibcloudError, InvalidCredsError -from libcloud.common.gogrid import GoGridIpAddress -from libcloud.compute.drivers.gogrid import GoGridNodeDriver -from libcloud.compute.base import Node, NodeImage, NodeSize - -from test import MockHttp # pylint: disable-msg=E0611 -from test.compute import TestCaseMixin # pylint: disable-msg=E0611 -from test.file_fixtures import ComputeFileFixtures # pylint: disable-msg=E0611 - -class GoGridTests(unittest.TestCase, TestCaseMixin): - - def setUp(self): - GoGridNodeDriver.connectionCls.conn_classes = (None, GoGridMockHttp) - GoGridMockHttp.type = None - self.driver = GoGridNodeDriver("foo", "bar") - - def _get_test_512Mb_node_size(self): - return NodeSize(id='512Mb', - name=None, - ram=None, - disk=None, - bandwidth=None, - price=None, - driver=self.driver) - - def test_create_node(self): - image = NodeImage(1531, None, self.driver) - node = self.driver.create_node( - name='test1', - image=image, - size=self._get_test_512Mb_node_size()) - self.assertEqual(node.name, 'test1') - self.assertTrue(node.id is not None) - self.assertEqual(node.extra['password'], 'bebebe') - - def test_list_nodes(self): - node = self.driver.list_nodes()[0] - - self.assertEqual(node.id, '90967') - self.assertEqual(node.extra['password'], 'bebebe') - - def test_reboot_node(self): - node = Node(90967, None, None, None, None, self.driver) - ret = self.driver.reboot_node(node) - self.assertTrue(ret) - - def test_reboot_node_not_successful(self): - GoGridMockHttp.type = 'FAIL' - node = Node(90967, None, None, None, None, self.driver) - - try: - self.driver.reboot_node(node) - except Exception: - pass - else: - self.fail('Exception was not thrown') - - def test_destroy_node(self): - node = Node(90967, None, 
None, None, None, self.driver) - ret = self.driver.destroy_node(node) - self.assertTrue(ret) - - def test_list_images(self): - images = self.driver.list_images() - image = images[0] - self.assertEqual(len(images), 4) - self.assertEqual(image.name, 'CentOS 5.3 (32-bit) w/ None') - self.assertEqual(image.id, '1531') - - location = NodeLocation(id='gogrid/GSI-939ef909-84b8-4a2f-ad56-02ccd7da05ff.img', - name='test location', country='Slovenia', - driver=self.driver) - images = self.driver.list_images(location=location) - image = images[0] - self.assertEqual(len(images), 4) - self.assertEqual(image.name, 'CentOS 5.3 (32-bit) w/ None') - self.assertEqual(image.id, '1531') - - def test_malformed_reply(self): - GoGridMockHttp.type = 'FAIL' - try: - self.driver.list_images() - except LibcloudError, e: - self.assertTrue(isinstance(e, LibcloudError)) - else: - self.fail("test should have thrown") - - def test_invalid_creds(self): - GoGridMockHttp.type = 'FAIL' - try: - self.driver.list_nodes() - except InvalidCredsError, e: - self.assertTrue(e.driver is not None) - self.assertEqual(e.driver.name, self.driver.name) - else: - self.fail("test should have thrown") - - def test_node_creation_without_free_public_ips(self): - GoGridMockHttp.type = 'NOPUBIPS' - try: - image = NodeImage(1531, None, self.driver) - self.driver.create_node( - name='test1', - image=image, - size=self._get_test_512Mb_node_size()) - except LibcloudError, e: - self.assertTrue(isinstance(e, LibcloudError)) - self.assertTrue(e.driver is not None) - self.assertEqual(e.driver.name, self.driver.name) - else: - self.fail("test should have thrown") - - def test_list_locations(self): - locations = self.driver.list_locations() - location_names = [location.name for location in locations] - - self.assertEqual(len(locations), 2) - for i in 0, 1: - self.assertTrue(isinstance(locations[i], NodeLocation)) - self.assertTrue("US-West-1" in location_names) - self.assertTrue("US-East-1" in location_names) - - def 
test_ex_save_image(self): - node = self.driver.list_nodes()[0] - image = self.driver.ex_save_image(node, "testimage") - self.assertEqual(image.name, "testimage") - - def test_ex_edit_image(self): - image = self.driver.list_images()[0] - ret = self.driver.ex_edit_image(image=image, public=False, - ex_description="test", name="testname") - - self.assertTrue(isinstance(ret, NodeImage)) - - def test_ex_edit_node(self): - node = Node(id=90967, name=None, state=None, - public_ip=None, private_ip=None, driver=self.driver) - ret = self.driver.ex_edit_node(node=node, - size=self._get_test_512Mb_node_size()) - - self.assertTrue(isinstance(ret, Node)) - - def test_ex_list_ips(self): - ips = self.driver.ex_list_ips() - - expected_ips = {"192.168.75.66": GoGridIpAddress(id="5348099", - ip="192.168.75.66", public=True, state="Unassigned", - subnet="192.168.75.64/255.255.255.240"), - "192.168.75.67": GoGridIpAddress(id="5348100", - ip="192.168.75.67", public=True, state="Assigned", - subnet="192.168.75.64/255.255.255.240"), - "192.168.75.68": GoGridIpAddress(id="5348101", - ip="192.168.75.68", public=False, state="Unassigned", - subnet="192.168.75.64/255.255.255.240")} - - self.assertEqual(len(expected_ips), 3) - - for ip in ips: - self.assertTrue(ip.ip in expected_ips) - self.assertEqual(ip.public, expected_ips[ip.ip].public) - self.assertEqual(ip.state, expected_ips[ip.ip].state) - self.assertEqual(ip.subnet, expected_ips[ip.ip].subnet) - - del expected_ips[ip.ip] - - self.assertEqual(len(expected_ips), 0) - - def test_get_state_invalid(self): - state = self.driver._get_state('invalid') - self.assertEqual(state, NodeState.UNKNOWN) - -class GoGridMockHttp(MockHttp): - - fixtures = ComputeFileFixtures('gogrid') - - def _api_grid_image_list(self, method, url, body, headers): - body = self.fixtures.load('image_list.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _api_grid_image_list_FAIL(self, method, url, body, headers): - body = "

some non valid json here

" - return (httplib.SERVICE_UNAVAILABLE, body, {}, - httplib.responses[httplib.SERVICE_UNAVAILABLE]) - - def _api_grid_server_list(self, method, url, body, headers): - body = self.fixtures.load('server_list.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - _api_grid_server_list_NOPUBIPS = _api_grid_server_list - - def _api_grid_server_list_FAIL(self, method, url, body, headers): - return (httplib.FORBIDDEN, - "123", {}, httplib.responses[httplib.FORBIDDEN]) - - def _api_grid_ip_list(self, method, url, body, headers): - body = self.fixtures.load('ip_list.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _api_grid_ip_list_NOPUBIPS(self, method, url, body, headers): - body = self.fixtures.load('ip_list_empty.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _api_grid_server_power(self, method, url, body, headers): - body = self.fixtures.load('server_power.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _api_grid_server_power_FAIL(self, method, url, body, headers): - body = self.fixtures.load('server_power_fail.json') - return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK]) - - def _api_grid_server_add(self, method, url, body, headers): - body = self.fixtures.load('server_add.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - _api_grid_server_add_NOPUBIPS = _api_grid_server_add - - def _api_grid_server_delete(self, method, url, body, headers): - body = self.fixtures.load('server_delete.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _api_grid_server_edit(self, method, url, body, headers): - body = self.fixtures.load('server_edit.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _api_support_password_list(self, method, url, body, headers): - body = self.fixtures.load('password_list.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - 
_api_support_password_list_NOPUBIPS = _api_support_password_list - - def _api_grid_image_save(self, method, url, body, headers): - body = self.fixtures.load('image_save.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _api_grid_image_edit(self, method, url, body, headers): - # edit method is quite similar to save method from the response - # perspective - body = self.fixtures.load('image_save.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _api_common_lookup_list(self, method, url, body, headers): - _valid_lookups = ("ip.datacenter",) - - try: - from urlparse import parse_qs - except ImportError: - from cgi import parse_qs - - lookup = parse_qs(urlparse.urlparse(url).query)["lookup"][0] - if lookup in _valid_lookups: - fixture_path = "lookup_list_%s.json" % \ - (lookup.replace(".", "_")) - else: - raise NotImplementedError - body = self.fixtures.load(fixture_path) - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/compute/test_ibm_sbc.py libcloud-0.15.1/test/compute/test_ibm_sbc.py --- libcloud-0.5.0/test/compute/test_ibm_sbc.py 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/test_ibm_sbc.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,206 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -import unittest -import httplib -import sys - -from libcloud.compute.types import InvalidCredsError -from libcloud.compute.drivers.ibm_sbc import IBMNodeDriver as IBM -from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation - -from test import MockHttp -from test.compute import TestCaseMixin -from test.file_fixtures import ComputeFileFixtures -from test.secrets import IBM_USER, IBM_SECRET - -class IBMTests(unittest.TestCase, TestCaseMixin): - """ - Tests the IBM Developer Cloud driver. - """ - - def setUp(self): - IBM.connectionCls.conn_classes = (None, IBMMockHttp) - IBMMockHttp.type = None - self.driver = IBM(IBM_USER, IBM_SECRET) - - def test_auth(self): - IBMMockHttp.type = 'UNAUTHORIZED' - - try: - self.driver.list_nodes() - except InvalidCredsError, e: - self.assertTrue(isinstance(e, InvalidCredsError)) - self.assertEquals(e.value, '401: Unauthorized') - else: - self.fail('test should have thrown') - - def test_list_nodes(self): - ret = self.driver.list_nodes() - self.assertEquals(len(ret), 3) - self.assertEquals(ret[0].id, '26557') - self.assertEquals(ret[0].name, 'Insight Instance') - self.assertEquals(ret[0].public_ip, '129.33.196.128') - self.assertEquals(ret[0].private_ip, None) # Private IPs not supported - self.assertEquals(ret[1].public_ip, None) # Node is non-active (no IP) - self.assertEquals(ret[1].private_ip, None) - self.assertEquals(ret[1].id, '28193') - - def test_list_sizes(self): - ret = self.driver.list_sizes() - self.assertEquals(len(ret), 9) # 9 instance configurations supported - self.assertEquals(ret[0].id, 'BRZ32.1/2048/60*175') - 
self.assertEquals(ret[1].id, 'BRZ64.2/4096/60*500*350') - self.assertEquals(ret[2].id, 'COP32.1/2048/60') - self.assertEquals(ret[0].name, 'Bronze 32 bit') - self.assertEquals(ret[0].disk, None) - - def test_list_images(self): - ret = self.driver.list_images() - self.assertEqual(len(ret), 21) - self.assertEqual(ret[10].name, "Rational Asset Manager 7.2.0.1") - self.assertEqual(ret[9].id, '10002573') - - def test_list_locations(self): - ret = self.driver.list_locations() - self.assertEquals(len(ret), 1) - self.assertEquals(ret[0].id, '1') - self.assertEquals(ret[0].name, 'US North East: Poughkeepsie, NY') - self.assertEquals(ret[0].country, 'US') - - def test_create_node(self): - # Test creation of node - IBMMockHttp.type = 'CREATE' - image = NodeImage(id=11, name='Rational Insight', driver=self.driver) - size = NodeSize('LARGE', 'LARGE', None, None, None, None, self.driver) - location = NodeLocation('1', 'POK', 'US', driver=self.driver) - ret = self.driver.create_node(name='RationalInsight4', - image=image, - size=size, - location=location, - publicKey='MyPublicKey', - configurationData = { - 'insight_admin_password': 'myPassword1', - 'db2_admin_password': 'myPassword2', - 'report_user_password': 'myPassword3'}) - self.assertTrue(isinstance(ret, Node)) - self.assertEquals(ret.name, 'RationalInsight4') - - # Test creation attempt with invalid location - IBMMockHttp.type = 'CREATE_INVALID' - location = NodeLocation('3', 'DOESNOTEXIST', 'US', driver=self.driver) - try: - ret = self.driver.create_node(name='RationalInsight5', - image=image, - size=size, - location=location, - publicKey='MyPublicKey', - configurationData = { - 'insight_admin_password': 'myPassword1', - 'db2_admin_password': 'myPassword2', - 'report_user_password': 'myPassword3'}) - except Exception, e: - self.assertEquals(e.args[0], 'Error 412: No DataCenter with id: 3') - else: - self.fail('test should have thrown') - - def test_destroy_node(self): - # Delete existant node - nodes = 
self.driver.list_nodes() # retrieves 3 nodes - self.assertEquals(len(nodes), 3) - IBMMockHttp.type = 'DELETE' - toDelete = nodes[1] - ret = self.driver.destroy_node(toDelete) - self.assertTrue(ret) - - # Delete non-existant node - IBMMockHttp.type = 'DELETED' - nodes = self.driver.list_nodes() # retrieves 2 nodes - self.assertEquals(len(nodes), 2) - try: - self.driver.destroy_node(toDelete) # delete non-existent node - except Exception, e: - self.assertEquals(e.args[0], 'Error 404: Invalid Instance ID 28193') - else: - self.fail('test should have thrown') - - def test_reboot_node(self): - nodes = self.driver.list_nodes() - IBMMockHttp.type = 'REBOOT' - - # Reboot active node - self.assertEquals(len(nodes), 3) - ret = self.driver.reboot_node(nodes[0]) - self.assertTrue(ret) - - # Reboot inactive node - try: - ret = self.driver.reboot_node(nodes[1]) - except Exception, e: - self.assertEquals(e.args[0], 'Error 412: Instance must be in the Active state') - else: - self.fail('test should have thrown') - -class IBMMockHttp(MockHttp): - fixtures = ComputeFileFixtures('ibm_sbc') - - def _computecloud_enterprise_api_rest_20100331_instances(self, method, url, body, headers): - body = self.fixtures.load('instances.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _computecloud_enterprise_api_rest_20100331_instances_DELETED(self, method, url, body, headers): - body = self.fixtures.load('instances_deleted.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _computecloud_enterprise_api_rest_20100331_instances_UNAUTHORIZED(self, method, url, body, headers): - return (httplib.UNAUTHORIZED, body, {}, httplib.responses[httplib.UNAUTHORIZED]) - - def _computecloud_enterprise_api_rest_20100331_offerings_image(self, method, url, body, headers): - body = self.fixtures.load('images.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _computecloud_enterprise_api_rest_20100331_locations(self, method, url, body, 
headers): - body = self.fixtures.load('locations.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _computecloud_enterprise_api_rest_20100331_instances_26557_REBOOT(self, method, url, body, headers): - body = self.fixtures.load('reboot_active.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _computecloud_enterprise_api_rest_20100331_instances_28193_REBOOT(self, method, url, body, headers): - return (412, 'Error 412: Instance must be in the Active state', {}, 'Precondition Failed') - - def _computecloud_enterprise_api_rest_20100331_instances_28193_DELETE(self, method, url, body, headers): - body = self.fixtures.load('delete.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _computecloud_enterprise_api_rest_20100331_instances_28193_DELETED(self, method, url, body, headers): - return (404, 'Error 404: Invalid Instance ID 28193', {}, 'Precondition Failed') - - def _computecloud_enterprise_api_rest_20100331_instances_CREATE(self, method, url, body, headers): - body = self.fixtures.load('create.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _computecloud_enterprise_api_rest_20100331_instances_CREATE_INVALID(self, method, url, body, headers): - return (412, 'Error 412: No DataCenter with id: 3', {}, 'Precondition Failed') - - # This is only to accomodate the response tests built into test\__init__.py - def _computecloud_enterprise_api_rest_20100331_instances_26557(self, method, url, body, headers): - if method == 'DELETE': - body = self.fixtures.load('delete.xml') - else: - body = self.fixtures.load('reboot_active.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/compute/test_linode.py libcloud-0.15.1/test/compute/test_linode.py --- libcloud-0.5.0/test/compute/test_linode.py 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/test_linode.py 
1970-01-01 00:00:00.000000000 +0000 @@ -1,148 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Maintainer: Jed Smith -# Based upon code written by Alex Polvi -# - -import sys -import unittest -import httplib - -from libcloud.compute.drivers.linode import LinodeNodeDriver -from libcloud.compute.base import Node, NodeAuthPassword - -from test import MockHttp -from test.compute import TestCaseMixin - -class LinodeTest(unittest.TestCase, TestCaseMixin): - # The Linode test suite - - def setUp(self): - LinodeNodeDriver.connectionCls.conn_classes = (None, LinodeMockHttp) - LinodeMockHttp.use_param = 'api_action' - self.driver = LinodeNodeDriver('foo') - - def test_list_nodes(self): - nodes = self.driver.list_nodes() - self.assertEqual(len(nodes), 1) - node = nodes[0] - self.assertEqual(node.id, "8098") - self.assertEqual(node.name, 'api-node3') - self.assertTrue('75.127.96.245' in node.public_ip) - self.assertEqual(node.private_ip, []) - - def test_reboot_node(self): - # An exception would indicate failure - node = self.driver.list_nodes()[0] - self.driver.reboot_node(node) - - def test_destroy_node(self): - # An exception would indicate failure - node = self.driver.list_nodes()[0] - self.driver.destroy_node(node) - - def 
test_create_node(self): - # Will exception on failure - self.driver.create_node(name="Test", - location=self.driver.list_locations()[0], - size=self.driver.list_sizes()[0], - image=self.driver.list_images()[6], - auth=NodeAuthPassword("test123")) - - def test_list_sizes(self): - sizes = self.driver.list_sizes() - self.assertEqual(len(sizes), 10) - for size in sizes: - self.assertEqual(size.ram, int(size.name.split(" ")[1])) - - def test_list_images(self): - images = self.driver.list_images() - self.assertEqual(len(images), 22) - - def test_create_node_response(self): - # should return a node object - node = self.driver.create_node(name="node-name", - location=self.driver.list_locations()[0], - size=self.driver.list_sizes()[0], - image=self.driver.list_images()[0], - auth=NodeAuthPassword("foobar")) - self.assertTrue(isinstance(node[0], Node)) - - -class LinodeMockHttp(MockHttp): - def _avail_datacenters(self, method, url, body, headers): - body = '{"ERRORARRAY":[],"ACTION":"avail.datacenters","DATA":[{"DATACENTERID":2,"LOCATION":"Dallas, TX, USA"},{"DATACENTERID":3,"LOCATION":"Fremont, CA, USA"},{"DATACENTERID":4,"LOCATION":"Atlanta, GA, USA"},{"DATACENTERID":6,"LOCATION":"Newark, NJ, USA"}]}' - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _avail_linodeplans(self, method, url, body, headers): - body = '{"ERRORARRAY":[],"ACTION":"avail.linodeplans","DATA":[{"AVAIL":{"2":27,"3":0,"4":0,"6":0},"DISK":16,"PRICE":19.95,"PLANID":1,"LABEL":"Linode 360","RAM":360,"XFER":200},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":24,"PRICE":29.95,"PLANID":2,"LABEL":"Linode 540","RAM":540,"XFER":300},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":32,"PRICE":39.95,"PLANID":3,"LABEL":"Linode 720","RAM":720,"XFER":400},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":48,"PRICE":59.95,"PLANID":4,"LABEL":"Linode 1080","RAM":1080,"XFER":600},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":64,"PRICE":79.95,"PLANID":5,"LABEL":"Linode 
1440","RAM":1440,"XFER":800},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":128,"PRICE":159.95,"PLANID":6,"LABEL":"Linode 2880","RAM":2880,"XFER":1600},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":256,"PRICE":319.95,"PLANID":7,"LABEL":"Linode 5760","RAM":5760,"XFER":2000},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":384,"PRICE":479.95,"PLANID":8,"LABEL":"Linode 8640","RAM":8640,"XFER":2000},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":512,"PRICE":639.95,"PLANID":9,"LABEL":"Linode 11520","RAM":11520,"XFER":2000},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":640,"PRICE":799.95,"PLANID":10,"LABEL":"Linode 14400","RAM":14400,"XFER":2000}]}' - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _avail_distributions(self, method, url, body, headers): - body = '{"ERRORARRAY":[],"ACTION":"avail.distributions","DATA":[{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Arch Linux 2007.08","MINIMAGESIZE":436,"DISTRIBUTIONID":38,"CREATE_DT":"2007-10-24 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Centos 5.0","MINIMAGESIZE":594,"DISTRIBUTIONID":32,"CREATE_DT":"2007-04-27 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Centos 5.2","MINIMAGESIZE":950,"DISTRIBUTIONID":46,"CREATE_DT":"2008-11-30 00:00:00.0"},{"REQUIRESPVOPSKERNEL":1,"IS64BIT":1,"LABEL":"Centos 5.2 64bit","MINIMAGESIZE":980,"DISTRIBUTIONID":47,"CREATE_DT":"2008-11-30 00:00:00.0"},{"REQUIRESPVOPSKERNEL":1,"IS64BIT":0,"LABEL":"Debian 4.0","MINIMAGESIZE":200,"DISTRIBUTIONID":28,"CREATE_DT":"2007-04-18 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":1,"LABEL":"Debian 4.0 64bit","MINIMAGESIZE":220,"DISTRIBUTIONID":48,"CREATE_DT":"2008-12-02 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Debian 5.0","MINIMAGESIZE":200,"DISTRIBUTIONID":50,"CREATE_DT":"2009-02-19 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":1,"LABEL":"Debian 5.0 64bit","MINIMAGESIZE":300,"DISTRIBUTIONID":51,"CREATE_DT":"2009-02-19 00:00:00.0"},{"REQUIRESPVOPSKERNEL":1,"IS64BIT":0,"LABEL":"Fedora 
8","MINIMAGESIZE":740,"DISTRIBUTIONID":40,"CREATE_DT":"2007-11-09 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Fedora 9","MINIMAGESIZE":1175,"DISTRIBUTIONID":43,"CREATE_DT":"2008-06-09 15:15:21.0"},{"REQUIRESPVOPSKERNEL":1,"IS64BIT":0,"LABEL":"Gentoo 2007.0","MINIMAGESIZE":1800,"DISTRIBUTIONID":35,"CREATE_DT":"2007-08-29 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Gentoo 2008.0","MINIMAGESIZE":1500,"DISTRIBUTIONID":52,"CREATE_DT":"2009-03-20 00:00:00.0"},{"REQUIRESPVOPSKERNEL":1,"IS64BIT":1,"LABEL":"Gentoo 2008.0 64bit","MINIMAGESIZE":2500,"DISTRIBUTIONID":53,"CREATE_DT":"2009-04-04 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"OpenSUSE 11.0","MINIMAGESIZE":850,"DISTRIBUTIONID":44,"CREATE_DT":"2008-08-21 08:32:16.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Slackware 12.0","MINIMAGESIZE":315,"DISTRIBUTIONID":34,"CREATE_DT":"2007-07-16 00:00:00.0"},{"REQUIRESPVOPSKERNEL":1,"IS64BIT":0,"LABEL":"Slackware 12.2","MINIMAGESIZE":500,"DISTRIBUTIONID":54,"CREATE_DT":"2009-04-04 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Ubuntu 8.04 LTS","MINIMAGESIZE":400,"DISTRIBUTIONID":41,"CREATE_DT":"2008-04-23 15:11:29.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":1,"LABEL":"Ubuntu 8.04 LTS 64bit","MINIMAGESIZE":350,"DISTRIBUTIONID":42,"CREATE_DT":"2008-06-03 12:51:11.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Ubuntu 8.10","MINIMAGESIZE":220,"DISTRIBUTIONID":45,"CREATE_DT":"2008-10-30 23:23:03.0"},{"REQUIRESPVOPSKERNEL":1,"IS64BIT":1,"LABEL":"Ubuntu 8.10 64bit","MINIMAGESIZE":230,"DISTRIBUTIONID":49,"CREATE_DT":"2008-12-02 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Ubuntu 9.04","MINIMAGESIZE":350,"DISTRIBUTIONID":55,"CREATE_DT":"2009-04-23 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":1,"LABEL":"Ubuntu 9.04 64bit","MINIMAGESIZE":350,"DISTRIBUTIONID":56,"CREATE_DT":"2009-04-23 00:00:00.0"}]}' - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _linode_create(self, method, url, body, 
headers): - body = '{"ERRORARRAY":[],"ACTION":"linode.create","DATA":{"LinodeID":8098}}' - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _linode_disk_createfromdistribution(self, method, url, body, headers): - body = '{"ERRORARRAY":[],"ACTION":"linode.disk.createFromDistribution","DATA":{"JobID":1298,"DiskID":55647}}' - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _linode_delete(self, method, url, body, headers): - body = '{"ERRORARRAY":[],"ACTION":"linode.delete","DATA":{"LinodeID":8098}}' - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _linode_update(self, method, url, body, headers): - body = '{"ERRORARRAY":[],"ACTION":"linode.update","DATA":{"LinodeID":8098}}' - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _linode_reboot(self, method, url, body, headers): - body = '{"ERRORARRAY":[],"ACTION":"linode.reboot","DATA":{"JobID":1305}}' - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _avail_kernels(self, method, url, body, headers): - body = '{"ERRORARRAY":[],"ACTION":"avail.kernels","DATA":[{"LABEL":"Latest 2.6 Stable (2.6.18.8-linode19)","ISXEN":1,"KERNELID":60},{"LABEL":"2.6.18.8-linode19","ISXEN":1,"KERNELID":103},{"LABEL":"2.6.30.5-linode20","ISXEN":1,"KERNELID":105},{"LABEL":"Latest 2.6 Stable (2.6.18.8-x86_64-linode7)","ISXEN":1,"KERNELID":107},{"LABEL":"2.6.18.8-x86_64-linode7","ISXEN":1,"KERNELID":104},{"LABEL":"2.6.30.5-x86_64-linode8","ISXEN":1,"KERNELID":106},{"LABEL":"pv-grub-x86_32","ISXEN":1,"KERNELID":92},{"LABEL":"pv-grub-x86_64","ISXEN":1,"KERNELID":95},{"LABEL":"Recovery - Finnix 
(kernel)","ISXEN":1,"KERNELID":61},{"LABEL":"2.6.18.8-domU-linode7","ISXEN":1,"KERNELID":81},{"LABEL":"2.6.18.8-linode10","ISXEN":1,"KERNELID":89},{"LABEL":"2.6.18.8-linode16","ISXEN":1,"KERNELID":98},{"LABEL":"2.6.24.4-linode8","ISXEN":1,"KERNELID":84},{"LABEL":"2.6.25-linode9","ISXEN":1,"KERNELID":88},{"LABEL":"2.6.25.10-linode12","ISXEN":1,"KERNELID":90},{"LABEL":"2.6.26-linode13","ISXEN":1,"KERNELID":91},{"LABEL":"2.6.27.4-linode14","ISXEN":1,"KERNELID":93},{"LABEL":"2.6.28-linode15","ISXEN":1,"KERNELID":96},{"LABEL":"2.6.28.3-linode17","ISXEN":1,"KERNELID":99},{"LABEL":"2.6.29-linode18","ISXEN":1,"KERNELID":101},{"LABEL":"2.6.16.38-x86_64-linode2","ISXEN":1,"KERNELID":85},{"LABEL":"2.6.18.8-x86_64-linode1","ISXEN":1,"KERNELID":86},{"LABEL":"2.6.27.4-x86_64-linode3","ISXEN":1,"KERNELID":94},{"LABEL":"2.6.28-x86_64-linode4","ISXEN":1,"KERNELID":97},{"LABEL":"2.6.28.3-x86_64-linode5","ISXEN":1,"KERNELID":100},{"LABEL":"2.6.29-x86_64-linode6","ISXEN":1,"KERNELID":102}]}' - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _linode_disk_create(self, method, url, body, headers): - body = '{"ERRORARRAY":[],"ACTION":"linode.disk.create","DATA":{"JobID":1299,"DiskID":55648}}' - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _linode_boot(self, method, url, body, headers): - body = '{"ERRORARRAY":[],"ACTION":"linode.boot","DATA":{"JobID":1300}}' - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _linode_config_create(self, method, url, body, headers): - body = '{"ERRORARRAY":[],"ACTION":"linode.config.create","DATA":{"ConfigID":31239}}' - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _linode_list(self, method, url, body, headers): - body = '{"ACTION": "linode.list", "DATA": [{"ALERT_DISKIO_ENABLED": 1, "BACKUPWEEKLYDAY": 0, "LABEL": "api-node3", "DATACENTERID": 5, "ALERT_BWOUT_ENABLED": 1, "ALERT_CPU_THRESHOLD": 10, "TOTALHD": 100, "ALERT_BWQUOTA_THRESHOLD": 81, "ALERT_BWQUOTA_ENABLED": 1, 
"TOTALXFER": 200, "STATUS": 2, "ALERT_BWIN_ENABLED": 1, "ALERT_BWIN_THRESHOLD": 5, "ALERT_DISKIO_THRESHOLD": 200, "WATCHDOG": 1, "LINODEID": 8098, "BACKUPWINDOW": 1, "TOTALRAM": 540, "LPM_DISPLAYGROUP": "", "ALERT_BWOUT_THRESHOLD": 5, "BACKUPSENABLED": 1, "ALERT_CPU_ENABLED": 1}], "ERRORARRAY": []}' - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _linode_ip_list(self, method, url, body, headers): - body = '{"ACTION": "linode.ip.list", "DATA": [{"RDNS_NAME": "li22-54.members.linode.com", "ISPUBLIC": 1, "IPADDRESS": "75.127.96.54", "IPADDRESSID": 5384, "LINODEID": 8098}, {"RDNS_NAME": "li22-245.members.linode.com", "ISPUBLIC": 1, "IPADDRESS": "75.127.96.245", "IPADDRESSID": 5575, "LINODEID": 8098}], "ERRORARRAY": []}' - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _batch(self, method, url, body, headers): - body = '[{"ACTION": "linode.ip.list", "DATA": [{"RDNS_NAME": "li22-54.members.linode.com", "ISPUBLIC": 1, "IPADDRESS": "75.127.96.54", "IPADDRESSID": 5384, "LINODEID": 8098}, {"RDNS_NAME": "li22-245.members.linode.com", "ISPUBLIC": 1, "IPADDRESS": "75.127.96.245", "IPADDRESSID": 5575, "LINODEID": 8098}], "ERRORARRAY": []}]' - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/compute/test_opennebula.py libcloud-0.15.1/test/compute/test_opennebula.py --- libcloud-0.5.0/test/compute/test_opennebula.py 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/test_opennebula.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,122 +0,0 @@ -# Copyright 2002-2009, Distributed Systems Architecture Group, Universidad -# Complutense de Madrid (dsa-research.org) -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import sys -import unittest -import httplib - -from libcloud.compute.drivers.opennebula import OpenNebulaNodeDriver -from libcloud.compute.base import Node, NodeImage, NodeSize - -from test import MockHttp -from test.compute import TestCaseMixin -from test.file_fixtures import ComputeFileFixtures - -from test.secrets import OPENNEBULA_USER, OPENNEBULA_KEY - -class OpenNebulaTests(unittest.TestCase, TestCaseMixin): - - def setUp(self): - OpenNebulaNodeDriver.connectionCls.conn_classes = (None, OpenNebulaMockHttp) - self.driver = OpenNebulaNodeDriver(OPENNEBULA_USER, OPENNEBULA_KEY) - - def test_create_node(self): - image = NodeImage(id=1, name='UbuntuServer9.04-Contextualized', driver=self.driver) - size = NodeSize(1, 'small', None, None, None, None, driver=self.driver) - node = self.driver.create_node(name='MyCompute', image=image, size=size) - self.assertEqual(node.id, '5') - self.assertEqual(node.name, 'MyCompute') - - def test_list_nodes(self): - nodes = self.driver.list_nodes() - self.assertEqual(len(nodes), 2) - node = nodes[0] - self.assertEqual(node.id, '5') - self.assertEqual(node.name, 'MyCompute') - - def test_reboot_node(self): - node = Node(5, None, None, None, None, self.driver) - ret = self.driver.reboot_node(node) - self.assertTrue(ret) - - def test_destroy_node(self): - node = Node(5, None, None, None, None, self.driver) - ret = self.driver.destroy_node(node) - self.assertTrue(ret) - 
- def test_list_sizes(self): - sizes = self.driver.list_sizes() - self.assertEqual(len(sizes), 3) - self.assertTrue('small' in [ s.name for s in sizes]) - self.assertTrue('medium' in [ s.name for s in sizes]) - self.assertTrue('large' in [ s.name for s in sizes]) - - def test_list_images(self): - images = self.driver.list_images() - self.assertEqual(len(images), 2) - image = images[0] - self.assertEqual(image.id, '1') - self.assertEqual(image.name, 'UbuntuServer9.04-Contextualized') - -class OpenNebulaMockHttp(MockHttp): - - fixtures = ComputeFileFixtures('opennebula') - - def _compute(self, method, url, body, headers): - if method == 'GET': - body = self.fixtures.load('computes.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - if method == 'POST': - body = self.fixtures.load('compute.xml') - return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) - - def _storage(self, method, url, body, headers): - if method == 'GET': - body = self.fixtures.load('storage.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _compute_5(self, method, url, body, headers): - if method == 'GET': - body = self.fixtures.load('compute.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - if method == 'PUT': - body = "" - return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) - - if method == 'DELETE': - body = "" - return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) - - def _compute_15(self, method, url, body, headers): - if method == 'GET': - body = self.fixtures.load('compute.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _storage_1(self, method, url, body, headers): - if method == 'GET': - body = self.fixtures.load('disk.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _storage_8(self, method, url, body, headers): - if method == 'GET': - body = self.fixtures.load('disk.xml') - return (httplib.OK, body, {}, 
httplib.responses[httplib.OK]) - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/compute/test_opsource.py libcloud-0.15.1/test/compute/test_opsource.py --- libcloud-0.5.0/test/compute/test_opsource.py 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/test_opsource.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,223 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import sys -import unittest -import httplib - -from libcloud.common.types import InvalidCredsError -from libcloud.compute.drivers.opsource import OpsourceNodeDriver as Opsource -from libcloud.compute.drivers.opsource import OpsourceAPIException, OpsourceNetwork -from libcloud.compute.base import Node, NodeImage, NodeSize, NodeAuthPassword, NodeLocation - -from test import MockHttp -from test.compute import TestCaseMixin -from test.file_fixtures import ComputeFileFixtures - -from test.secrets import OPSOURCE_USER, OPSOURCE_PASS - -class OpsourceTests(unittest.TestCase, TestCaseMixin): - - def setUp(self): - Opsource.connectionCls.conn_classes = (None, OpsourceMockHttp) - OpsourceMockHttp.type = None - self.driver = Opsource(OPSOURCE_USER, OPSOURCE_PASS) - - def test_invalid_creds(self): - OpsourceMockHttp.type = 'UNAUTHORIZED' - try: - self.driver.list_nodes() - self.assertTrue(False) # Above command should have thrown an InvalidCredsException - except InvalidCredsError: - self.assertTrue(True) - - def test_list_sizes_response(self): - OpsourceMockHttp.type = None - ret = self.driver.list_sizes() - self.assertEqual(len(ret), 1) - size = ret[0] - self.assertEqual(size.name, 'default') - - def test_reboot_node_response(self): - node = Node(id='11', name=None, state=None, - public_ip=None, private_ip=None, driver=self.driver) - ret = node.reboot() - self.assertTrue(ret is True) - - def test_reboot_node_response_INPROGRESS(self): - OpsourceMockHttp.type = 'INPROGRESS' - node = Node(id='11', name=None, state=None, - public_ip=None, private_ip=None, driver=self.driver) - try: - ret = node.reboot() - self.assertTrue(False) # above command should have thrown OpsourceAPIException - except OpsourceAPIException: - self.assertTrue(True) - - def test_destroy_node_response(self): - node = Node(id='11', name=None, state=None, - public_ip=None, private_ip=None, driver=self.driver) - ret = node.destroy() - self.assertTrue(ret is True) - - def 
test_destroy_node_response_INPROGRESS(self): - OpsourceMockHttp.type = 'INPROGRESS' - node = Node(id='11', name=None, state=None, - public_ip=None, private_ip=None, driver=self.driver) - try: - ret = node.destroy() - self.assertTrue(False) # above command should have thrown OpsourceAPIException - except OpsourceAPIException: - self.assertTrue(True) - - def test_create_node_response(self): - rootPw = NodeAuthPassword('pass123') - image = self.driver.list_images()[0] - location = self.driver.list_locations()[0] - network = self.driver.ex_list_networks()[0] - node = self.driver.create_node(name='test2', image=image, auth=rootPw, - ex_description='test2 node', ex_network=network, - ex_isStarted=False) - self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') - self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') - - def test_ex_shutdown_graceful(self): - node = Node(id='11', name=None, state=None, - public_ip=None, private_ip=None, driver=self.driver) - ret = self.driver.ex_shutdown_graceful(node) - self.assertTrue(ret is True) - - def test_ex_shutdown_graceful_INPROGRESS(self): - OpsourceMockHttp.type = 'INPROGRESS' - node = Node(id='11', name=None, state=None, - public_ip=None, private_ip=None, driver=self.driver) - try: - ret = self.driver.ex_shutdown_graceful(node) - self.assertTrue(False) # above command should have thrown OpsourceAPIException - except OpsourceAPIException: - self.assertTrue(True) - - def test_ex_start_node(self): - node = Node(id='11', name=None, state=None, - public_ip=None, private_ip=None, driver=self.driver) - ret = self.driver.ex_start_node(node) - self.assertTrue(ret is True) - - def test_ex_start_node_INPROGRESS(self): - OpsourceMockHttp.type = 'INPROGRESS' - node = Node(id='11', name=None, state=None, - public_ip=None, private_ip=None, driver=self.driver) - try: - ret = self.driver.ex_start_node(node) - self.assertTrue(False) # above command should have thrown OpsourceAPIException - except OpsourceAPIException: - 
self.assertTrue(True) - - def test_ex_power_off(self): - node = Node(id='11', name=None, state=None, - public_ip=None, private_ip=None, driver=self.driver) - ret = self.driver.ex_power_off(node) - self.assertTrue(ret is True) - - def test_ex_power_off_INPROGRESS(self): - OpsourceMockHttp.type = 'INPROGRESS' - node = Node(id='11', name=None, state=None, - public_ip=None, private_ip=None, driver=self.driver) - try: - ret = self.driver.ex_power_off(node) - self.assertTrue(False) # above command should have thrown OpsourceAPIException - except OpsourceAPIException: - self.assertTrue(True) - - def test_ex_list_networks(self): - nets = self.driver.ex_list_networks() - self.assertEqual(nets[0].name, 'test-net1') - self.assertTrue(isinstance(nets[0].location, NodeLocation)) - -class OpsourceMockHttp(MockHttp): - - fixtures = ComputeFileFixtures('opsource') - - def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers): - return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED]) - - def _oec_0_9_myaccount(self, method, url, body, headers): - body = self.fixtures.load('oec_0_9_myaccount.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers): - body = self.fixtures.load('oec_0_9_myaccount.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _oec_0_9_base_image(self, method, url, body, headers): - body = self.fixtures.load('oec_0_9_base_image.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed(self, method, url, body, headers): - body = self.fixtures.load('oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy(self, method, url, body, headers): - body = 
self.fixtures.load('oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter(self, method, url, body, headers): - body = self.fixtures.load('oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11(self, method, url, body, headers): - body = None - action = url.split('?')[-1] - - if action == 'restart': - body = self.fixtures.load('oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml') - elif action == 'shutdown': - body = self.fixtures.load('oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml') - elif action == 'delete': - body = self.fixtures.load('oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml') - elif action == 'start': - body = self.fixtures.load('oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml') - elif action == 'poweroff': - body = self.fixtures.load('oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml') - - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_INPROGRESS(self, method, url, body, headers): - body = None - action = url.split('?')[-1] - - if action == 'restart': - body = self.fixtures.load('oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml') - elif action == 'shutdown': - body = self.fixtures.load('oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml') - elif action == 'delete': - body = self.fixtures.load('oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml') - elif action == 'start': - body = self.fixtures.load('oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml') - elif action == 'poweroff': - body = 
self.fixtures.load('oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml') - - return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) - - def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(self, method, url, body, headers): - body = self.fixtures.load('_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation(self, method, url, body, headers): - body = self.fixtures.load('oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/compute/test_rackspace.py libcloud-0.15.1/test/compute/test_rackspace.py --- libcloud-0.5.0/test/compute/test_rackspace.py 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/compute/test_rackspace.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,283 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import sys -import unittest -import httplib - -from libcloud.common.types import InvalidCredsError -from libcloud.compute.drivers.rackspace import RackspaceNodeDriver as Rackspace -from libcloud.compute.base import Node, NodeImage, NodeSize - -from test import MockHttp -from test.compute import TestCaseMixin -from test.file_fixtures import ComputeFileFixtures - -from test.secrets import RACKSPACE_USER, RACKSPACE_KEY - -class RackspaceTests(unittest.TestCase, TestCaseMixin): - - def setUp(self): - Rackspace.connectionCls.conn_classes = (None, RackspaceMockHttp) - RackspaceMockHttp.type = None - self.driver = Rackspace(RACKSPACE_USER, RACKSPACE_KEY) - - def test_auth(self): - RackspaceMockHttp.type = 'UNAUTHORIZED' - try: - self.driver = Rackspace(RACKSPACE_USER, RACKSPACE_KEY) - except InvalidCredsError, e: - self.assertEqual(True, isinstance(e, InvalidCredsError)) - else: - self.fail('test should have thrown') - - def test_auth_missing_key(self): - RackspaceMockHttp.type = 'UNAUTHORIZED_MISSING_KEY' - try: - self.driver = Rackspace(RACKSPACE_USER, RACKSPACE_KEY) - except InvalidCredsError, e: - self.assertEqual(True, isinstance(e, InvalidCredsError)) - else: - self.fail('test should have thrown') - - def test_list_nodes(self): - RackspaceMockHttp.type = 'EMPTY' - ret = self.driver.list_nodes() - self.assertEqual(len(ret), 0) - RackspaceMockHttp.type = None - ret = self.driver.list_nodes() - self.assertEqual(len(ret), 1) - node = ret[0] - self.assertEqual('67.23.21.33', node.public_ip[0]) - self.assertEqual('10.176.168.218', node.private_ip[0]) - self.assertEqual(node.extra.get('flavorId'), '1') - self.assertEqual(node.extra.get('imageId'), '11') - self.assertEqual(type(node.extra.get('metadata')), type(dict())) - RackspaceMockHttp.type = 'METADATA' - ret = self.driver.list_nodes() - self.assertEqual(len(ret), 1) - node = ret[0] - self.assertEqual(type(node.extra.get('metadata')), type(dict())) - self.assertEqual(node.extra.get('metadata').get('somekey'), 
'somevalue') - RackspaceMockHttp.type = None - - def test_list_sizes(self): - ret = self.driver.list_sizes() - self.assertEqual(len(ret), 7) - size = ret[0] - self.assertEqual(size.name, '256 slice') - self.assertTrue(isinstance(size.price, float)) - - def test_list_images(self): - ret = self.driver.list_images() - self.assertEqual(ret[10].extra['serverId'], None) - self.assertEqual(ret[11].extra['serverId'], '91221') - - def test_create_node(self): - image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) - size = NodeSize(1, '256 slice', None, None, None, None, driver=self.driver) - node = self.driver.create_node(name='racktest', image=image, size=size, shared_ip_group='group1') - self.assertEqual(node.name, 'racktest') - self.assertEqual(node.extra.get('password'), 'racktestvJq7d3') - - def test_create_node_with_metadata(self): - RackspaceMockHttp.type = 'METADATA' - image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) - size = NodeSize(1, '256 slice', None, None, None, None, driver=self.driver) - metadata = { 'a': 'b', 'c': 'd' } - files = { '/file1': 'content1', '/file2': 'content2' } - node = self.driver.create_node(name='racktest', image=image, size=size, metadata=metadata, files=files) - self.assertEqual(node.name, 'racktest') - self.assertEqual(node.extra.get('password'), 'racktestvJq7d3') - self.assertEqual(node.extra.get('metadata'), metadata) - - def test_reboot_node(self): - node = Node(id=72258, name=None, state=None, public_ip=None, private_ip=None, - driver=self.driver) - ret = node.reboot() - self.assertTrue(ret is True) - - def test_destroy_node(self): - node = Node(id=72258, name=None, state=None, public_ip=None, private_ip=None, - driver=self.driver) - ret = node.destroy() - self.assertTrue(ret is True) - - def test_ex_limits(self): - limits = self.driver.ex_limits() - self.assertTrue("rate" in limits) - self.assertTrue("absolute" in limits) - - def test_ex_save_image(self): - node = Node(id=444222, 
name=None, state=None, public_ip=None, private_ip=None, - driver=self.driver) - image = self.driver.ex_save_image(node, "imgtest") - self.assertEqual(image.name, "imgtest") - self.assertEqual(image.id, "12345") - - def test_ex_list_ip_addresses(self): - ret = self.driver.ex_list_ip_addresses(node_id=72258) - self.assertEquals(2, len(ret.public_addresses)) - self.assertTrue('67.23.10.131' in ret.public_addresses) - self.assertTrue('67.23.10.132' in ret.public_addresses) - self.assertEquals(1, len(ret.private_addresses)) - self.assertTrue('10.176.42.16' in ret.private_addresses) - - def test_ex_list_ip_groups(self): - ret = self.driver.ex_list_ip_groups() - self.assertEquals(2, len(ret)) - self.assertEquals('1234', ret[0].id) - self.assertEquals('Shared IP Group 1', ret[0].name) - self.assertEquals('5678', ret[1].id) - self.assertEquals('Shared IP Group 2', ret[1].name) - self.assertTrue(ret[0].servers is None) - - def test_ex_list_ip_groups_detail(self): - ret = self.driver.ex_list_ip_groups(details=True) - - self.assertEquals(2, len(ret)) - - self.assertEquals('1234', ret[0].id) - self.assertEquals('Shared IP Group 1', ret[0].name) - self.assertEquals(2, len(ret[0].servers)) - self.assertEquals('422', ret[0].servers[0]) - self.assertEquals('3445', ret[0].servers[1]) - - self.assertEquals('5678', ret[1].id) - self.assertEquals('Shared IP Group 2', ret[1].name) - self.assertEquals(3, len(ret[1].servers)) - self.assertEquals('23203', ret[1].servers[0]) - self.assertEquals('2456', ret[1].servers[1]) - self.assertEquals('9891', ret[1].servers[2]) - - def test_ex_create_ip_group(self): - ret = self.driver.ex_create_ip_group('Shared IP Group 1', '5467') - self.assertEquals('1234', ret.id) - self.assertEquals('Shared IP Group 1', ret.name) - self.assertEquals(1, len(ret.servers)) - self.assertEquals('422', ret.servers[0]) - - def test_ex_delete_ip_group(self): - ret = self.driver.ex_delete_ip_group('5467') - self.assertEquals(True, ret) - - def test_ex_share_ip(self): - 
ret = self.driver.ex_share_ip('1234', '3445', '67.23.21.133') - self.assertEquals(True, ret) - - def test_ex_unshare_ip(self): - ret = self.driver.ex_unshare_ip('3445', '67.23.21.133') - self.assertEquals(True, ret) - - -class RackspaceMockHttp(MockHttp): - - fixtures = ComputeFileFixtures('rackspace') - - # fake auth token response - def _v1_0(self, method, url, body, headers): - headers = {'x-server-management-url': 'https://servers.api.rackspacecloud.com/v1.0/slug', - 'x-auth-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', - 'x-cdn-management-url': 'https://cdn.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06', - 'x-storage-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', - 'x-storage-url': 'https://storage4.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06'} - return (httplib.NO_CONTENT, "", headers, httplib.responses[httplib.NO_CONTENT]) - - def _v1_0_UNAUTHORIZED(self, method, url, body, headers): - return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED]) - - def _v1_0_UNAUTHORIZED_MISSING_KEY(self, method, url, body, headers): - headers = {'x-server-management-url': 'https://servers.api.rackspacecloud.com/v1.0/slug', - 'x-auth-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', - 'x-cdn-management-url': 'https://cdn.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06'} - return (httplib.NO_CONTENT, "", headers, httplib.responses[httplib.NO_CONTENT]) - - def _v1_0_slug_servers_detail_EMPTY(self, method, url, body, headers): - body = self.fixtures.load('v1_slug_servers_detail_empty.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _v1_0_slug_servers_detail(self, method, url, body, headers): - body = self.fixtures.load('v1_slug_servers_detail.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _v1_0_slug_servers_detail_METADATA(self, method, url, body, headers): - body = self.fixtures.load('v1_slug_servers_detail_metadata.xml') - return 
(httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _v1_0_slug_flavors_detail(self, method, url, body, headers): - body = self.fixtures.load('v1_slug_flavors_detail.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _v1_0_slug_images(self, method, url, body, headers): - if method != "POST": - raise NotImplemented - # this is currently used for creation of new image with - # POST request, don't handle GET to avoid possible confusion - body = self.fixtures.load('v1_slug_images_post.xml') - return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) - - def _v1_0_slug_images_detail(self, method, url, body, headers): - body = self.fixtures.load('v1_slug_images_detail.xml') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _v1_0_slug_servers(self, method, url, body, headers): - body = self.fixtures.load('v1_slug_servers.xml') - return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) - - def _v1_0_slug_servers_METADATA(self, method, url, body, headers): - body = self.fixtures.load('v1_slug_servers_metadata.xml') - return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) - - def _v1_0_slug_servers_72258_action(self, method, url, body, headers): - if method != "POST" or body[:8] != ">> mock = MockHttp('localhost', 8080) - >>> mock.request('GET', '/example/') - >>> response = mock.getresponse() - >>> response.body.read() - 'Hello World!' - >>> response.status - 200 - >>> response.getheaders() - [('X-Foo', 'libcloud')] - >>> MockHttp.type = 'fail' - >>> mock.request('GET', '/example/') - >>> response = mock.getresponse() - >>> response.body.read() - 'Oh Noes!' 
- >>> response.status - 403 - >>> response.getheaders() - [('X-Foo', 'fail')] - - """ - responseCls = MockResponse - host = None - port = None - response = None - - type = None - use_param = None # will use this param to namespace the request function - - def __init__(self, host, port, *args, **kwargs): - self.host = host - self.port = port - - def request(self, method, url, body=None, headers=None, raw=False): - # Find a method we can use for this request - parsed = urlparse.urlparse(url) - scheme, netloc, path, params, query, fragment = parsed - qs = parse_qs(query) - if path.endswith('/'): - path = path[:-1] - meth_name = self._get_method_name(type=self.type, - use_param=self.use_param, - qs=qs, path=path) - meth = getattr(self, meth_name) - status, body, headers, reason = meth(method, url, body, headers) - self.response = self.responseCls(status, body, headers, reason) - - def getresponse(self): - return self.response - - def connect(self): - """ - Can't think of anything to mock here. - """ - pass - - def close(self): - pass - - # Mock request/response example - def _example(self, method, url, body, headers): - """ - Return a simple message and header, regardless of input. - """ - return (httplib.OK, 'Hello World!', {'X-Foo': 'libcloud'}, - httplib.responses[httplib.OK]) - - def _example_fail(self, method, url, body, headers): - return (httplib.FORBIDDEN, 'Oh Noes!', {'X-Foo': 'fail'}, - httplib.responses[httplib.FORBIDDEN]) - -class MockHttpTestCase(MockHttp, unittest.TestCase): - # Same as the MockHttp class, but you can also use assertions in the - # classes which inherit from this one. 
- def __init__(self, *args, **kwargs): - unittest.TestCase.__init__(self) - - if kwargs.get('host', None) and kwargs.get('port', None): - MockHttp.__init__(self, *args, **kwargs) - - def runTest(self): - pass - -class StorageMockHttp(MockHttp): - def putrequest(self, method, action): - pass - - def putheader(self, key, value): - pass - - def endheaders(self): - pass - - def send(self, data): - pass - -class MockRawResponse(BaseMockHttpObject): - """ - Mock RawResponse object suitable for testing. - """ - - type = None - responseCls = MockResponse - - def __init__(self): - super(MockRawResponse, self).__init__() - self._data = [] - self._current_item = 0 - - self._status = None - self._response = None - self._headers = None - self._reason = None - - def next(self): - if self._current_item == len(self._data): - raise StopIteration - - value = self._data[self._current_item] - self._current_item += 1 - return value - - def _generate_random_data(self, size): - data = [] - current_size = 0 - while current_size < size: - value = str(random.randint(0, 9)) - value_size = len(value) - data.append(value) - current_size += value_size - - return data - - @property - def response(self): - return self._get_response_if_not_availale() - - @property - def status(self): - self._get_response_if_not_availale() - return self._status - - @property - def headers(self): - self._get_response_if_not_availale() - return self._headers - - @property - def reason(self): - self._get_response_if_not_availale() - return self._reason - - def _get_response_if_not_availale(self): - if not self._response: - meth_name = self._get_method_name(type=self.type, - use_param=False, qs=None, - path=self.connection.action) - meth = getattr(self, meth_name) - result = meth(self.connection.method, None, None, None) - self._status, self._body, self._headers, self._reason = result - self._response = self.responseCls(self._status, self._body, - self._headers, self._reason) - return self - return self._response - -if 
__name__ == "__main__": - import doctest - doctest.testmod() diff -Nru libcloud-0.5.0/test/loadbalancer/fixtures/gogrid/ip_list.json libcloud-0.15.1/test/loadbalancer/fixtures/gogrid/ip_list.json --- libcloud-0.5.0/test/loadbalancer/fixtures/gogrid/ip_list.json 2011-05-14 09:02:52.000000000 +0000 +++ libcloud-0.15.1/test/loadbalancer/fixtures/gogrid/ip_list.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,316 +0,0 @@ -{ - "list": [ - { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868101, - "ip": "10.0.0.68", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868102, - "ip": "10.0.0.69", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868106, - "ip": "10.0.0.73", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868107, - "ip": "10.0.0.74", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - 
"id": 1868108, - "ip": "10.0.0.75", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868109, - "ip": "10.0.0.76", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868110, - "ip": "10.0.0.77", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868111, - "ip": "10.0.0.78", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - { - "datacenter": { - "description": "US East 1 Datacenter", - "id": 2, - "name": "US-East-1", - "object": "option" - }, - "id": 2277337, - "ip": "10.0.0.244", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.240/255.255.255.240" - }, - { - "datacenter": { - "description": "US East 1 Datacenter", - "id": 2, - "name": "US-East-1", - "object": "option" - }, - "id": 2277338, - "ip": "10.0.0.245", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": 
"10.0.0.240/255.255.255.240" - }, - { - "datacenter": { - "description": "US East 1 Datacenter", - "id": 2, - "name": "US-East-1", - "object": "option" - }, - "id": 2277342, - "ip": "10.0.0.249", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.240/255.255.255.240" - }, - { - "datacenter": { - "description": "US East 1 Datacenter", - "id": 2, - "name": "US-East-1", - "object": "option" - }, - "id": 2277343, - "ip": "10.0.0.250", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.240/255.255.255.240" - }, - { - "datacenter": { - "description": "US East 1 Datacenter", - "id": 2, - "name": "US-East-1", - "object": "option" - }, - "id": 2277344, - "ip": "10.0.0.251", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.240/255.255.255.240" - }, - { - "datacenter": { - "description": "US East 1 Datacenter", - "id": 2, - "name": "US-East-1", - "object": "option" - }, - "id": 2277345, - "ip": "10.0.0.252", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.240/255.255.255.240" - }, - { - "datacenter": { - "description": "US East 1 Datacenter", - "id": 2, - "name": "US-East-1", - "object": "option" - }, - "id": 2277346, - "ip": "10.0.0.253", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.240/255.255.255.240" - }, - { - "datacenter": { - "description": "US East 1 Datacenter", - "id": 2, - "name": "US-East-1", - "object": "option" - }, - "id": 2277347, - "ip": 
"10.0.0.254", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.240/255.255.255.240" - } - ], - "method": "/grid/ip/list", - "status": "success", - "summary": { - "numpages": 0, - "returned": 16, - "start": 0, - "total": 16 - } -} diff -Nru libcloud-0.5.0/test/loadbalancer/fixtures/gogrid/loadbalancer_add.json libcloud-0.15.1/test/loadbalancer/fixtures/gogrid/loadbalancer_add.json --- libcloud-0.5.0/test/loadbalancer/fixtures/gogrid/loadbalancer_add.json 2011-05-14 09:02:52.000000000 +0000 +++ libcloud-0.15.1/test/loadbalancer/fixtures/gogrid/loadbalancer_add.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,141 +0,0 @@ -{ - "list": [ - { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "name": "test2", - "id": 123, - "object": "loadbalancer", - "os": { - "description": "The F5 Load Balancer.", - "id": 1, - "name": "F5", - "object": "option" - }, - "persistence": { - "description": "", - "id": 1, - "name": "None", - "object": "option" - }, - "realiplist": [ - { - "ip": { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868109, - "ip": "10.1.0.10", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.1.0.10/255.255.255.240" - }, - "object": "ipportpair", - "port": 80 - }, - { - "ip": { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868110, - "ip": "10.1.0.11", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.1.0.11/255.255.255.240" - }, - "object": "ipportpair", - "port": 80 - }, - { - 
"ip": { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868111, - "ip": "10.1.0.12", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.1.0.12/255.255.255.240" - }, - "object": "ipportpair", - "port": 80 - } - ], - "state": { - "description": "Loadbalancer is enabled and on.", - "id": 1, - "name": "On", - "object": "option" - }, - "type": { - "description": "", - "id": 1, - "name": "Round Robin", - "object": "option" - }, - "virtualip": { - "ip": { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868101, - "ip": "1.1.1.1", - "object": "ip", - "public": true, - "state": { - "description": "IP is reserved or in use", - "id": 2, - "name": "Assigned", - "object": "option" - }, - "subnet": "1.1.1.1/255.255.255.240" - }, - "object": "ipportpair", - "port": 80 - } - } - ], - "method": "/grid/loadbalancer/add", - "status": "success", - "summary": { - "numpages": 0, - "returned": 1, - "start": 0, - "total": 1 - } -} diff -Nru libcloud-0.5.0/test/loadbalancer/fixtures/gogrid/loadbalancer_edit.json libcloud-0.15.1/test/loadbalancer/fixtures/gogrid/loadbalancer_edit.json --- libcloud-0.5.0/test/loadbalancer/fixtures/gogrid/loadbalancer_edit.json 2011-05-14 09:02:52.000000000 +0000 +++ libcloud-0.15.1/test/loadbalancer/fixtures/gogrid/loadbalancer_edit.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,164 +0,0 @@ -{ - "list": [ - { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 23530, - "name": "test2", - "object": "loadbalancer", - "os": { - "description": "The F5 Load Balancer.", - "id": 1, - "name": "F5", - "object": "option" - }, - "persistence": { - "description": "", - "id": 1, - "name": "None", - "object": "option" - }, - 
"realiplist": [ - { - "ip": { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868108, - "ip": "10.0.0.75", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - "object": "ipportpair", - "port": 80 - }, - { - "ip": { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868109, - "ip": "10.0.0.76", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - "object": "ipportpair", - "port": 80 - }, - { - "ip": { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868110, - "ip": "10.0.0.77", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - "object": "ipportpair", - "port": 80 - }, - { - "ip": { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868111, - "ip": "10.0.0.78", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - "object": "ipportpair", - "port": 80 - } - ], - "state": { - "description": "Loadbalancer is enabled and on.", - "id": 1, - "name": "On", - "object": "option" - }, - "type": { - "description": "", - "id": 1, - "name": "Round Robin", - "object": "option" - }, - "virtualip": { - "ip": { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": 
"US-West-1", - "object": "option" - }, - "id": 1868101, - "ip": "10.0.0.68", - "object": "ip", - "public": true, - "state": { - "description": "IP is reserved or in use", - "id": 2, - "name": "Assigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - "object": "ipportpair", - "port": 80 - } - } - ], - "method": "/grid/loadbalancer/edit", - "status": "success", - "summary": { - "numpages": 0, - "returned": 1, - "start": 0, - "total": 1 - } -} diff -Nru libcloud-0.5.0/test/loadbalancer/fixtures/gogrid/loadbalancer_get.json libcloud-0.15.1/test/loadbalancer/fixtures/gogrid/loadbalancer_get.json --- libcloud-0.5.0/test/loadbalancer/fixtures/gogrid/loadbalancer_get.json 2011-05-14 09:02:52.000000000 +0000 +++ libcloud-0.15.1/test/loadbalancer/fixtures/gogrid/loadbalancer_get.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,141 +0,0 @@ -{ - "list": [ - { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 23530, - "name": "test2", - "object": "loadbalancer", - "os": { - "description": "The F5 Load Balancer.", - "id": 1, - "name": "F5", - "object": "option" - }, - "persistence": { - "description": "", - "id": 1, - "name": "None", - "object": "option" - }, - "realiplist": [ - { - "ip": { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868109, - "ip": "10.0.0.76", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - "object": "ipportpair", - "port": 80 - }, - { - "ip": { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868110, - "ip": "10.0.0.77", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - 
"object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - "object": "ipportpair", - "port": 80 - }, - { - "ip": { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868111, - "ip": "10.0.0.78", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - "object": "ipportpair", - "port": 80 - } - ], - "state": { - "description": "Loadbalancer is enabled and on.", - "id": 1, - "name": "On", - "object": "option" - }, - "type": { - "description": "", - "id": 1, - "name": "Round Robin", - "object": "option" - }, - "virtualip": { - "ip": { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868101, - "ip": "10.0.0.68", - "object": "ip", - "public": true, - "state": { - "description": "IP is reserved or in use", - "id": 2, - "name": "Assigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - "object": "ipportpair", - "port": 80 - } - } - ], - "method": "/grid/loadbalancer/get", - "status": "success", - "summary": { - "numpages": 0, - "returned": 1, - "start": 0, - "total": 1 - } -} diff -Nru libcloud-0.5.0/test/loadbalancer/fixtures/gogrid/loadbalancer_list.json libcloud-0.15.1/test/loadbalancer/fixtures/gogrid/loadbalancer_list.json --- libcloud-0.5.0/test/loadbalancer/fixtures/gogrid/loadbalancer_list.json 2011-05-14 09:02:52.000000000 +0000 +++ libcloud-0.15.1/test/loadbalancer/fixtures/gogrid/loadbalancer_list.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,224 +0,0 @@ -{ - "list": [ - { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 23517, - "name": "foo", - "object": "loadbalancer", - "os": { - "description": "The F5 Load Balancer.", - "id": 1, - "name": "F5", - 
"object": "option" - }, - "persistence": { - "description": "", - "id": 1, - "name": "None", - "object": "option" - }, - "realiplist": [ - { - "ip": { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868111, - "ip": "10.0.0.78", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - "object": "ipportpair", - "port": 80 - } - ], - "state": { - "description": "Loadbalancer is enabled and on.", - "id": 1, - "name": "On", - "object": "option" - }, - "type": { - "description": "", - "id": 1, - "name": "Round Robin", - "object": "option" - }, - "virtualip": { - "ip": { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868099, - "ip": "10.0.0.66", - "object": "ip", - "public": true, - "state": { - "description": "IP is reserved or in use", - "id": 2, - "name": "Assigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - "object": "ipportpair", - "port": 80 - } - }, - { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 23526, - "name": "bar", - "object": "loadbalancer", - "os": { - "description": "The F5 Load Balancer.", - "id": 1, - "name": "F5", - "object": "option" - }, - "persistence": { - "description": "", - "id": 1, - "name": "None", - "object": "option" - }, - "realiplist": [ - { - "ip": { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868109, - "ip": "10.0.0.76", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - "object": "ipportpair", - 
"port": 80 - }, - { - "ip": { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868110, - "ip": "10.0.0.77", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - "object": "ipportpair", - "port": 80 - }, - { - "ip": { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868111, - "ip": "10.0.0.78", - "object": "ip", - "public": true, - "state": { - "description": "IP is available to use", - "id": 1, - "name": "Unassigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - "object": "ipportpair", - "port": 80 - } - ], - "state": { - "description": "Loadbalancer is enabled and on.", - "id": 1, - "name": "On", - "object": "option" - }, - "type": { - "description": "", - "id": 1, - "name": "Round Robin", - "object": "option" - }, - "virtualip": { - "ip": { - "datacenter": { - "description": "US West 1 Datacenter", - "id": 1, - "name": "US-West-1", - "object": "option" - }, - "id": 1868100, - "ip": "10.0.0.67", - "object": "ip", - "public": true, - "state": { - "description": "IP is reserved or in use", - "id": 2, - "name": "Assigned", - "object": "option" - }, - "subnet": "10.0.0.64/255.255.255.240" - }, - "object": "ipportpair", - "port": 80 - } - } - ], - "method": "/grid/loadbalancer/list", - "status": "success", - "summary": { - "numpages": 0, - "returned": 2, - "start": 0, - "total": 2 - } -} diff -Nru libcloud-0.5.0/test/loadbalancer/fixtures/gogrid/unexpected_error.json libcloud-0.15.1/test/loadbalancer/fixtures/gogrid/unexpected_error.json --- libcloud-0.5.0/test/loadbalancer/fixtures/gogrid/unexpected_error.json 2011-05-21 20:10:19.000000000 +0000 +++ libcloud-0.15.1/test/loadbalancer/fixtures/gogrid/unexpected_error.json 1970-01-01 
00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"summary":{"total":1,"start":0,"returned":1},"status":"failure","method":"/grid/loadbalancer/add","list":[{"message":"An unexpected server error has occured. Please email this error to apisupport@gogrid.com. Error Message : null","object":"error","errorcode":"UnexpectedException"}]} diff -Nru libcloud-0.5.0/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290.json libcloud-0.15.1/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290.json --- libcloud-0.5.0/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290.json 2011-05-14 09:02:52.000000000 +0000 +++ libcloud-0.15.1/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,46 +0,0 @@ -{ - "loadBalancer": { - "algorithm": "RANDOM", - "cluster": { - "name": "ztm-n05.lbaas.ord1.rackspace.net" - }, - "connectionLogging": { - "enabled": false - }, - "created": { - "time": "2011-04-07T16:27:50Z" - }, - "id": 8290, - "name": "test2", - "nodes": [ - { - "address": "10.1.0.11", - "condition": "ENABLED", - "id": 30944, - "port": 80, - "status": "ONLINE" - }, - { - "address": "10.1.0.10", - "condition": "ENABLED", - "id": 30945, - "port": 80, - "status": "ONLINE" - } - ], - "port": 80, - "protocol": "HTTP", - "status": "ACTIVE", - "updated": { - "time": "2011-04-07T16:28:12Z" - }, - "virtualIps": [ - { - "address": "1.1.1.1", - "id": 1151, - "ipVersion": "IPV4", - "type": "PUBLIC" - } - ] - } -} diff -Nru libcloud-0.5.0/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes.json libcloud-0.15.1/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes.json --- libcloud-0.5.0/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes.json 2011-05-14 09:02:52.000000000 +0000 +++ libcloud-0.15.1/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -{ - "nodes": [ - { - "address": "10.1.0.11", 
- "condition": "ENABLED", - "id": 30944, - "port": 80, - "status": "ONLINE" - }, - { - "address": "10.1.0.10", - "condition": "ENABLED", - "id": 30945, - "port": 80, - "status": "ONLINE" - } - ] -} diff -Nru libcloud-0.5.0/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes_post.json libcloud-0.15.1/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes_post.json --- libcloud-0.5.0/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes_post.json 2011-05-14 09:02:52.000000000 +0000 +++ libcloud-0.15.1/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes_post.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -{ - "nodes": [ - { - "address": "10.1.0.12", - "condition": "ENABLED", - "id": 30972, - "port": 80, - "status": "ONLINE", - "weight": 1 - } - ] -} diff -Nru libcloud-0.5.0/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers.json libcloud-0.15.1/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers.json --- libcloud-0.5.0/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers.json 2011-05-14 09:02:52.000000000 +0000 +++ libcloud-0.15.1/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,48 +0,0 @@ -{ - "loadBalancers": [ - { - "algorithm": "RANDOM", - "created": { - "time": "2011-04-06T21:25:19+0000" - }, - "id": 8155, - "name": "test0", - "port": 80, - "protocol": "HTTP", - "status": "ACTIVE", - "updated": { - "time": "2011-04-06T21:25:31+0000" - }, - "virtualIps": [ - { - "address": "1.1.1.25", - "id": 965, - "ipVersion": "IPV4", - "type": "PUBLIC" - } - ] - }, - { - "algorithm": "RANDOM", - "created": { - "time": "2011-04-06T21:26:22+0000" - }, - "id": 8156, - "name": "test1", - "port": 80, - "protocol": "HTTP", - "status": "ACTIVE", - "updated": { - "time": "2011-04-06T21:26:33+0000" - }, - "virtualIps": [ - { - "address": "1.1.1.83", - "id": 1279, - "ipVersion": "IPV4", - "type": "PUBLIC" - } - ] - } - ] -} diff -Nru 
libcloud-0.5.0/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_post.json libcloud-0.15.1/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_post.json --- libcloud-0.5.0/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_post.json 2011-05-14 09:02:52.000000000 +0000 +++ libcloud-0.15.1/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_post.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,48 +0,0 @@ -{ - "loadBalancer": { - "algorithm": "RANDOM", - "cluster": { - "name": "ztm-n05.lbaas.ord1.rackspace.net" - }, - "connectionLogging": { - "enabled": false - }, - "created": { - "time": "2011-04-07T16:27:50+0000" - }, - "id": 8290, - "name": "test2", - "nodes": [ - { - "address": "10.1.0.11", - "condition": "ENABLED", - "id": 30944, - "port": 80, - "status": "ONLINE", - "weight": 1 - }, - { - "address": "10.1.0.10", - "condition": "ENABLED", - "id": 30945, - "port": 80, - "status": "ONLINE", - "weight": 1 - } - ], - "port": 80, - "protocol": "HTTP", - "status": "BUILD", - "updated": { - "time": "2011-04-07T16:27:50+0000" - }, - "virtualIps": [ - { - "address": "1.1.1.1", - "id": 1151, - "ipVersion": "IPV4", - "type": "PUBLIC" - } - ] - } -} diff -Nru libcloud-0.5.0/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_protocols.json libcloud-0.15.1/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_protocols.json --- libcloud-0.5.0/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_protocols.json 2011-05-14 15:12:32.000000000 +0000 +++ libcloud-0.15.1/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_protocols.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -{"protocols": [ - { - "name": "HTTP", - "port": "80" - }, - { - "name": "FTP", - "port": "21" - }, - { - "name": "IMAPv4", - "port": "143" - }, - { - "name": "POP3", - "port": "110" - }, - { - "name": "SMTP", - "port": "25" - }, - { - "name": "LDAP", - "port": "389" - }, - { - "name": "HTTPS", - "port": "443" - }, - { - "name": "IMAPS", - 
"port": "993" - }, - { - "name": "POP3S", - "port": "995" - }, - { - "name": "LDAPS", - "port": "636" - } - ] -} diff -Nru libcloud-0.5.0/test/loadbalancer/__init__.py libcloud-0.15.1/test/loadbalancer/__init__.py --- libcloud-0.5.0/test/loadbalancer/__init__.py 2011-05-14 09:02:52.000000000 +0000 +++ libcloud-0.15.1/test/loadbalancer/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff -Nru libcloud-0.5.0/test/loadbalancer/test_gogrid.py libcloud-0.15.1/test/loadbalancer/test_gogrid.py --- libcloud-0.5.0/test/loadbalancer/test_gogrid.py 2011-05-21 20:10:20.000000000 +0000 +++ libcloud-0.15.1/test/loadbalancer/test_gogrid.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,159 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import httplib -import sys -import unittest -from urlparse import urlparse, parse_qsl - -from libcloud.common.types import LibcloudError -from libcloud.loadbalancer.base import LoadBalancer, Member, Algorithm -from libcloud.loadbalancer.drivers.gogrid import GoGridLBDriver - -from test import MockHttpTestCase -from test.file_fixtures import LoadBalancerFileFixtures - -class GoGridTests(unittest.TestCase): - - def setUp(self): - GoGridLBDriver.connectionCls.conn_classes = (None, - GoGridLBMockHttp) - GoGridLBMockHttp.type = None - self.driver = GoGridLBDriver('user', 'key') - - def test_list_protocols(self): - protocols = self.driver.list_protocols() - - self.assertEqual(len(protocols), 1) - self.assertEqual(protocols[0], 'http') - - def test_list_balancers(self): - balancers = self.driver.list_balancers() - - self.assertEquals(len(balancers), 2) - self.assertEquals(balancers[0].name, "foo") - self.assertEquals(balancers[0].id, "23517") - self.assertEquals(balancers[1].name, "bar") - self.assertEquals(balancers[1].id, "23526") - - def test_create_balancer(self): - balancer = self.driver.create_balancer(name='test2', - port=80, - protocol='http', - algorithm=Algorithm.ROUND_ROBIN, - members=(Member(None, '10.1.0.10', 80), - Member(None, '10.1.0.11', 80)) - ) - - self.assertEquals(balancer.name, 'test2') - self.assertEquals(balancer.id, '123') - - def test_create_balancer_UNEXPECTED_ERROR(self): - # Try to create new balancer and attach members with an IP address which - # does not belong to this account - GoGridLBMockHttp.type = 'UNEXPECTED_ERROR' - - try: - 
self.driver.create_balancer(name='test2', - port=80, - protocol='http', - algorithm=Algorithm.ROUND_ROBIN, - members=(Member(None, '10.1.0.10', 80), - Member(None, '10.1.0.11', 80)) - ) - except LibcloudError, e: - self.assertTrue(str(e).find('tried to add a member with an IP address not assigned to your account') != -1) - else: - self.fail('Exception was not thrown') - - def test_destroy_balancer(self): - balancer = self.driver.list_balancers()[0] - - ret = self.driver.destroy_balancer(balancer) - self.assertTrue(ret) - - def test_get_balancer(self): - balancer = self.driver.get_balancer(balancer_id='23530') - - self.assertEquals(balancer.name, 'test2') - self.assertEquals(balancer.id, '23530') - - def test_balancer_list_members(self): - balancer = self.driver.get_balancer(balancer_id='23530') - members = balancer.list_members() - - expected_members = set([u'10.0.0.78:80', u'10.0.0.77:80', - u'10.0.0.76:80']) - - self.assertEquals(len(members), 3) - self.assertEquals(expected_members, - set(["%s:%s" % (member.ip, member.port) for member in members])) - - def test_balancer_attach_member(self): - balancer = LoadBalancer(23530, None, None, None, None, None) - member = self.driver.balancer_attach_member(balancer, - Member(None, ip='10.0.0.75', port='80')) - - self.assertEquals(member.ip, '10.0.0.75') - self.assertEquals(member.port, 80) - - def test_balancer_detach_member(self): - balancer = LoadBalancer(23530, None, None, None, None, None) - member = self.driver.balancer_list_members(balancer)[0] - - ret = self.driver.balancer_detach_member(balancer, member) - - self.assertTrue(ret) - -class GoGridLBMockHttp(MockHttpTestCase): - fixtures = LoadBalancerFileFixtures('gogrid') - - def _api_grid_loadbalancer_list(self, method, url, body, headers): - body = self.fixtures.load('loadbalancer_list.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _api_grid_ip_list(self, method, url, body, headers): - body = self.fixtures.load('ip_list.json') - 
return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _api_grid_loadbalancer_add(self, method, url, body, headers): - qs = dict(parse_qsl(urlparse(url).query)) - self.assertEqual(qs['loadbalancer.type'], 'round robin') - - body = self.fixtures.load('loadbalancer_add.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _api_grid_ip_list_UNEXPECTED_ERROR(self, method, url, body, headers): - return self._api_grid_ip_list(method, url, body, headers) - - def _api_grid_loadbalancer_add_UNEXPECTED_ERROR(self, method, url, body, headers): - body = self.fixtures.load('unexpected_error.json') - return (httplib.INTERNAL_SERVER_ERROR, body, {}, httplib.responses[httplib.OK]) - - def _api_grid_loadbalancer_delete(self, method, url, body, headers): - body = self.fixtures.load('loadbalancer_add.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _api_grid_loadbalancer_get(self, method, url, body, headers): - body = self.fixtures.load('loadbalancer_get.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _api_grid_loadbalancer_edit(self, method, url, body, headers): - body = self.fixtures.load('loadbalancer_edit.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - -if __name__ == "__main__": - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/loadbalancer/test_rackspace.py libcloud-0.15.1/test/loadbalancer/test_rackspace.py --- libcloud-0.5.0/test/loadbalancer/test_rackspace.py 2011-05-21 20:10:20.000000000 +0000 +++ libcloud-0.15.1/test/loadbalancer/test_rackspace.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,159 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import httplib -import sys -import unittest - -try: - import json -except ImportError: - import simplejson as json - -from libcloud.loadbalancer.base import Member, Algorithm -from libcloud.loadbalancer.drivers.rackspace import RackspaceLBDriver - -from test import MockHttp -from test.file_fixtures import LoadBalancerFileFixtures - -class RackspaceLBTests(unittest.TestCase): - - def setUp(self): - RackspaceLBDriver.connectionCls.conn_classes = (None, - RackspaceLBMockHttp) - RackspaceLBMockHttp.type = None - self.driver = RackspaceLBDriver('user', 'key') - - def test_list_protocols(self): - protocols = self.driver.list_protocols() - - self.assertEqual(len(protocols), 10) - self.assertTrue('http' in protocols) - - def test_list_balancers(self): - balancers = self.driver.list_balancers() - - self.assertEquals(len(balancers), 2) - self.assertEquals(balancers[0].name, "test0") - self.assertEquals(balancers[0].id, "8155") - self.assertEquals(balancers[1].name, "test1") - self.assertEquals(balancers[1].id, "8156") - - def test_create_balancer(self): - balancer = self.driver.create_balancer(name='test2', - port=80, - algorithm=Algorithm.ROUND_ROBIN, - members=(Member(None, '10.1.0.10', 80), - Member(None, '10.1.0.11', 80)) - ) - - self.assertEquals(balancer.name, 'test2') - self.assertEquals(balancer.id, '8290') - - def test_destroy_balancer(self): - balancer = self.driver.list_balancers()[0] - - ret = 
self.driver.destroy_balancer(balancer) - self.assertTrue(ret) - - def test_get_balancer(self): - balancer = self.driver.get_balancer(balancer_id='8290') - - self.assertEquals(balancer.name, 'test2') - self.assertEquals(balancer.id, '8290') - - def test_balancer_list_members(self): - balancer = self.driver.get_balancer(balancer_id='8290') - members = balancer.list_members() - - self.assertEquals(len(members), 2) - self.assertEquals(set(['10.1.0.10:80', '10.1.0.11:80']), - set(["%s:%s" % (member.ip, member.port) for member in members])) - - def test_balancer_attach_member(self): - balancer = self.driver.get_balancer(balancer_id='8290') - member = balancer.attach_member(Member(None, ip='10.1.0.12', port='80')) - - self.assertEquals(member.ip, '10.1.0.12') - self.assertEquals(member.port, 80) - - def test_balancer_detach_member(self): - balancer = self.driver.get_balancer(balancer_id='8290') - member = balancer.list_members()[0] - - ret = balancer.detach_member(member) - - self.assertTrue(ret) - -class RackspaceLBMockHttp(MockHttp, unittest.TestCase): - fixtures = LoadBalancerFileFixtures('rackspace') - - def _v1_0(self, method, url, body, headers): - headers = {'x-server-management-url': 'https://servers.api.rackspacecloud.com/v1.0/slug', - 'x-auth-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', - 'x-cdn-management-url': 'https://cdn.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06', - 'x-storage-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', - 'x-storage-url': 'https://storage4.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06'} - return (httplib.NO_CONTENT, "", headers, httplib.responses[httplib.NO_CONTENT]) - - def _v1_0_slug_loadbalancers_protocols(self, method, url, body, headers): - body = self.fixtures.load('v1_slug_loadbalancers_protocols.json') - return (httplib.ACCEPTED, body, {}, - httplib.responses[httplib.ACCEPTED]) - - def _v1_0_slug_loadbalancers(self, method, url, body, headers): - if method == "GET": - body = 
self.fixtures.load('v1_slug_loadbalancers.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - elif method == "POST": - body_json = json.loads(body) - self.assertEqual(body_json['loadBalancer']['protocol'], 'HTTP') - self.assertEqual(body_json['loadBalancer']['algorithm'], 'ROUND_ROBIN') - - body = self.fixtures.load('v1_slug_loadbalancers_post.json') - return (httplib.ACCEPTED, body, {}, - httplib.responses[httplib.ACCEPTED]) - - raise NotImplementedError - - def _v1_0_slug_loadbalancers_8155(self, method, url, body, headers): - if method == "DELETE": - return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) - - raise NotImplementedError - - def _v1_0_slug_loadbalancers_8290(self, method, url, body, headers): - body = self.fixtures.load('v1_slug_loadbalancers_8290.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - - def _v1_0_slug_loadbalancers_8290_nodes(self, method, url, body, headers): - if method == "GET": - body = self.fixtures.load('v1_slug_loadbalancers_8290_nodes.json') - return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - elif method == "POST": - body = self.fixtures.load('v1_slug_loadbalancers_8290_nodes_post.json') - return (httplib.ACCEPTED, body, {}, - httplib.responses[httplib.ACCEPTED]) - - raise NotImplementedError - - def _v1_0_slug_loadbalancers_8290_nodes_30944(self, method, url, body, headers): - if method == "DELETE": - return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) - - raise NotImplementedError - -if __name__ == "__main__": - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/pricing_test.json libcloud-0.15.1/test/pricing_test.json --- libcloud-0.5.0/test/pricing_test.json 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/pricing_test.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -{ - "foo": { - "1": 1.00, - "2": 2.00 - } -} diff -Nru libcloud-0.5.0/test/secrets.py libcloud-0.15.1/test/secrets.py --- 
libcloud-0.5.0/test/secrets.py 2011-04-10 17:14:28.000000000 +0000 +++ libcloud-0.15.1/test/secrets.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,68 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Make a copy of this file named 'secrets.py' and add your credentials there. -# Note you can run unit tests without setting your credentials. 
- -# for test_ec2.py -EC2_ACCESS_ID='YoUR K3Y' -EC2_SECRET='secr3t' - -BRIGHTBOX_CLIENT_ID = '' -BRIGHTBOX_CLIENT_SECRET = '' - -BLUEBOX_CUSTOMER_ID = '' -BLUEBOX_API_KEY = '' - -RACKSPACE_USER = '' -RACKSPACE_KEY = '' - -SLICEHOST_KEY = '' - -VPSNET_USER = '' -VPSNET_KEY = '' - -GOGRID_API_KEY = '' -GOGRID_SECRET = '' - -LINODE_KEY = '' - -HOSTINGCOM_USER = '' -HOSTINGCOM_SECRET = '' - -TERREMARK_USER = '' -TERREMARK_SECRET = '' - -SOFTLAYER_USER = '' -SOFTLAYER_APIKEY = '' - -VOXEL_KEY = '' -VOXEL_SECRET = '' - -ECP_USER_NAME = '' -ECP_PASSWORD = '' - -IBM_USER = '' -IBM_SECRET = '' - -DREAMHOST_KEY='' - -GANDI_USER = '' - -OPENNEBULA_USER = '' -OPENNEBULA_KEY = '' - -OPSOURCE_USER='' -OPSOURCE_PASS='' diff -Nru libcloud-0.5.0/test/secrets.py-dist libcloud-0.15.1/test/secrets.py-dist --- libcloud-0.5.0/test/secrets.py-dist 2011-04-10 13:28:22.000000000 +0000 +++ libcloud-0.15.1/test/secrets.py-dist 1970-01-01 00:00:00.000000000 +0000 @@ -1,68 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Make a copy of this file named 'secrets.py' and add your credentials there. -# Note you can run unit tests without setting your credentials. 
- -# for test_ec2.py -EC2_ACCESS_ID='YoUR K3Y' -EC2_SECRET='secr3t' - -BRIGHTBOX_CLIENT_ID = '' -BRIGHTBOX_CLIENT_SECRET = '' - -BLUEBOX_CUSTOMER_ID = '' -BLUEBOX_API_KEY = '' - -RACKSPACE_USER = '' -RACKSPACE_KEY = '' - -SLICEHOST_KEY = '' - -VPSNET_USER = '' -VPSNET_KEY = '' - -GOGRID_API_KEY = '' -GOGRID_SECRET = '' - -LINODE_KEY = '' - -HOSTINGCOM_USER = '' -HOSTINGCOM_SECRET = '' - -TERREMARK_USER = '' -TERREMARK_SECRET = '' - -SOFTLAYER_USER = '' -SOFTLAYER_APIKEY = '' - -VOXEL_KEY = '' -VOXEL_SECRET = '' - -ECP_USER_NAME = '' -ECP_PASSWORD = '' - -IBM_USER = '' -IBM_SECRET = '' - -DREAMHOST_KEY='' - -GANDI_USER = '' - -OPENNEBULA_USER = '' -OPENNEBULA_KEY = '' - -OPSOURCE_USER='' -OPSOURCE_PASS='' diff -Nru libcloud-0.5.0/test/storage/fixtures/cloudfiles/list_container_objects_empty.json libcloud-0.15.1/test/storage/fixtures/cloudfiles/list_container_objects_empty.json --- libcloud-0.5.0/test/storage/fixtures/cloudfiles/list_container_objects_empty.json 2011-03-24 19:47:21.000000000 +0000 +++ libcloud-0.15.1/test/storage/fixtures/cloudfiles/list_container_objects_empty.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{} diff -Nru libcloud-0.5.0/test/storage/fixtures/cloudfiles/list_container_objects.json libcloud-0.15.1/test/storage/fixtures/cloudfiles/list_container_objects.json --- libcloud-0.5.0/test/storage/fixtures/cloudfiles/list_container_objects.json 2011-03-24 19:47:21.000000000 +0000 +++ libcloud-0.15.1/test/storage/fixtures/cloudfiles/list_container_objects.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -[ - {"name":"foo test 1","hash":"16265549b5bda64ecdaa5156de4c97cc", - "bytes":1160520,"content_type":"application/zip", - "last_modified":"2011-01-25T22:01:50.351810"}, - {"name":"foo test 2","hash":"16265549b5bda64ecdaa5156de4c97bb", - "bytes":1160520,"content_type":"application/zip", - "last_modified":"2011-01-25T22:01:50.351810"}, - {"name":"foo tes 3","hash":"16265549b5bda64ecdaa5156de4c97ee", - 
"bytes":1160520,"content_type":"application/zip", - "last_modified":"2011-01-25T22:01:46.549890"}, - {"name":"foo test 3","hash":"16265549b5bda64ecdaa5156de4c97ff", - "bytes":1160520,"content_type":"application/text", - "last_modified":"2011-01-25T22:01:50.351810"} -] diff -Nru libcloud-0.5.0/test/storage/fixtures/cloudfiles/list_containers_empty.json libcloud-0.15.1/test/storage/fixtures/cloudfiles/list_containers_empty.json --- libcloud-0.5.0/test/storage/fixtures/cloudfiles/list_containers_empty.json 2011-03-24 19:47:21.000000000 +0000 +++ libcloud-0.15.1/test/storage/fixtures/cloudfiles/list_containers_empty.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{} diff -Nru libcloud-0.5.0/test/storage/fixtures/cloudfiles/list_containers.json libcloud-0.15.1/test/storage/fixtures/cloudfiles/list_containers.json --- libcloud-0.5.0/test/storage/fixtures/cloudfiles/list_containers.json 2011-03-24 19:47:21.000000000 +0000 +++ libcloud-0.15.1/test/storage/fixtures/cloudfiles/list_containers.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -[ - {"name":"container1","count":4,"bytes":3484450}, - {"name":"container2","count":120,"bytes":340084450}, - {"name":"container3","count":0,"bytes":0} -] diff -Nru libcloud-0.5.0/test/storage/fixtures/cloudfiles/meta_data.json libcloud-0.15.1/test/storage/fixtures/cloudfiles/meta_data.json --- libcloud-0.5.0/test/storage/fixtures/cloudfiles/meta_data.json 2011-03-24 19:47:21.000000000 +0000 +++ libcloud-0.15.1/test/storage/fixtures/cloudfiles/meta_data.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"bytes_used": 1234567, "container_count": 10, "object_count": 400} diff -Nru libcloud-0.5.0/test/storage/fixtures/s3/list_container_objects_empty.xml libcloud-0.15.1/test/storage/fixtures/s3/list_container_objects_empty.xml --- libcloud-0.5.0/test/storage/fixtures/s3/list_container_objects_empty.xml 2011-04-09 20:18:26.000000000 +0000 +++ libcloud-0.15.1/test/storage/fixtures/s3/list_container_objects_empty.xml 
1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ - - - test_container - - - 1000 - false - diff -Nru libcloud-0.5.0/test/storage/fixtures/s3/list_container_objects.xml libcloud-0.15.1/test/storage/fixtures/s3/list_container_objects.xml --- libcloud-0.5.0/test/storage/fixtures/s3/list_container_objects.xml 2011-04-09 20:15:14.000000000 +0000 +++ libcloud-0.15.1/test/storage/fixtures/s3/list_container_objects.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ - - - test_container - - - 1000 - false - - 1.zip - 2011-04-09T19:05:18.000Z - "4397da7a7649e8085de9916c240e8166" - 1234567 - - 65a011niqo39cdf8ec533ec3d1ccaafsa932 - - STANDARD - - diff -Nru libcloud-0.5.0/test/storage/fixtures/s3/list_containers_empty.xml libcloud-0.15.1/test/storage/fixtures/s3/list_containers_empty.xml --- libcloud-0.5.0/test/storage/fixtures/s3/list_containers_empty.xml 2011-04-09 19:59:16.000000000 +0000 +++ libcloud-0.15.1/test/storage/fixtures/s3/list_containers_empty.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ - - - - af4rf45db0927637c66fb848dfc972b8b5126e1237bde6fe02862b11481fdfd9 - foobar - - - - diff -Nru libcloud-0.5.0/test/storage/fixtures/s3/list_containers.xml libcloud-0.15.1/test/storage/fixtures/s3/list_containers.xml --- libcloud-0.5.0/test/storage/fixtures/s3/list_containers.xml 2011-04-09 20:03:15.000000000 +0000 +++ libcloud-0.15.1/test/storage/fixtures/s3/list_containers.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ - - - af4rf45db0927637c66fb848dfc972b8b5126e1237bde6fe02862b11481fdfd9 - foobar - - - - test1 - 2011-04-09T12:34:49.000Z - - - test2 - 2011-02-09T12:34:49.000Z - - - diff -Nru libcloud-0.5.0/test/storage/test_cloudfiles.py libcloud-0.15.1/test/storage/test_cloudfiles.py --- libcloud-0.5.0/test/storage/test_cloudfiles.py 2011-05-15 16:20:55.000000000 +0000 +++ libcloud-0.15.1/test/storage/test_cloudfiles.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,662 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more 
-# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os -import os.path # pylint: disable-msg=W0404 -import sys -import copy -import unittest -import httplib - -import libcloud.utils - -from libcloud.common.types import LibcloudError, MalformedResponseError -from libcloud.storage.base import Container, Object -from libcloud.storage.types import ContainerAlreadyExistsError -from libcloud.storage.types import ContainerDoesNotExistError -from libcloud.storage.types import ContainerIsNotEmptyError -from libcloud.storage.types import ObjectDoesNotExistError -from libcloud.storage.types import ObjectHashMismatchError -from libcloud.storage.types import InvalidContainerNameError -from libcloud.storage.drivers.cloudfiles import CloudFilesStorageDriver -from libcloud.storage.drivers.dummy import DummyIterator - -from test import StorageMockHttp, MockRawResponse # pylint: disable-msg=E0611 -from test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611 - -current_hash = None - -class CloudFilesTests(unittest.TestCase): - - def setUp(self): - CloudFilesStorageDriver.connectionCls.conn_classes = ( - None, CloudFilesMockHttp) - CloudFilesStorageDriver.connectionCls.rawResponseCls = \ - CloudFilesMockRawResponse - CloudFilesMockHttp.type = None - CloudFilesMockRawResponse.type = None - self.driver = 
CloudFilesStorageDriver('dummy', 'dummy') - self._remove_test_file() - - def tearDown(self): - self._remove_test_file() - - def test_invalid_json_throws_exception(self): - CloudFilesMockHttp.type = 'MALFORMED_JSON' - try: - self.driver.list_containers() - except MalformedResponseError: - pass - else: - self.fail('Exception was not thrown') - - def test_list_containers(self): - CloudFilesMockHttp.type = 'EMPTY' - containers = self.driver.list_containers() - self.assertEqual(len(containers), 0) - - CloudFilesMockHttp.type = None - containers = self.driver.list_containers() - self.assertEqual(len(containers), 3) - - container = [c for c in containers if c.name == 'container2'][0] - self.assertEqual(container.extra['object_count'], 120) - self.assertEqual(container.extra['size'], 340084450) - - def test_list_container_objects(self): - CloudFilesMockHttp.type = 'EMPTY' - container = Container( - name='test_container', extra={}, driver=self.driver) - objects = self.driver.list_container_objects(container=container) - self.assertEqual(len(objects), 0) - - CloudFilesMockHttp.type = None - objects = self.driver.list_container_objects(container=container) - self.assertEqual(len(objects), 4) - - obj = [o for o in objects if o.name == 'foo test 1'][0] - self.assertEqual(obj.hash, '16265549b5bda64ecdaa5156de4c97cc') - self.assertEqual(obj.size, 1160520) - self.assertEqual(obj.container.name, 'test_container') - - def test_get_container(self): - container = self.driver.get_container(container_name='test_container') - self.assertEqual(container.name, 'test_container') - self.assertEqual(container.extra['object_count'], 800) - self.assertEqual(container.extra['size'], 1234568) - - def test_get_container_not_found(self): - try: - self.driver.get_container(container_name='not_found') - except ContainerDoesNotExistError: - pass - else: - self.fail('Exception was not thrown') - - def test_get_object_success(self): - obj = self.driver.get_object(container_name='test_container', - 
object_name='test_object') - self.assertEqual(obj.container.name, 'test_container') - self.assertEqual(obj.size, 555) - self.assertEqual(obj.hash, '6b21c4a111ac178feacf9ec9d0c71f17') - self.assertEqual(obj.extra['content_type'], 'application/zip') - self.assertEqual( - obj.extra['last_modified'], 'Tue, 25 Jan 2011 22:01:49 GMT') - self.assertEqual(obj.meta_data['foo-bar'], 'test 1') - self.assertEqual(obj.meta_data['bar-foo'], 'test 2') - - def test_get_object_not_found(self): - try: - self.driver.get_object(container_name='test_container', - object_name='not_found') - except ObjectDoesNotExistError: - pass - else: - self.fail('Exception was not thrown') - - def test_create_container_success(self): - container = self.driver.create_container( - container_name='test_create_container') - self.assertTrue(isinstance(container, Container)) - self.assertEqual(container.name, 'test_create_container') - self.assertEqual(container.extra['object_count'], 0) - - def test_create_container_already_exists(self): - CloudFilesMockHttp.type = 'ALREADY_EXISTS' - - try: - self.driver.create_container( - container_name='test_create_container') - except ContainerAlreadyExistsError: - pass - else: - self.fail( - 'Container already exists but an exception was not thrown') - - def test_create_container_invalid_name_too_long(self): - name = ''.join([ 'x' for x in range(0, 257)]) - try: - self.driver.create_container(container_name=name) - except InvalidContainerNameError: - pass - else: - self.fail( - 'Invalid name was provided (name is too long)' - ', but exception was not thrown') - - def test_create_container_invalid_name_slashes_in_name(self): - try: - self.driver.create_container(container_name='test/slashes/') - except InvalidContainerNameError: - pass - else: - self.fail( - 'Invalid name was provided (name contains slashes)' - ', but exception was not thrown') - - def test_delete_container_success(self): - container = Container(name='foo_bar_container', extra={}, driver=self) - 
result = self.driver.delete_container(container=container) - self.assertTrue(result) - - def test_delete_container_not_found(self): - CloudFilesMockHttp.type = 'NOT_FOUND' - container = Container(name='foo_bar_container', extra={}, driver=self) - try: - self.driver.delete_container(container=container) - except ContainerDoesNotExistError: - pass - else: - self.fail( - 'Container does not exist but an exception was not thrown') - - def test_delete_container_not_empty(self): - CloudFilesMockHttp.type = 'NOT_EMPTY' - container = Container(name='foo_bar_container', extra={}, driver=self) - try: - self.driver.delete_container(container=container) - except ContainerIsNotEmptyError: - pass - else: - self.fail('Container is not empty but an exception was not thrown') - - def test_download_object_success(self): - container = Container(name='foo_bar_container', extra={}, driver=self) - obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, - container=container, meta_data=None, - driver=CloudFilesStorageDriver) - destination_path = os.path.abspath(__file__) + '.temp' - result = self.driver.download_object(obj=obj, - destination_path=destination_path, - overwrite_existing=False, - delete_on_failure=True) - self.assertTrue(result) - - def test_download_object_invalid_file_size(self): - CloudFilesMockRawResponse.type = 'INVALID_SIZE' - container = Container(name='foo_bar_container', extra={}, driver=self) - obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, - container=container, meta_data=None, - driver=CloudFilesStorageDriver) - destination_path = os.path.abspath(__file__) + '.temp' - result = self.driver.download_object(obj=obj, - destination_path=destination_path, - overwrite_existing=False, - delete_on_failure=True) - self.assertFalse(result) - - def test_download_object_success_not_found(self): - CloudFilesMockRawResponse.type = 'NOT_FOUND' - container = Container(name='foo_bar_container', extra={}, driver=self) - - obj = 
Object(name='foo_bar_object', size=1000, hash=None, extra={}, - container=container, - meta_data=None, - driver=CloudFilesStorageDriver) - destination_path = os.path.abspath(__file__) + '.temp' - try: - self.driver.download_object( - obj=obj, - destination_path=destination_path, - overwrite_existing=False, - delete_on_failure=True) - except ObjectDoesNotExistError: - pass - else: - self.fail('Object does not exist but an exception was not thrown') - - def test_download_object_as_stream(self): - container = Container(name='foo_bar_container', extra={}, driver=self) - obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, - container=container, meta_data=None, - driver=CloudFilesStorageDriver) - - stream = self.driver.download_object_as_stream(obj=obj, chunk_size=None) - self.assertTrue(hasattr(stream, '__iter__')) - - def test_upload_object_success(self): - def upload_file(self, response, file_path, chunked=False, - calculate_hash=True): - return True, 'hash343hhash89h932439jsaa89', 1000 - - old_func = CloudFilesStorageDriver._upload_file - CloudFilesStorageDriver._upload_file = upload_file - file_path = os.path.abspath(__file__) - container = Container(name='foo_bar_container', extra={}, driver=self) - object_name = 'foo_test_upload' - extra = {'meta_data': { 'some-value': 'foobar'}} - obj = self.driver.upload_object(file_path=file_path, container=container, - extra=extra, object_name=object_name) - self.assertEqual(obj.name, 'foo_test_upload') - self.assertEqual(obj.size, 1000) - self.assertTrue('some-value' in obj.meta_data) - CloudFilesStorageDriver._upload_file = old_func - - def test_upload_object_invalid_hash(self): - def upload_file(self, response, file_path, chunked=False, - calculate_hash=True): - return True, 'hash343hhash89h932439jsaa89', 1000 - - CloudFilesMockRawResponse.type = 'INVALID_HASH' - - old_func = CloudFilesStorageDriver._upload_file - CloudFilesStorageDriver._upload_file = upload_file - file_path = os.path.abspath(__file__) - 
container = Container(name='foo_bar_container', extra={}, driver=self) - object_name = 'foo_test_upload' - try: - self.driver.upload_object(file_path=file_path, container=container, - object_name=object_name, - verify_hash=True) - except ObjectHashMismatchError: - pass - else: - self.fail( - 'Invalid hash was returned but an exception was not thrown') - finally: - CloudFilesStorageDriver._upload_file = old_func - - def test_upload_object_no_content_type(self): - def no_content_type(name): - return None, None - - old_func = libcloud.utils.guess_file_mime_type - libcloud.utils.guess_file_mime_type = no_content_type - file_path = os.path.abspath(__file__) - container = Container(name='foo_bar_container', extra={}, driver=self) - object_name = 'foo_test_upload' - try: - self.driver.upload_object(file_path=file_path, container=container, - object_name=object_name) - except AttributeError: - pass - else: - self.fail( - 'File content type not provided' - ' but an exception was not thrown') - finally: - libcloud.utils.guess_file_mime_type = old_func - - def test_upload_object_error(self): - def dummy_content_type(name): - return 'application/zip', None - - def send(instance): - raise Exception('') - - old_func1 = libcloud.utils.guess_file_mime_type - libcloud.utils.guess_file_mime_type = dummy_content_type - old_func2 = CloudFilesMockHttp.send - CloudFilesMockHttp.send = send - - file_path = os.path.abspath(__file__) - container = Container(name='foo_bar_container', extra={}, driver=self) - object_name = 'foo_test_upload' - try: - self.driver.upload_object( - file_path=file_path, - container=container, - object_name=object_name) - except LibcloudError: - pass - else: - self.fail('Timeout while uploading but an exception was not thrown') - finally: - libcloud.utils.guess_file_mime_type = old_func1 - CloudFilesMockHttp.send = old_func2 - - def test_upload_object_inexistent_file(self): - def dummy_content_type(name): - return 'application/zip', None - - old_func = 
libcloud.utils.guess_file_mime_type - libcloud.utils.guess_file_mime_type = dummy_content_type - - file_path = os.path.abspath(__file__ + '.inexistent') - container = Container(name='foo_bar_container', extra={}, driver=self) - object_name = 'foo_test_upload' - try: - self.driver.upload_object( - file_path=file_path, - container=container, - object_name=object_name) - except OSError: - pass - else: - self.fail('Inesitent but an exception was not thrown') - finally: - libcloud.utils.guess_file_mime_type = old_func - - def test_upload_object_via_stream(self): - def dummy_content_type(name): - return 'application/zip', None - - old_func = libcloud.utils.guess_file_mime_type - libcloud.utils.guess_file_mime_type = dummy_content_type - - container = Container(name='foo_bar_container', extra={}, driver=self) - object_name = 'foo_test_stream_data' - iterator = DummyIterator(data=['2', '3', '5']) - try: - self.driver.upload_object_via_stream(container=container, - object_name=object_name, - iterator=iterator) - finally: - libcloud.utils.guess_file_mime_type = old_func - - def test_delete_object_success(self): - container = Container(name='foo_bar_container', extra={}, driver=self) - obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, - container=container, meta_data=None, - driver=CloudFilesStorageDriver) - status = self.driver.delete_object(obj=obj) - self.assertTrue(status) - - def test_delete_object_not_found(self): - CloudFilesMockHttp.type = 'NOT_FOUND' - container = Container(name='foo_bar_container', extra={}, driver=self) - obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, - container=container, meta_data=None, - driver=CloudFilesStorageDriver) - try: - self.driver.delete_object(obj=obj) - except ObjectDoesNotExistError: - pass - else: - self.fail('Object does not exist but an exception was not thrown') - - def test_ex_get_meta_data(self): - meta_data = self.driver.ex_get_meta_data() - self.assertTrue(isinstance(meta_data, dict)) - 
self.assertTrue('object_count' in meta_data) - self.assertTrue('container_count' in meta_data) - self.assertTrue('bytes_used' in meta_data) - - def _remove_test_file(self): - file_path = os.path.abspath(__file__) + '.temp' - - try: - os.unlink(file_path) - except OSError: - pass - -class CloudFilesMockHttp(StorageMockHttp): - - fixtures = StorageFileFixtures('cloudfiles') - base_headers = { 'content-type': 'application/json; charset=UTF-8'} - - # fake auth token response - def _v1_0(self, method, url, body, headers): - headers = copy.deepcopy(self.base_headers) - headers.update({ 'x-server-management-url': - 'https://servers.api.rackspacecloud.com/v1.0/slug', - 'x-auth-token': 'FE011C19', - 'x-cdn-management-url': - 'https://cdn.clouddrive.com/v1/MossoCloudFS', - 'x-storage-token': 'FE011C19', - 'x-storage-url': - 'https://storage4.clouddrive.com/v1/MossoCloudFS'}) - return (httplib.NO_CONTENT, - "", - headers, - httplib.responses[httplib.NO_CONTENT]) - - def _v1_MossoCloudFS_MALFORMED_JSON(self, method, url, body, headers): - # test_invalid_json_throws_exception - body = 'broken: json /*"' - return (httplib.NO_CONTENT, - body, - self.base_headers, - httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS_EMPTY(self, method, url, body, headers): - return (httplib.NO_CONTENT, - body, - self.base_headers, - httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS(self, method, url, body, headers): - headers = copy.deepcopy(self.base_headers) - if method == 'GET': - # list_containers - body = self.fixtures.load('list_containers.json') - status_code = httplib.OK - elif method == 'HEAD': - # get_meta_data - body = self.fixtures.load('meta_data.json') - status_code = httplib.NO_CONTENT - headers.update({ 'x-account-container-count': 10, - 'x-account-object-count': 400, - 'x-account-bytes-used': 1234567 - }) - return (status_code, body, headers, httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS_not_found(self, method, url, body, headers): - # 
test_get_object_not_found - if method == 'HEAD': - body = '' - else: - raise ValueError('Invalid method') - - return (httplib.NOT_FOUND, - body, - self.base_headers, - httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS_test_container_EMPTY(self, method, url, body, headers): - body = self.fixtures.load('list_container_objects_empty.json') - return (httplib.OK, - body, - self.base_headers, - httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS_test_container(self, method, url, body, headers): - headers = copy.deepcopy(self.base_headers) - if method == 'GET': - # list_container_objects - body = self.fixtures.load('list_container_objects.json') - status_code = httplib.OK - elif method == 'HEAD': - # get_container - body = self.fixtures.load('list_container_objects_empty.json') - status_code = httplib.NO_CONTENT - headers.update({ 'x-container-object-count': 800, - 'x-container-bytes-used': 1234568 - }) - return (status_code, body, headers, httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS_test_container_not_found( - self, method, url, body, headers): - # test_get_container_not_found - if method == 'HEAD': - body = '' - else: - raise ValueError('Invalid method') - - return (httplib.NOT_FOUND, body, - self.base_headers, - httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS_test_container_test_object( - self, method, url, body, headers): - headers = copy.deepcopy(self.base_headers) - if method == 'HEAD': - # get_object - body = self.fixtures.load('list_container_objects_empty.json') - status_code = httplib.NO_CONTENT - headers.update({ 'content-length': 555, - 'last-modified': 'Tue, 25 Jan 2011 22:01:49 GMT', - 'etag': '6b21c4a111ac178feacf9ec9d0c71f17', - 'x-object-meta-foo-bar': 'test 1', - 'x-object-meta-bar-foo': 'test 2', - 'content-type': 'application/zip'}) - return (status_code, body, headers, httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS_test_create_container( - self, method, url, body, headers): - # test_create_container_success - 
headers = copy.deepcopy(self.base_headers) - body = self.fixtures.load('list_container_objects_empty.json') - headers = copy.deepcopy(self.base_headers) - headers.update({ 'content-length': 18, - 'date': 'Mon, 28 Feb 2011 07:52:57 GMT' - }) - status_code = httplib.CREATED - return (status_code, body, headers, httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS_test_create_container_ALREADY_EXISTS( - self, method, url, body, headers): - # test_create_container_already_exists - headers = copy.deepcopy(self.base_headers) - body = self.fixtures.load('list_container_objects_empty.json') - headers.update({ 'content-type': 'text/plain' }) - status_code = httplib.ACCEPTED - return (status_code, body, headers, httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS_foo_bar_container(self, method, url, body, headers): - if method == 'DELETE': - # test_delete_container_success - body = self.fixtures.load('list_container_objects_empty.json') - headers = self.base_headers - status_code = httplib.NO_CONTENT - return (status_code, body, headers, httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS_foo_bar_container_NOT_FOUND( - self, method, url, body, headers): - - if method == 'DELETE': - # test_delete_container_not_found - body = self.fixtures.load('list_container_objects_empty.json') - headers = self.base_headers - status_code = httplib.NOT_FOUND - return (status_code, body, headers, httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS_foo_bar_container_NOT_EMPTY( - self, method, url, body, headers): - - if method == 'DELETE': - # test_delete_container_not_empty - body = self.fixtures.load('list_container_objects_empty.json') - headers = self.base_headers - status_code = httplib.CONFLICT - return (status_code, body, headers, httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS_foo_bar_container_foo_bar_object( - self, method, url, body, headers): - - if method == 'DELETE': - # test_delete_object_success - body = 
self.fixtures.load('list_container_objects_empty.json') - headers = self.base_headers - status_code = httplib.NO_CONTENT - return (status_code, body, headers, httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS_foo_bar_container_foo_bar_object_NOT_FOUND( - self, method, url, body, headers): - - if method == 'DELETE': - # test_delete_object_success - body = self.fixtures.load('list_container_objects_empty.json') - headers = self.base_headers - status_code = httplib.NOT_FOUND - - return (status_code, body, headers, httplib.responses[httplib.OK]) - -class CloudFilesMockRawResponse(MockRawResponse): - - fixtures = StorageFileFixtures('cloudfiles') - base_headers = { 'content-type': 'application/json; charset=UTF-8'} - - def _v1_MossoCloudFS_foo_bar_container_foo_test_upload( - self, method, url, body, headers): - # test_object_upload_success - - body = '' - headers = {} - headers.update(self.base_headers) - headers['etag'] = 'hash343hhash89h932439jsaa89' - return (httplib.CREATED, body, headers, httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS_foo_bar_container_foo_test_upload_INVALID_HASH( - self, method, url, body, headers): - # test_object_upload_invalid_hash - body = '' - headers = {} - headers.update(self.base_headers) - headers['etag'] = 'foobar' - return (httplib.CREATED, body, headers, - httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS_foo_bar_container_foo_bar_object( - self, method, url, body, headers): - - # test_download_object_success - body = 'test' - self._data = self._generate_random_data(1000) - return (httplib.OK, - body, - self.base_headers, - httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS_foo_bar_container_foo_bar_object_INVALID_SIZE( - self, method, url, body, headers): - # test_download_object_invalid_file_size - body = 'test' - self._data = self._generate_random_data(100) - return (httplib.OK, body, - self.base_headers, - httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS_foo_bar_container_foo_bar_object_NOT_FOUND( 
- self, method, url, body, headers): - body = '' - return (httplib.NOT_FOUND, body, - self.base_headers, - httplib.responses[httplib.OK]) - - def _v1_MossoCloudFS_foo_bar_container_foo_test_stream_data( - self, method, url, body, headers): - - # test_upload_object_via_stream_success - headers = {} - headers.update(self.base_headers) - headers['etag'] = '577ef1154f3240ad5b9b413aa7346a1e' - body = 'test' - return (httplib.CREATED, - body, - headers, - httplib.responses[httplib.OK]) - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/storage/test_s3.py libcloud-0.15.1/test/storage/test_s3.py --- libcloud-0.5.0/test/storage/test_s3.py 2011-05-15 21:00:25.000000000 +0000 +++ libcloud-0.15.1/test/storage/test_s3.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,594 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import sys -import httplib -import unittest - -from libcloud.common.types import InvalidCredsError -from libcloud.common.types import LibcloudError -from libcloud.storage.base import Container, Object -from libcloud.storage.types import ContainerDoesNotExistError -from libcloud.storage.types import ContainerIsNotEmptyError -from libcloud.storage.types import InvalidContainerNameError -from libcloud.storage.types import ObjectDoesNotExistError -from libcloud.storage.types import ObjectHashMismatchError -from libcloud.storage.drivers.s3 import S3StorageDriver, S3USWestStorageDriver -from libcloud.storage.drivers.s3 import S3EUWestStorageDriver -from libcloud.storage.drivers.s3 import S3APSEStorageDriver -from libcloud.storage.drivers.s3 import S3APNEStorageDriver -from libcloud.storage.drivers.dummy import DummyIterator - -from test import StorageMockHttp, MockRawResponse # pylint: disable-msg=E0611 -from test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611 - -class S3Tests(unittest.TestCase): - - def setUp(self): - S3StorageDriver.connectionCls.conn_classes = (None, S3MockHttp) - S3StorageDriver.connectionCls.rawResponseCls = S3MockRawResponse - S3MockHttp.type = None - S3MockRawResponse.type = None - self.driver = S3StorageDriver('dummy', 'dummy') - - def tearDown(self): - self._remove_test_file() - - def _remove_test_file(self): - file_path = os.path.abspath(__file__) + '.temp' - - try: - os.unlink(file_path) - except OSError: - pass - - def test_invalid_credentials(self): - S3MockHttp.type = 'UNAUTHORIZED' - try: - self.driver.list_containers() - except InvalidCredsError, e: - self.assertEqual(True, isinstance(e, InvalidCredsError)) - else: - self.fail('Exception was not thrown') - - def test_bucket_is_located_in_different_region(self): - S3MockHttp.type = 'DIFFERENT_REGION' - try: - self.driver.list_containers() - except LibcloudError: - pass - else: - self.fail('Exception was not thrown') - - def 
test_list_containers_empty(self): - S3MockHttp.type = 'list_containers_EMPTY' - containers = self.driver.list_containers() - self.assertEqual(len(containers), 0) - - def test_list_containers_success(self): - S3MockHttp.type = 'list_containers' - containers = self.driver.list_containers() - self.assertEqual(len(containers), 2) - - self.assertTrue('creation_date' in containers[1].extra) - - def test_list_container_objects_empty(self): - S3MockHttp.type = 'EMPTY' - container = Container(name='test_container', extra={}, - driver=self.driver) - objects = self.driver.list_container_objects(container=container) - self.assertEqual(len(objects), 0) - - def test_list_container_objects_success(self): - S3MockHttp.type = None - container = Container(name='test_container', extra={}, - driver=self.driver) - objects = self.driver.list_container_objects(container=container) - self.assertEqual(len(objects), 1) - - obj = [o for o in objects if o.name == '1.zip'][0] - self.assertEqual(obj.hash, '4397da7a7649e8085de9916c240e8166') - self.assertEqual(obj.size, 1234567) - self.assertEqual(obj.container.name, 'test_container') - self.assertTrue('owner' in obj.meta_data) - - def test_get_container_doesnt_exist(self): - S3MockHttp.type = 'list_containers' - try: - self.driver.get_container(container_name='container1') - except ContainerDoesNotExistError: - pass - else: - self.fail('Exception was not thrown') - - def test_get_container_success(self): - S3MockHttp.type = 'list_containers' - container = self.driver.get_container(container_name='test1') - self.assertTrue(container.name, 'test1') - - def test_get_object_container_doesnt_exist(self): - # This method makes two requests which makes mocking the response a bit - # trickier - S3MockHttp.type = 'list_containers' - try: - self.driver.get_object(container_name='test-inexistent', - object_name='test') - except ContainerDoesNotExistError: - pass - else: - self.fail('Exception was not thrown') - - def test_get_object_success(self): - # 
This method makes two requests which makes mocking the response a bit - # trickier - S3MockHttp.type = 'list_containers' - obj = self.driver.get_object(container_name='test2', - object_name='test') - - self.assertEqual(obj.name, 'test') - self.assertEqual(obj.container.name, 'test2') - self.assertEqual(obj.size, 12345) - self.assertEqual(obj.hash, 'e31208wqsdoj329jd') - - def test_create_container_invalid_name(self): - # invalid container name - S3MockHttp.type = 'INVALID_NAME' - try: - self.driver.create_container(container_name='new_container') - except InvalidContainerNameError: - pass - else: - self.fail('Exception was not thrown') - - def test_create_container_already_exists(self): - # container with this name already exists - S3MockHttp.type = 'ALREADY_EXISTS' - try: - self.driver.create_container(container_name='new-container') - except InvalidContainerNameError: - pass - else: - self.fail('Exception was not thrown') - - def test_create_container_success(self): - # success - S3MockHttp.type = None - container = self.driver.create_container(container_name='new_container') - self.assertEqual(container.name, 'new_container') - - def test_delete_container_doesnt_exist(self): - container = Container(name='new_container', extra=None, driver=self) - S3MockHttp.type = 'DOESNT_EXIST' - try: - self.driver.delete_container(container=container) - except ContainerDoesNotExistError: - pass - else: - self.fail('Exception was not thrown') - - def test_delete_container_not_empty(self): - container = Container(name='new_container', extra=None, driver=self) - S3MockHttp.type = 'NOT_EMPTY' - try: - self.driver.delete_container(container=container) - except ContainerIsNotEmptyError: - pass - else: - self.fail('Exception was not thrown') - - # success - S3MockHttp.type = None - self.assertTrue(self.driver.delete_container(container=container)) - - def test_delete_container_not_found(self): - S3MockHttp.type = 'NOT_FOUND' - container = Container(name='foo_bar_container', extra={}, 
driver=self) - try: - self.driver.delete_container(container=container) - except ContainerDoesNotExistError: - pass - else: - self.fail('Container does not exist but an exception was not thrown') - - def test_delete_container_success(self): - S3MockHttp.type = None - container = Container(name='new_container', extra=None, driver=self) - self.assertTrue(self.driver.delete_container(container=container)) - - def test_download_object_success(self): - container = Container(name='foo_bar_container', extra={}, driver=self) - obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, - container=container, meta_data=None, - driver=S3StorageDriver) - destination_path = os.path.abspath(__file__) + '.temp' - result = self.driver.download_object(obj=obj, - destination_path=destination_path, - overwrite_existing=False, - delete_on_failure=True) - self.assertTrue(result) - - def test_download_object_invalid_file_size(self): - S3MockRawResponse.type = 'INVALID_SIZE' - container = Container(name='foo_bar_container', extra={}, driver=self) - obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, - container=container, meta_data=None, - driver=S3StorageDriver) - destination_path = os.path.abspath(__file__) + '.temp' - result = self.driver.download_object(obj=obj, - destination_path=destination_path, - overwrite_existing=False, - delete_on_failure=True) - self.assertFalse(result) - - def test_download_object_invalid_file_already_exists(self): - S3MockRawResponse.type = 'INVALID_SIZE' - container = Container(name='foo_bar_container', extra={}, driver=self) - obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, - container=container, meta_data=None, - driver=S3StorageDriver) - destination_path = os.path.abspath(__file__) - try: - self.driver.download_object(obj=obj, - destination_path=destination_path, - overwrite_existing=False, - delete_on_failure=True) - except LibcloudError: - pass - else: - self.fail('Exception was not thrown') - - def 
test_download_object_as_stream_success(self): - container = Container(name='foo_bar_container', extra={}, driver=self) - - obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, - container=container, meta_data=None, - driver=S3StorageDriver) - - stream = self.driver.download_object_as_stream(obj=obj, chunk_size=None) - self.assertTrue(hasattr(stream, '__iter__')) - - def test_upload_object_invalid_ex_storage_class(self): - # Invalid hash is detected on the amazon side and BAD_REQUEST is - # returned - file_path = os.path.abspath(__file__) - container = Container(name='foo_bar_container', extra={}, driver=self) - object_name = 'foo_test_upload' - try: - self.driver.upload_object(file_path=file_path, container=container, - object_name=object_name, - verify_hash=True, - ex_storage_class='invalid-class') - except ValueError, e: - self.assertTrue(str(e).lower().find('invalid storage class') != -1) - pass - else: - self.fail('Exception was not thrown') - - def test_upload_object_invalid_hash1(self): - # Invalid hash is detected on the amazon side and BAD_REQUEST is - # returned - def upload_file(self, response, file_path, chunked=False, - calculate_hash=True): - return True, 'hash343hhash89h932439jsaa89', 1000 - - S3MockRawResponse.type = 'INVALID_HASH1' - - old_func = S3StorageDriver._upload_file - S3StorageDriver._upload_file = upload_file - file_path = os.path.abspath(__file__) - container = Container(name='foo_bar_container', extra={}, driver=self) - object_name = 'foo_test_upload' - try: - self.driver.upload_object(file_path=file_path, container=container, - object_name=object_name, - verify_hash=True) - except ObjectHashMismatchError: - pass - else: - self.fail( - 'Invalid hash was returned but an exception was not thrown') - finally: - S3StorageDriver._upload_file = old_func - - def test_upload_object_invalid_hash2(self): - # Invalid hash is detected when comparing hash provided in the response - # ETag header - def upload_file(self, response, 
file_path, chunked=False, - calculate_hash=True): - return True, '0cc175b9c0f1b6a831c399e269772661', 1000 - - S3MockRawResponse.type = 'INVALID_HASH2' - - old_func = S3StorageDriver._upload_file - S3StorageDriver._upload_file = upload_file - file_path = os.path.abspath(__file__) - container = Container(name='foo_bar_container', extra={}, driver=self) - object_name = 'foo_test_upload' - try: - self.driver.upload_object(file_path=file_path, container=container, - object_name=object_name, - verify_hash=True) - except ObjectHashMismatchError: - pass - else: - self.fail( - 'Invalid hash was returned but an exception was not thrown') - finally: - S3StorageDriver._upload_file = old_func - - def test_upload_object_success(self): - def upload_file(self, response, file_path, chunked=False, - calculate_hash=True): - return True, '0cc175b9c0f1b6a831c399e269772661', 1000 - - old_func = S3StorageDriver._upload_file - S3StorageDriver._upload_file = upload_file - file_path = os.path.abspath(__file__) - container = Container(name='foo_bar_container', extra={}, driver=self) - object_name = 'foo_test_upload' - extra = {'meta_data': { 'some-value': 'foobar'}} - obj = self.driver.upload_object(file_path=file_path, container=container, - object_name=object_name, - extra=extra, - verify_hash=True) - self.assertEqual(obj.name, 'foo_test_upload') - self.assertEqual(obj.size, 1000) - self.assertTrue('some-value' in obj.meta_data) - S3StorageDriver._upload_file = old_func - - def test_upload_object_via_stream(self): - try: - container = Container(name='foo_bar_container', extra={}, driver=self) - object_name = 'foo_test_stream_data' - iterator = DummyIterator(data=['2', '3', '5']) - self.driver.upload_object_via_stream(container=container, - object_name=object_name, - iterator=iterator) - except NotImplementedError: - pass - else: - self.fail('Exception was not thrown') - - def test_delete_object_not_found(self): - S3MockHttp.type = 'NOT_FOUND' - container = 
Container(name='foo_bar_container', extra={}, driver=self) - obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None, - meta_data=None, container=container, driver=self.driver) - try: - self.driver.delete_object(obj=obj) - except ObjectDoesNotExistError: - pass - else: - self.fail('Exception was not thrown') - - def test_delete_object_success(self): - container = Container(name='foo_bar_container', extra={}, driver=self) - obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None, - meta_data=None, container=container, driver=self.driver) - - result = self.driver.delete_object(obj=obj) - self.assertTrue(result) - -class S3USWestTests(S3Tests): - def setUp(self): - S3USWestStorageDriver.connectionCls.conn_classes = (None, S3MockHttp) - S3USWestStorageDriver.connectionCls.rawResponseCls = S3MockRawResponse - S3MockHttp.type = None - S3MockRawResponse.type = None - self.driver = S3USWestStorageDriver('dummy', 'dummy') - -class S3EUWestTests(S3Tests): - def setUp(self): - S3EUWestStorageDriver.connectionCls.conn_classes = (None, S3MockHttp) - S3EUWestStorageDriver.connectionCls.rawResponseCls = S3MockRawResponse - S3MockHttp.type = None - S3MockRawResponse.type = None - self.driver = S3EUWestStorageDriver('dummy', 'dummy') - -class S3APSETests(S3Tests): - def setUp(self): - S3APSEStorageDriver.connectionCls.conn_classes = (None, S3MockHttp) - S3APSEStorageDriver.connectionCls.rawResponseCls = S3MockRawResponse - S3MockHttp.type = None - S3MockRawResponse.type = None - self.driver = S3APSEStorageDriver('dummy', 'dummy') - -class S3APNETests(S3Tests): - def setUp(self): - S3APNEStorageDriver.connectionCls.conn_classes = (None, S3MockHttp) - S3APNEStorageDriver.connectionCls.rawResponseCls = S3MockRawResponse - S3MockHttp.type = None - S3MockRawResponse.type = None - self.driver = S3APNEStorageDriver('dummy', 'dummy') - -class S3MockHttp(StorageMockHttp): - - fixtures = StorageFileFixtures('s3') - base_headers = {} - - def _UNAUTHORIZED(self, 
method, url, body, headers): - return (httplib.UNAUTHORIZED, - '', - self.base_headers, - httplib.responses[httplib.OK]) - - def _DIFFERENT_REGION(self, method, url, body, headers): - return (httplib.MOVED_PERMANENTLY, - '', - self.base_headers, - httplib.responses[httplib.OK]) - - def _list_containers_EMPTY(self, method, url, body, headers): - body = self.fixtures.load('list_containers_empty.xml') - return (httplib.OK, - body, - self.base_headers, - httplib.responses[httplib.OK]) - - def _list_containers(self, method, url, body, headers): - body = self.fixtures.load('list_containers.xml') - return (httplib.OK, - body, - self.base_headers, - httplib.responses[httplib.OK]) - - def _test_container_EMPTY(self, method, url, body, headers): - body = self.fixtures.load('list_container_objects_empty.xml') - return (httplib.OK, - body, - self.base_headers, - httplib.responses[httplib.OK]) - - def _test_container(self, method, url, body, headers): - body = self.fixtures.load('list_container_objects.xml') - return (httplib.OK, - body, - self.base_headers, - httplib.responses[httplib.OK]) - - def _test2_test_list_containers(self, method, url, body, headers): - # test_get_object - body = self.fixtures.load('list_containers.xml') - headers = { 'content-type': 'application/zip', - 'etag': '"e31208wqsdoj329jd"', - 'content-length': 12345, - } - - return (httplib.OK, - body, - headers, - httplib.responses[httplib.OK]) - - def _new_container_INVALID_NAME(self, method, url, body, headers): - # test_create_container - return (httplib.BAD_REQUEST, - body, - headers, - httplib.responses[httplib.OK]) - - def _new_container_ALREADY_EXISTS(self, method, url, body, headers): - # test_create_container - return (httplib.CONFLICT, - body, - headers, - httplib.responses[httplib.OK]) - - def _new_container(self, method, url, body, headers): - # test_create_container, test_delete_container - - if method == 'PUT': - status = httplib.OK - elif method == 'DELETE': - status = httplib.NO_CONTENT - - 
return (status, - body, - headers, - httplib.responses[httplib.OK]) - - def _new_container_DOESNT_EXIST(self, method, url, body, headers): - # test_delete_container - return (httplib.NOT_FOUND, - body, - headers, - httplib.responses[httplib.OK]) - - def _new_container_NOT_EMPTY(self, method, url, body, headers): - # test_delete_container - return (httplib.CONFLICT, - body, - headers, - httplib.responses[httplib.OK]) - - def _foo_bar_container(self, method, url, body, headers): - # test_delete_container - return (httplib.NO_CONTENT, - body, - headers, - httplib.responses[httplib.OK]) - - def _foo_bar_container_NOT_FOUND(self, method, url, body, headers): - # test_delete_container_not_found - return (httplib.NOT_FOUND, - body, - headers, - httplib.responses[httplib.OK]) - - def _foo_bar_container_foo_bar_object_NOT_FOUND(self, method, url, body, headers): - # test_delete_object_not_found - return (httplib.NOT_FOUND, - body, - headers, - httplib.responses[httplib.OK]) - - def _foo_bar_container_foo_bar_object(self, method, url, body, headers): - # test_delete_object - return (httplib.NO_CONTENT, - body, - headers, - httplib.responses[httplib.OK]) - -class S3MockRawResponse(MockRawResponse): - - fixtures = StorageFileFixtures('s3') - - def _foo_bar_container_foo_bar_object(self, method, url, body, headers): - # test_download_object_success - body = '' - self._data = self._generate_random_data(1000) - return (httplib.OK, - body, - headers, - httplib.responses[httplib.OK]) - - def _foo_bar_container_foo_test_upload_INVALID_HASH1(self, method, url, body, headers): - body = '' - headers = {} - headers['etag'] = '"foobar"' - # test_upload_object_invalid_hash1 - return (httplib.OK, - body, - headers, - httplib.responses[httplib.OK]) - - def _foo_bar_container_foo_test_upload_INVALID_HASH2(self, method, url, body, headers): - # test_upload_object_invalid_hash2 - body = '' - headers = { 'etag': '"hash343hhash89h932439jsaa89"'} - return (httplib.OK, - body, - headers, - 
httplib.responses[httplib.OK]) - - def _foo_bar_container_foo_test_upload(self, method, url, body, headers): - # test_upload_object_success - body = '' - headers = { 'etag': '"0cc175b9c0f1b6a831c399e269772661"'} - return (httplib.OK, - body, - headers, - httplib.responses[httplib.OK]) - - def _foo_bar_container_foo_bar_object_INVALID_SIZE(self, method, url, body, headers): - # test_upload_object_invalid_file_size - body = '' - return (httplib.OK, - body, - headers, - httplib.responses[httplib.OK]) - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/test_file_fixtures.py libcloud-0.15.1/test/test_file_fixtures.py --- libcloud-0.5.0/test/test_file_fixtures.py 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/test_file_fixtures.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import sys -import unittest - -from test.file_fixtures import ComputeFileFixtures - -class FileFixturesTests(unittest.TestCase): - - def test_success(self): - f = ComputeFileFixtures('meta') - self.assertEqual("Hello, World!", f.load('helloworld.txt')) - - def test_failure(self): - f = ComputeFileFixtures('meta') - self.assertRaises(IOError, f.load, 'nil') - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/test_httplib_ssl.py libcloud-0.15.1/test/test_httplib_ssl.py --- libcloud-0.5.0/test/test_httplib_ssl.py 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/test_httplib_ssl.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,125 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import sys -import unittest -import os.path - -import libcloud.security -from libcloud.httplib_ssl import LibcloudHTTPSConnection - -class TestHttpLibSSLTests(unittest.TestCase): - - def setUp(self): - self.httplib_object = LibcloudHTTPSConnection('foo.bar') - - def test_verify_hostname(self): - cert1 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', - 'subject': ((('countryName', 'US'),), - (('stateOrProvinceName', 'Delaware'),), - (('localityName', 'Wilmington'),), - (('organizationName', 'Python Software Foundation'),), - (('organizationalUnitName', 'SSL'),), - (('commonName', 'somemachine.python.org'),))} - - cert2 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', - 'subject': ((('countryName', 'US'),), - (('stateOrProvinceName', 'Delaware'),), - (('localityName', 'Wilmington'),), - (('organizationName', 'Python Software Foundation'),), - (('organizationalUnitName', 'SSL'),), - (('commonName', 'somemachine.python.org'),)), - 'subjectAltName': ((('DNS', 'foo.alt.name')), - (('DNS', 'foo.alt.name.1')))} - - self.assertFalse(self.httplib_object._verify_hostname( - hostname='invalid', cert=cert1)) - self.assertTrue(self.httplib_object._verify_hostname( - hostname='somemachine.python.org', cert=cert1)) - - self.assertFalse(self.httplib_object._verify_hostname( - hostname='invalid', cert=cert2)) - self.assertTrue(self.httplib_object._verify_hostname( - hostname='foo.alt.name.1', cert=cert2)) - - def test_get_subject_alt_names(self): - cert1 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', - 'subject': ((('countryName', 'US'),), - (('stateOrProvinceName', 'Delaware'),), - (('localityName', 'Wilmington'),), - (('organizationName', 'Python Software Foundation'),), - (('organizationalUnitName', 'SSL'),), - (('commonName', 'somemachine.python.org'),))} - - cert2 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', - 'subject': ((('countryName', 'US'),), - (('stateOrProvinceName', 'Delaware'),), - (('localityName', 'Wilmington'),), - (('organizationName', 'Python Software Foundation'),), - 
(('organizationalUnitName', 'SSL'),), - (('commonName', 'somemachine.python.org'),)), - 'subjectAltName': ((('DNS', 'foo.alt.name')), - (('DNS', 'foo.alt.name.1')))} - - self.assertEqual(self.httplib_object._get_subject_alt_names(cert=cert1), - []) - - alt_names = self.httplib_object._get_subject_alt_names(cert=cert2) - self.assertEqual(len(alt_names), 2) - self.assertTrue('foo.alt.name' in alt_names) - self.assertTrue('foo.alt.name.1' in alt_names) - - def test_get_common_name(self): - cert = {'notAfter': 'Feb 16 16:54:50 2013 GMT', - 'subject': ((('countryName', 'US'),), - (('stateOrProvinceName', 'Delaware'),), - (('localityName', 'Wilmington'),), - (('organizationName', 'Python Software Foundation'),), - (('organizationalUnitName', 'SSL'),), - (('commonName', 'somemachine.python.org'),))} - - self.assertEqual(self.httplib_object._get_common_name(cert)[0], - 'somemachine.python.org') - self.assertEqual(self.httplib_object._get_common_name({}), - None) - - def test_setup_verify(self): - # @TODO: catch warnings - libcloud.security.VERIFY_SSL_CERT = True - self.httplib_object._setup_verify() - - libcloud.security.VERIFY_SSL_CERT = False - self.httplib_object._setup_verify() - - def test_setup_ca_cert(self): - # @TODO: catch warnings - self.httplib_object.verify = False - self.httplib_object._setup_ca_cert() - - self.assertEqual(self.httplib_object.ca_cert, None) - - self.httplib_object.verify = True - - libcloud.security.CA_CERTS_PATH = [os.path.abspath(__file__)] - self.httplib_object._setup_ca_cert() - self.assertTrue(self.httplib_object.ca_cert is not None) - - libcloud.security.CA_CERTS_PATH = [] - self.httplib_object._setup_ca_cert() - self.assertFalse(self.httplib_object.ca_cert) - self.assertFalse(self.httplib_object.verify) - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/test/test_pricing.py libcloud-0.15.1/test/test_pricing.py --- libcloud-0.5.0/test/test_pricing.py 2011-03-24 19:47:22.000000000 +0000 +++ 
libcloud-0.15.1/test/test_pricing.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,99 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import libcloud.pricing - -class PricingTestCase(unittest.TestCase): - - def test_get_pricing_success(self): - self.assertFalse('foo' in libcloud.pricing.PRICING_DATA['compute']) - - pricing = libcloud.pricing.get_pricing(driver_type='compute', - driver_name='foo', - pricing_file_path='test/pricing_test.json') - self.assertEqual(pricing['1'], 1.0) - self.assertEqual(pricing['2'], 2.0) - - self.assertEqual(libcloud.pricing.PRICING_DATA['compute']['foo']['1'], 1.0) - self.assertEqual(libcloud.pricing.PRICING_DATA['compute']['foo']['2'], 2.0) - - def test_get_pricing_invalid_file_path(self): - try: - libcloud.pricing.get_pricing(driver_type='compute', driver_name='bar', - pricing_file_path='inexistent.json') - except IOError: - pass - else: - self.fail('Invalid pricing file path provided, but an exception was not' - ' thrown') - - def test_get_pricing_invalid_driver_type(self): - try: - libcloud.pricing.get_pricing(driver_type='invalid_type', driver_name='bar', - pricing_file_path='inexistent.json') - except AttributeError: - pass - else: - self.fail('Invalid driver_type provided, but 
an exception was not' - ' thrown') - - def test_get_pricing_not_in_cache(self): - try: - libcloud.pricing.get_pricing(driver_type='compute', driver_name='inexistent', - pricing_file_path='test/pricing_test.json') - except KeyError: - pass - else: - self.fail('Invalid driver provided, but an exception was not' - ' thrown') - - def test_get_size_price(self): - libcloud.pricing.PRICING_DATA['compute']['foo'] = { 2: 2, '3': 3 } - price1 = libcloud.pricing.get_size_price(driver_type='compute', - driver_name='foo', - size_id=2) - price2 = libcloud.pricing.get_size_price(driver_type='compute', - driver_name='foo', - size_id='3') - self.assertEqual(price1, 2) - self.assertEqual(price2, 3) - - def test_invalid_pricing_cache(self): - libcloud.pricing.PRICING_DATA['compute']['foo'] = { 2: 2 } - self.assertTrue('foo' in libcloud.pricing.PRICING_DATA['compute']) - - libcloud.pricing.invalidate_pricing_cache() - self.assertFalse('foo' in libcloud.pricing.PRICING_DATA['compute']) - - def test_invalid_module_pricing_cache(self): - libcloud.pricing.PRICING_DATA['compute']['foo'] = { 1:1 } - - self.assertTrue('foo' in libcloud.pricing.PRICING_DATA['compute']) - - libcloud.pricing.invalidate_module_pricing_cache(driver_type='compute', - driver_name='foo') - self.assertFalse('foo' in libcloud.pricing.PRICING_DATA['compute']) - libcloud.pricing.invalidate_module_pricing_cache(driver_type='compute', - driver_name='foo1') - - def test_set_pricing(self): - self.assertFalse('foo' in libcloud.pricing.PRICING_DATA['compute']) - - libcloud.pricing.set_pricing(driver_type='compute', driver_name='foo', - pricing={'foo': 1}) - self.assertTrue('foo' in libcloud.pricing.PRICING_DATA['compute']) - diff -Nru libcloud-0.5.0/test/test_utils.py libcloud-0.15.1/test/test_utils.py --- libcloud-0.5.0/test/test_utils.py 2011-03-24 19:47:22.000000000 +0000 +++ libcloud-0.15.1/test/test_utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed to the Apache Software 
Foundation (ASF) under one or moreĀ§ -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys -import unittest -import warnings -import os.path - -# In Python > 2.7 DeprecationWarnings are disabled by default -warnings.simplefilter('default') - -import libcloud.utils -from libcloud.compute.types import Provider -from libcloud.compute.providers import DRIVERS - -WARNINGS_BUFFER = [] - -def show_warning(msg, cat, fname, lno, line=None): - WARNINGS_BUFFER.append((msg, cat, fname, lno)) - -original_func = warnings.showwarning - -class TestUtils(unittest.TestCase): - def setUp(self): - global WARNINGS_BUFFER - WARNINGS_BUFFER = [] - - def tearDown(self): - global WARNINGS_BUFFER - WARNINGS_BUFFER = [] - warnings.showwarning = original_func - - def test_guess_file_mime_type(self): - file_path = os.path.abspath(__file__) - mimetype, encoding = libcloud.utils.guess_file_mime_type(file_path=file_path) - - self.assertTrue(mimetype.find('python') != -1) - - def test_get_driver(self): - driver = libcloud.utils.get_driver(drivers=DRIVERS, - provider=Provider.DUMMY) - self.assertTrue(driver is not None) - - try: - driver = libcloud.utils.get_driver(drivers=DRIVERS, - provider='fooba') - except AttributeError: - pass - else: - self.fail('Invalid provider, but an exception was not thrown') - - def 
test_deprecated_warning(self): - warnings.showwarning = show_warning - - libcloud.utils.SHOW_DEPRECATION_WARNING = False - self.assertEqual(len(WARNINGS_BUFFER), 0) - libcloud.utils.deprecated_warning('test_module') - self.assertEqual(len(WARNINGS_BUFFER), 0) - - libcloud.utils.SHOW_DEPRECATION_WARNING = True - self.assertEqual(len(WARNINGS_BUFFER), 0) - libcloud.utils.deprecated_warning('test_module') - self.assertEqual(len(WARNINGS_BUFFER), 1) - - def test_in_development_warning(self): - warnings.showwarning = show_warning - - libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = False - self.assertEqual(len(WARNINGS_BUFFER), 0) - libcloud.utils.in_development_warning('test_module') - self.assertEqual(len(WARNINGS_BUFFER), 0) - - libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = True - self.assertEqual(len(WARNINGS_BUFFER), 0) - libcloud.utils.in_development_warning('test_module') - self.assertEqual(len(WARNINGS_BUFFER), 1) - -if __name__ == '__main__': - sys.exit(unittest.main()) diff -Nru libcloud-0.5.0/tox.ini libcloud-0.15.1/tox.ini --- libcloud-0.5.0/tox.ini 1970-01-01 00:00:00.000000000 +0000 +++ libcloud-0.15.1/tox.ini 2014-06-11 14:27:59.000000000 +0000 @@ -0,0 +1,61 @@ +[tox] +envlist = py25,py26,py27,pypy,py32,py33,py34,lint +setenv = + PIP_USE_MIRRORS=1 + +[testenv] +deps = mock + unittest2 + lockfile + paramiko +commands = cp libcloud/test/secrets.py-dist libcloud/test/secrets.py + python setup.py test + +[testenv:py26] +deps = mock + unittest2 + lockfile + paramiko + coverage +commands = cp libcloud/test/secrets.py-dist libcloud/test/secrets.py + python setup.py test + python setup.py coverage + +[testenv:py25] +setenv = PIP_INSECURE=1 +deps = mock + unittest2 + lockfile + ssl + simplejson + paramiko + +[testenv:py32] +deps = mock + lockfile + +[testenv:py33] +deps = mock + lockfile + +[testenv:py34] +# At some point we can switch to use the stdlib provided mock module on +# Python3.4+ +deps = mock + lockfile + +[testenv:docs] +deps = sphinx +basepython = 
python2.7 +changedir = docs +commands = python ../contrib/generate_provider_feature_matrix_table.py + sphinx-build -W -b html -d {envtmpdir}/doctrees . _build/html + +[testenv:lint] +deps = flake8 +commands = flake8 --exclude="test" libcloud/ + flake8 --max-line-length=160 libcloud/test/ + flake8 demos/ + flake8 --ignore=E902 docs/examples/ + flake8 --ignore=E902 contrib/ + python -mjson.tool libcloud/data/pricing.json